diff --git a/packages/windmill/Cargo.toml b/packages/windmill/Cargo.toml index 30289abf01..d784990b5c 100644 --- a/packages/windmill/Cargo.toml +++ b/packages/windmill/Cargo.toml @@ -143,7 +143,18 @@ dhat = "0.3" [package.metadata.component.target.dependencies] "docs:plugin" = { path = "packages/sequent-core/src/wit/plugin_interface.wit" } +[lints.rustdoc] +missing_crate_level_docs = "deny" + + +[lints.rust] +missing_docs = "deny" + + [lints.clippy] +missing_docs_in_private_items = "deny" +missing_errors_doc = "deny" +missing_panics_doc = "deny" arithmetic_side_effects = "deny" complexity = "deny" style = "deny" diff --git a/packages/windmill/external-bin/generate_logs.rs b/packages/windmill/external-bin/generate_logs.rs index 9fd865aa1d..6c7509062d 100644 --- a/packages/windmill/external-bin/generate_logs.rs +++ b/packages/windmill/external-bin/generate_logs.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Command-line tool to generate a CSV report of activity logs from immudb. use anyhow::{Context, Result}; use base64::engine::general_purpose; use base64::Engine; @@ -43,15 +43,21 @@ struct Cli { } #[derive(Deserialize, Debug)] +/// Configuration for the generate_logs tool. struct Config { + /// Immudb URL immudb_url: String, + /// Immudb username immudb_user: String, + /// Immudb password immudb_password: String, - elections: HashMap, // election_id -> election_name (for CSV filename) + /// Election ID -> Election Name (for CSV filename) + elections: HashMap, } // --- Helper Functions --- +/// Sanitizes a filename by replacing non-alphanumeric characters with underscores. 
fn sanitize_filename(name: &str) -> String { name.chars() .map(|c| match c { diff --git a/packages/windmill/src/bin/beat.rs b/packages/windmill/src/bin/beat.rs index b99a46c0b8..95746b0696 100644 --- a/packages/windmill/src/bin/beat.rs +++ b/packages/windmill/src/bin/beat.rs @@ -1,5 +1,6 @@ #![allow(non_upper_case_globals)] #![recursion_limit = "256"] +//! Celery Beat process for Windmill: registers periodic tasks and publishes them to RabbitMQ. // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only @@ -18,19 +19,25 @@ use windmill::tasks::review_boards::review_boards; use windmill::tasks::scheduled_events::scheduled_events; use windmill::tasks::scheduled_reports::scheduled_reports; +/// Beat tick intervals for periodic tasks (all values are in seconds). #[derive(Debug, Parser)] #[command(name = "beat", about = "Windmill's periodic task scheduler.")] struct CeleryOpt { + /// Interval between `review_boards` dispatches. #[arg(short = 'r', long, default_value = "15")] review_boards_interval: u64, + /// Interval between `scheduled_events` dispatches. #[arg(short = 's', long, default_value = "10")] schedule_events_interval: u64, + /// Interval between `scheduled_reports` dispatches. #[arg(short = 'c', long, default_value = "60")] schedule_reports_interval: u64, + /// Interval between `electoral_log_batch_dispatcher` dispatches. #[arg(short = 'e', long, default_value = "5")] electoral_log_interval: u64, } +/// Starts the beat scheduler: loads env, wires periodic tasks, and blocks until shutdown. #[tokio::main] async fn main() -> Result<()> { dotenv().ok(); diff --git a/packages/windmill/src/bin/main.rs b/packages/windmill/src/bin/main.rs index 9da1570482..34de5601c8 100644 --- a/packages/windmill/src/bin/main.rs +++ b/packages/windmill/src/bin/main.rs @@ -1,5 +1,6 @@ #![allow(non_upper_case_globals)] #![recursion_limit = "256"] +//! Celery worker binary for Windmill: runs the Celery app as a queue consumer or in produce-only mode. 
// SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only @@ -20,6 +21,11 @@ use windmill::services::celery_app::*; use windmill::services::probe::{setup_probe, AppName}; use windmill::services::tasks_semaphore::init_semaphore; +/// Returns the AMQP queue name for `queue` prefixed with `ENV_SLUG`. +/// +/// # Panics +/// +/// Panics if `ENV_SLUG` is not set in the environment. fn get_queue_name(queue: Queue) -> String { let slug = std::env::var("ENV_SLUG") .with_context(|| "missing env var ENV_SLUG") @@ -38,28 +44,39 @@ lazy_static! { static ref ELECTORAL_LOG_BATCH_QUEUE_NAME: String = get_queue_name(Queue::ElectoralLogBatch); } +/// Celery options for the Windmill Celery worker process. #[derive(Debug, Parser, Clone)] #[command(name = "windmill", about = "Windmill task queue prosumer.")] enum CeleryOpt { + /// Consume tasks from one or more AMQP queues. Consume { + /// Queue names to bind. #[arg(short, long, num_args(1..), default_values_t = vec![BEAT_QUEUE_NAME.clone()])] queues: Vec, + /// Maximum unacknowledged messages per consumer. #[arg(short, long, default_value = "100")] prefetch_count: u16, + /// When true, acknowledgements are sent after the task body returns. #[arg(short, long)] acks_late: bool, + /// Default retry cap Celery applies before marking a task failed. #[arg(short, long, default_value = "4")] task_max_retries: u32, + /// Retries when establishing the broker connection before exiting. #[arg(short, long, default_value = "5")] broker_connection_max_retries: u32, + /// Broker heartbeat interval in seconds. #[arg(short = 'H', long, default_value = "10")] heartbeat: u16, + /// Tokio worker thread count for the runtime (defaults to logical CPUs). #[arg(short, long)] worker_threads: Option, }, + /// Connect to the broker, log readiness, and exit without consuming. Produce, } +/// Finds duplicates in a vector of strings. 
fn find_duplicates(input: Vec<&str>) -> Vec<&str> { let mut occurrences = HashMap::new(); let mut duplicates = Vec::new(); @@ -77,6 +94,7 @@ fn find_duplicates(input: Vec<&str>) -> Vec<&str> { duplicates } +/// Resolves the async runtime worker thread count from `Consume` options or CPU count. fn read_worker_threads(opt: &CeleryOpt) -> usize { match opt.clone() { CeleryOpt::Consume { worker_threads, .. } => worker_threads, @@ -85,6 +103,7 @@ fn read_worker_threads(opt: &CeleryOpt) -> usize { .unwrap_or(num_cpus::get()) } +/// Entry point: builds the multi-thread runtime and runs async worker. fn main() -> Result<(), Box> { dotenv().ok(); @@ -106,6 +125,7 @@ fn main() -> Result<(), Box> { Ok(()) } +/// Runs the Celery app. async fn async_main(opt: CeleryOpt) -> Result<()> { init_log(true); setup_probe(AppName::WINDMILL).await; diff --git a/packages/windmill/src/lib.rs b/packages/windmill/src/lib.rs index 38ee49a2eb..7369d40758 100644 --- a/packages/windmill/src/lib.rs +++ b/packages/windmill/src/lib.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Windmill executes background work for the Sequent platform: Celery tasks, +//! Hasura-backed Postgres access, Keycloak and vault integration, reports, +//! imports/exports, and WASM-backed plugins. #![allow(clippy::too_many_arguments)] #![recursion_limit = "256"] #[macro_use] diff --git a/packages/windmill/src/postgres/application.rs b/packages/windmill/src/postgres/application.rs index 610f78ade1..a7f734d998 100644 --- a/packages/windmill/src/postgres/application.rs +++ b/packages/windmill/src/postgres/application.rs @@ -10,25 +10,28 @@ use crate::{ }; #[derive(Clone, Debug)] +/// Enrollment filters pub struct EnrollmentFilters { + /// Current lifecycle or workflow status. pub status: ApplicationStatus, + /// How the applicant was or will be verified. 
pub verification_type: Option, } use anyhow::{anyhow, Context, Result}; -use deadpool_postgres::Transaction; -use sequent_core::types::hasura::core::Application; -use serde_json::Value; -use tokio_postgres::row::Row; -// use tokio_postgres::types::ToSql; use chrono::DateTime; use chrono::Local; +use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; +use sequent_core::types::hasura::core::Application; use serde::Serialize; use serde_json::json; +use serde_json::Value; +use tokio_postgres::row::Row; use tokio_postgres::types::{Json, ToSql}; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Row representing Application wrapper pub struct ApplicationWrapper(pub Application); impl TryFrom for ApplicationWrapper { @@ -53,6 +56,12 @@ impl TryFrom for ApplicationWrapper { })) } } +/// Get permission label for a given post from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn get_permission_label_from_post( @@ -105,6 +114,11 @@ pub async fn get_permission_label_from_post( Ok((permission_label, area_id)) } +/// Insert application into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_application( @@ -174,6 +188,12 @@ pub async fn insert_application( Ok(()) } +/// Updates application status and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(err, skip_all)] pub async fn update_application_status( @@ -279,6 +299,17 @@ pub async fn update_application_status( Ok(application) } +/// Get applications for a given area, tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. +/// +/// # Panics +/// +/// Panics only if internal SQL placeholder arithmetic overflows, +/// which is not expected in production-sized filters. #[instrument(err, skip_all)] pub async fn get_applications( @@ -377,6 +408,17 @@ pub async fn get_applications( Ok((results, last_offset)) } +/// Counts applications based on filters. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. +/// +/// # Panics +/// +/// Panics only if internal SQL placeholder arithmetic overflows, +/// which is not expected in production-sized filters. #[instrument(err, skip_all)] pub async fn count_applications( @@ -488,6 +530,12 @@ pub async fn count_applications( Ok(count) } +/// Get applications for a given election from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_applications_by_election( @@ -529,6 +577,12 @@ pub async fn get_applications_by_election( Ok(results) } +/// Insert applications into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(err, skip_all)] pub async fn insert_applications( diff --git a/packages/windmill/src/postgres/area.rs b/packages/windmill/src/postgres/area.rs index 32ac51b0b6..1e74008478 100644 --- a/packages/windmill/src/postgres/area.rs +++ b/packages/windmill/src/postgres/area.rs @@ -15,6 +15,7 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Newtype for converting a `tokio_postgres::Row` from `sequent_backend.area` into [`Area`]. pub struct AreaWrapper(pub Area); impl TryFrom for AreaWrapper { @@ -38,10 +39,13 @@ impl TryFrom for AreaWrapper { })) } } -/** - * Returns a vector of areas per election event, with the posibility of - * filtering by area_id - */ +/// Returns a vector of areas per election event, with the possibility of +/// filtering by area_id +/// +/// # Errors +/// +/// Fails if any `area_ids` entry is not a valid UUID, if `tenant_id` / `election_event_id` +/// cannot be parsed, or if preparing or executing the query fails. #[instrument(skip(hasura_transaction, area_ids), err)] pub async fn get_areas( hasura_transaction: &Transaction<'_>, @@ -101,9 +105,11 @@ pub async fn get_areas( Ok(areas) } -/** - * Returns a map of areas per election event by name - */ +/// Maps area display name → area id for every area in the election event. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters or database errors while preparing or running the query. #[instrument(skip(hasura_transaction), err)] pub async fn get_areas_by_name( hasura_transaction: &Transaction<'_>, @@ -151,9 +157,11 @@ pub async fn get_areas_by_name( Ok(areas_map) } -/** - * Returns a map of area-names of an election event addressable by area-id - */ +/// Maps area id → display name for every area in the election event. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters or database errors while preparing or running the query. 
#[instrument(skip(hasura_transaction), err)] pub async fn get_areas_by_id( hasura_transaction: &Transaction<'_>, @@ -201,10 +209,11 @@ pub async fn get_areas_by_id( Ok(areas_map) } -/** - * Returns a hash map with the list of elections (Vec value) associated - * with each area (String key). - */ +/// Builds, for each area id, the list of election ids linked through `area_contest` → `contest`. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters or database errors while preparing or running the query. #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_by_area( hasura_transaction: &Transaction<'_>, @@ -261,10 +270,11 @@ pub async fn get_elections_by_area( Ok(areas_to_elections) } -/** - * Returns a vector of areas per election event, with the posibility of - * filtering by area_id - */ +/// Returns the full [`Area`] row for `area_id` in `tenant_id`, or `None` when no row exists. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters, if row decoding into [`Area`] fails, or on database errors. #[instrument(skip(hasura_transaction), err)] pub async fn get_area_by_id( hasura_transaction: &Transaction<'_>, @@ -310,7 +320,12 @@ pub async fn get_area_by_id( Ok(areas.first().cloned()) } - +/// Updates `parent_id` on each [`Area`] row to match the in-memory tree ordering from import. +/// +/// # Errors +/// +/// Fails when a UUID on an area cannot be parsed, when preparing or executing an `UPDATE` fails, +/// or when Postgres returns an error for any row in the batch. #[instrument(err, skip_all)] pub async fn upsert_area_parents( hasura_transaction: &Transaction<'_>, @@ -353,7 +368,12 @@ pub async fn upsert_area_parents( Ok(()) } - +/// Insert areas into the database. +/// +/// # Errors +/// +/// Fails if the area tree cannot be built, if an area id is missing from the map, if any UUID +/// field is invalid, or if an `INSERT` fails at the database layer. 
#[instrument(err, skip_all)] pub async fn insert_areas(hasura_transaction: &Transaction<'_>, areas: &[Area]) -> Result<()> { let tree_node_areas: Vec = areas.iter().map(|area| area.into()).collect(); @@ -408,7 +428,11 @@ pub async fn insert_areas(hasura_transaction: &Transaction<'_>, areas: &[Area]) Ok(()) } - +/// Returns every area row for the tenant and election event. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters, when decoding a row into [`Area`] fails, or on database errors. #[instrument(err, skip_all)] pub async fn get_event_areas( hasura_transaction: &Transaction<'_>, @@ -447,14 +471,20 @@ pub async fn get_event_areas( Ok(election_events) } +/// Minimal area fields returned when listing areas tied to a specific election. #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct AreaElection { + /// Area primary key as a string UUID. pub id: String, + /// Area name. pub name: Option, + /// Area description. pub description: Option, + /// Optional free-form operator notes serialized as a string. pub annotations: Option, } +/// Newtype mapping `sequent_backend.area` projection rows into [`AreaElection`]. pub struct AreaElectionWrapper(pub AreaElection); impl TryFrom for AreaElectionWrapper { @@ -469,9 +499,11 @@ impl TryFrom for AreaElectionWrapper { } } -/** - * Returns a vec of the areas related to giving election. - */ +/// Returns distinct [`Area`] rows linked to `election_id` through `area_contest` → `contest`. +/// +/// # Errors +/// +/// Fails on invalid UUID parameters, when decoding rows fails, or when the `SELECT DISTINCT` query fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_areas_by_election_id( hasura_transaction: &Transaction<'_>, @@ -524,7 +556,11 @@ pub async fn get_areas_by_election_id( Ok(areas) } -/// Returns a vector of areas per tenant and election event, filtered by a list of area_ids +/// Returns full [`Area`] rows for the subset of ids in `area_ids` within the tenant and event. 
+/// +/// # Errors +/// +/// Fails if any id string is not a valid UUID or if the `id = ANY($3)` query fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_areas_by_ids( hasura_transaction: &Transaction<'_>, @@ -568,7 +604,11 @@ pub async fn get_areas_by_ids( Ok(areas) } - +/// Removes all `sequent_backend.area_contest` rows for one area before re-linking contests. +/// +/// # Errors +/// +/// Fails on invalid UUID strings for `tenant_id` / `area_id`, or when the `DELETE` cannot be executed. #[instrument(skip(hasura_transaction), err)] pub async fn delete_area_contests( hasura_transaction: &Transaction<'_>, @@ -601,7 +641,12 @@ pub async fn delete_area_contests( Ok(()) } - +/// Persists label, presentation, hierarchy, and metadata changes for an existing [`Area`] row. +/// +/// # Errors +/// +/// Fails when UUID fields on `area` cannot be parsed, when preparing or executing the `UPDATE` fails, +/// or when Postgres rejects the statement (constraint violation, connection error, …). #[instrument(err, skip_all)] pub async fn update_area(hasura_transaction: &Transaction<'_>, area: Area) -> Result<()> { let statement = hasura_transaction @@ -648,7 +693,11 @@ pub async fn update_area(hasura_transaction: &Transaction<'_>, area: Area) -> Re Ok(()) } - +/// Inserts a single [`Area`] row. +/// +/// # Errors +/// +/// Fails on invalid UUIDs in `area`, when the `INSERT` cannot be prepared or executed, or on unique violations. 
#[instrument(err, skip_all)] pub async fn insert_area(hasura_transaction: &Transaction<'_>, area: Area) -> Result<()> { let statement = hasura_transaction diff --git a/packages/windmill/src/postgres/area_contest.rs b/packages/windmill/src/postgres/area_contest.rs index c3954dc2dc..034f7f0ff5 100644 --- a/packages/windmill/src/postgres/area_contest.rs +++ b/packages/windmill/src/postgres/area_contest.rs @@ -9,6 +9,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Row representing Area contest wrapper pub struct AreaContestWrapper(pub AreaContest); impl TryFrom for AreaContestWrapper { @@ -22,6 +23,12 @@ impl TryFrom for AreaContestWrapper { })) } } +/// Insert area to area contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn insert_area_to_area_contests( @@ -49,6 +56,12 @@ pub async fn insert_area_to_area_contests( .await?; Ok(()) } +/// Insert area contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_area_contests( @@ -86,6 +99,12 @@ pub async fn insert_area_contests( Ok(()) } +/// Get all area contests for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn export_area_contests( @@ -127,6 +146,12 @@ pub async fn export_area_contests( Ok(area_contests) } +/// Get areas by contest id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(err, skip_all)] pub async fn get_areas_by_contest_id( @@ -166,6 +191,12 @@ pub async fn get_areas_by_contest_id( Ok(area_ids) } +/// Get area contests by area contest ids from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_area_contests_by_area_contest_ids( diff --git a/packages/windmill/src/postgres/ballot_publication.rs b/packages/windmill/src/postgres/ballot_publication.rs index db65b5e9b9..e4c83b50f4 100644 --- a/packages/windmill/src/postgres/ballot_publication.rs +++ b/packages/windmill/src/postgres/ballot_publication.rs @@ -13,6 +13,7 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Row representing Ballot publication wrapper pub struct BallotPublicationWrapper(pub BallotPublication); impl TryFrom for BallotPublicationWrapper { @@ -44,6 +45,12 @@ impl TryFrom for BallotPublicationWrapper { })) } } +/// Get ballot publication by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_ballot_publication_by_id( @@ -88,6 +95,12 @@ pub async fn get_ballot_publication_by_id( Ok(results.first().cloned()) } +/// Update ballot publication status and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. pub async fn update_ballot_publication_status( hasura_transaction: &Transaction<'_>, @@ -139,6 +152,12 @@ pub async fn update_ballot_publication_status( Ok(results.first().cloned()) } +/// Update ballot publication and returns the updated row when applicable. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. pub async fn update_ballot_publication( hasura_transaction: &Transaction<'_>, @@ -189,6 +208,12 @@ pub async fn update_ballot_publication( Ok(results.first().cloned()) } +/// Get latest ballot publication from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_latest_ballot_publication( @@ -234,6 +259,12 @@ pub async fn get_latest_ballot_publication( Ok(results.first().cloned()) } +/// Get ballot publication for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_ballot_publication( @@ -276,6 +307,12 @@ pub async fn get_ballot_publication( Ok(results) } +/// Insert ballot publication into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn insert_ballot_publication( @@ -332,6 +369,12 @@ pub async fn insert_ballot_publication( Ok(results.first().cloned()) } +/// Get previous publication for a given election from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_previous_publication_election( @@ -391,6 +434,12 @@ pub async fn get_previous_publication_election( Ok(results.first().cloned()) } +/// Get previous publication from the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_previous_publication( @@ -448,6 +497,12 @@ pub async fn get_previous_publication( Ok(results.first().cloned()) } +/// Supports the soft delete other ballot publications workflow. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn soft_delete_other_ballot_publications( diff --git a/packages/windmill/src/postgres/ballot_style.rs b/packages/windmill/src/postgres/ballot_style.rs index 93eab65c35..0ad2fd9fa9 100644 --- a/packages/windmill/src/postgres/ballot_style.rs +++ b/packages/windmill/src/postgres/ballot_style.rs @@ -12,6 +12,7 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Row representing Ballot style wrapper pub struct BallotStyleWrapper(pub BallotStyle); impl TryFrom for BallotStyleWrapper { @@ -39,6 +40,12 @@ impl TryFrom for BallotStyleWrapper { })) } } +/// Insert ballot style into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction, ballot_eml))] pub async fn insert_ballot_style( @@ -104,6 +111,12 @@ pub async fn insert_ballot_style( elements.first().cloned().ok_or(anyhow!("Row not inserted")) } +/// Get all ballot styles for a given tenant, area and authorized election ids from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(hasura_transaction), err)] pub async fn get_all_ballot_styles( @@ -145,6 +158,12 @@ pub async fn get_all_ballot_styles( Ok(results) } +/// Get all ballot styles for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn export_event_ballot_styles( @@ -187,6 +206,12 @@ pub async fn export_event_ballot_styles( Ok(results) } +/// Get ballot styles by elections from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_ballot_styles_by_elections( @@ -240,6 +265,12 @@ pub async fn get_ballot_styles_by_elections( Ok(results) } +/// Get publication ballot styles for a given tenant, election event and ballot publication id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_publication_ballot_styles( diff --git a/packages/windmill/src/postgres/candidate.rs b/packages/windmill/src/postgres/candidate.rs index 23a352a1c0..f14b7d0248 100644 --- a/packages/windmill/src/postgres/candidate.rs +++ b/packages/windmill/src/postgres/candidate.rs @@ -16,6 +16,7 @@ use tokio_stream::StreamExt; // Added for streaming use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Row representing Candidate wrapper pub struct CandidateWrapper(pub Candidate); impl TryFrom for CandidateWrapper { @@ -42,6 +43,12 @@ impl TryFrom for CandidateWrapper { })) } } +/// Insert candidates into the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_candidates( @@ -91,6 +98,12 @@ pub async fn insert_candidates( Ok(()) } +/// Get all candidates for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn export_candidates( @@ -132,6 +145,12 @@ pub async fn export_candidates( Ok(election_events) } +/// Get candidates by contest id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_candidates_by_contest_id( @@ -176,6 +195,12 @@ pub async fn get_candidates_by_contest_id( Ok(candidate) } +/// Export candidates for a given contests ids to a CSV file. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn export_candidate_csv( diff --git a/packages/windmill/src/postgres/cast_vote.rs b/packages/windmill/src/postgres/cast_vote.rs index e42596c87f..265350402b 100644 --- a/packages/windmill/src/postgres/cast_vote.rs +++ b/packages/windmill/src/postgres/cast_vote.rs @@ -12,6 +12,12 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Insert cast vote into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(hasura_transaction, content, cast_ballot_signature), err)] pub async fn insert_cast_vote( hasura_transaction: &Transaction<'_>, @@ -98,6 +104,12 @@ pub async fn insert_cast_vote( } } +/// Get cast votes for a given tenant, election event, election id and voter id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_cast_votes( hasura_transaction: &Transaction<'_>, @@ -158,6 +170,12 @@ pub async fn get_cast_votes( Ok(cast_votes) } +/// Get cast votes for a given tenant, election event and election id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_cast_votes_by_election_id( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/postgres/certificate_authority.rs b/packages/windmill/src/postgres/certificate_authority.rs index dd27866a74..0d5924895c 100644 --- a/packages/windmill/src/postgres/certificate_authority.rs +++ b/packages/windmill/src/postgres/certificate_authority.rs @@ -8,6 +8,8 @@ use deadpool_postgres::{Client, Transaction}; use tracing::instrument; use uuid::Uuid; +/// Row representing Certificate authority record +#[allow(missing_docs)] pub struct CertificateAuthorityRecord { pub id: Uuid, pub tenant_id: Uuid, @@ -24,8 +26,14 @@ pub struct CertificateAuthorityRecord { } /// Inserts a certificate authority record into the database. +/// /// Returns `true` if the record was inserted, `false` if it was skipped /// due to a duplicate fingerprint for the same election event. +/// +/// # Errors +/// +/// Returns an error if the `INSERT` statement cannot be prepared or executed (excluding the +/// conflict-no-op case, which surfaces as `false`). 
#[instrument(skip(hasura_transaction, record), err)] pub async fn insert_certificate_authority( hasura_transaction: &Transaction<'_>, @@ -70,7 +78,14 @@ pub async fn insert_certificate_authority( /// Deletes the certificate authorities matching the given ids, scoped to the /// given tenant and election event. -/// Returns the subjects of all deleted rows. +/// +/// # Returns +/// +/// The `subject` column for each deleted row, in database return order. +/// +/// # Errors +/// +/// Returns an error if the `DELETE … RETURNING` statement cannot be prepared or executed. #[instrument(skip(hasura_transaction), err)] pub async fn delete_certificate_authorities( hasura_transaction: &Transaction<'_>, @@ -97,7 +112,11 @@ pub async fn delete_certificate_authorities( } /// Returns the PEM strings for all certificate authorities belonging to the -/// given election event, ordered by creation time. Uses a transaction. +/// given election event, ordered by creation time. +/// +/// # Errors +/// +/// Returns an error if the `SELECT` cannot be prepared or executed. #[instrument(skip(transaction), err)] pub async fn get_certificate_authorities_pem( transaction: &Transaction<'_>, @@ -127,7 +146,11 @@ pub async fn get_certificate_authorities_pem( /// Returns the PEM strings for the specified certificate authorities (by id), /// scoped to the given election event. If `ids` is empty, returns PEMs for all -/// CAs in the election event (same behaviour as `get_certificate_authorities_pem`). +/// CAs in the election event (same behaviour as [`get_certificate_authorities_pem`]). +/// +/// # Errors +/// +/// Returns an error if the `SELECT` cannot be prepared or executed. 
#[instrument(skip(transaction), err)] pub async fn get_certificate_authorities_pem_by_ids( transaction: &Transaction<'_>, diff --git a/packages/windmill/src/postgres/contest.rs b/packages/windmill/src/postgres/contest.rs index 1e03871e5c..2a907e52b0 100644 --- a/packages/windmill/src/postgres/contest.rs +++ b/packages/windmill/src/postgres/contest.rs @@ -10,6 +10,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Row representing Contest wrapper pub struct ContestWrapper(pub Contest); impl TryFrom for ContestWrapper { @@ -46,6 +47,12 @@ impl TryFrom for ContestWrapper { })) } } +/// Insert contest into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_contest( @@ -98,6 +105,12 @@ pub async fn insert_contest( Ok(()) } +/// Get all contests for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn export_contests( @@ -139,6 +152,12 @@ pub async fn export_contests( Ok(election_events) } +/// Get contest by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_contest_by_id( @@ -181,6 +200,12 @@ pub async fn get_contest_by_id( Err(anyhow::anyhow!("No contest found with the provided id")) } } +/// Get contest by election id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(hasura_transaction), err)] pub async fn get_contest_by_election_id( @@ -225,6 +250,12 @@ Ok(contests) } +/// Get contests by election ids from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_contest_by_election_ids( diff --git a/packages/windmill/src/postgres/document.rs b/packages/windmill/src/postgres/document.rs index 3ad62d4eb1..891424c2ee 100644 --- a/packages/windmill/src/postgres/document.rs +++ b/packages/windmill/src/postgres/document.rs @@ -9,6 +9,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Newtype mapping a `sequent_backend.document` row into [`Document`]. pub struct DocumentWrapper(pub Document); impl TryFrom for DocumentWrapper { @@ -35,8 +36,11 @@ } } +/// Join of a support-material row. pub struct SupportMaterialDocumentWrapper { + /// Support material definition (kind, visibility, payload reference). pub support_material: SupportMaterial, + /// Stored file metadata (`name`, `media_type`, size, …) for the linked document. pub document: Document, } @@ -74,7 +78,11 @@ }) } } - +/// Get document by id from the database. +/// +/// # Errors +/// +/// Fails on invalid UUID inputs, when the query cannot run, or when a returned row cannot be decoded. #[instrument(err, skip(hasura_transaction))] pub async fn get_document( hasura_transaction: &Transaction<'_>, @@ -129,6 +137,11 @@ /// Returns a vector of tuples of the (SupportMaterial, Document)s /// associated with a given election event. 
+/// +/// # Errors +/// +/// Fails on invalid UUID parameters, when preparing or executing the join query fails, or when +/// any row cannot be converted into [`SupportMaterialDocumentWrapper`]. #[instrument(err, skip(hasura_transaction))] pub async fn get_support_material_documents( hasura_transaction: &Transaction<'_>, @@ -195,6 +208,12 @@ Ok(Some(documents)) } +/// Insert document into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn insert_document( diff --git a/packages/windmill/src/postgres/election.rs b/packages/windmill/src/postgres/election.rs index fbd1543c1e..c72cdc2b10 100644 --- a/packages/windmill/src/postgres/election.rs +++ b/packages/windmill/src/postgres/election.rs @@ -13,6 +13,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Newtype mapping `sequent_backend.election` rows into [`Election`]. pub struct ElectionWrapper(pub Election); impl TryFrom for ElectionWrapper { @@ -51,10 +52,12 @@ } } -/** - * Returns a vector of areas per election event, with the posibility of - * filtering by area_id - */ +/// Returns a vector of areas per election event, with the possibility of +/// filtering by area_id +/// +/// # Errors +/// +/// Fails on invalid UUID parameters, when reading `num_allowed_revotes` from a row fails, or on database errors. #[instrument(skip(hasura_transaction), err)] pub async fn get_election_max_revotes( hasura_transaction: &Transaction<'_>, @@ -104,7 +107,12 @@ Ok(data) } -/* Returns election */ +/// Get an election by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(hasura_transaction), err)] pub async fn get_election_by_id( @@ -149,6 +157,12 @@ pub async fn get_election_by_id( Ok(elections.first().cloned()) } +/// Get all elections for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_elections( @@ -188,6 +202,12 @@ pub async fn get_elections( Ok(elections) } +/// Get elections by ids from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_by_ids( @@ -238,6 +258,12 @@ pub async fn get_elections_by_ids( Ok(elections) } +/// Get elections by keys ceremony id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_by_keys_ceremony_id( @@ -283,6 +309,12 @@ pub async fn get_elections_by_keys_ceremony_id( Ok(elections) } +/// Update election presentation for an election and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn update_election_presentation( @@ -329,6 +361,12 @@ pub async fn update_election_presentation( Ok(()) } +/// Update election voting status for an election and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(hasura_transaction), err)] pub async fn update_election_voting_status( @@ -370,6 +408,12 @@ pub async fn update_election_voting_status( Ok(()) } +/// Insert election into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn create_election( @@ -447,6 +491,12 @@ pub async fn create_election( .cloned() .ok_or(anyhow!("Coudln't insert election")) } +/// Insert multiple elections for a given election event into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_elections( @@ -552,6 +602,12 @@ pub async fn insert_elections( Ok(()) } +/// Get all elections for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn export_elections( @@ -593,6 +649,12 @@ pub async fn export_elections( Ok(elections) } +/// Set election keys ceremony for an election and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn set_election_keys_ceremony( @@ -650,6 +712,11 @@ pub async fn set_election_keys_ceremony( Ok(elections) } +/// Update for an election if the initialization report was generated. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(err, skip(hasura_transaction))] pub async fn set_election_initialization_report_generated( @@ -689,6 +756,12 @@ pub async fn set_election_initialization_report_generated( Ok(()) } +/// Updates election status and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn update_election_status( @@ -751,6 +824,12 @@ pub async fn update_election_status( Ok(results) } +/// Get all elections ids for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_ids( @@ -790,6 +869,12 @@ pub async fn get_elections_ids( Ok(elections) } +/// Get election permission label from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn get_election_permission_label( diff --git a/packages/windmill/src/postgres/election_event.rs b/packages/windmill/src/postgres/election_event.rs index 1999958bad..3ff28e6f14 100644 --- a/packages/windmill/src/postgres/election_event.rs +++ b/packages/windmill/src/postgres/election_event.rs @@ -12,7 +12,9 @@ use tokio_postgres::row::Row; use tracing::{event, info, instrument, Level}; use uuid::Uuid; +/// Row representing Election event datafix pub struct ElectionEventDatafix(pub ElectionEventData); +/// Row representing Election event wrapper pub struct ElectionEventWrapper(pub ElectionEventData); impl TryFrom for ElectionEventWrapper { @@ -43,6 +45,12 @@ impl TryFrom for ElectionEventWrapper { })) } } +/// Insert election event into the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn insert_election_event( @@ -93,6 +101,12 @@ Ok(()) } +/// Loads election event by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_election_event_by_id( @@ -137,6 +151,11 @@ .cloned() .ok_or(anyhow!("Election event {election_event_id} not found")) } +/// Loads an election event by id from the database, if it exists. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_election_event_by_id_if_exist( @@ -180,7 +199,11 @@ Ok(election_event) } -/// Returns all the Election events as ElectionEventDatafix +/// Returns a vector of all non-archived election events for a given tenant. +/// +/// # Errors +/// +/// Fails when `tenant_id` is not a valid UUID, when the `SELECT` cannot run, or when row mapping fails. #[instrument(err, skip_all)] pub async fn get_all_tenant_election_events( hasura_transaction: &Transaction<'_>, @@ -214,6 +237,12 @@ Ok(election_events) } +/// Updates election event annotations and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
pub async fn update_election_event_annotations( hasura_transaction: &Transaction<'_>, @@ -250,6 +279,12 @@ pub async fn update_election_event_annotations( Ok(()) } +/// Updates election event presentation and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. pub async fn update_election_event_presentation( hasura_transaction: &Transaction<'_>, @@ -288,6 +323,12 @@ pub async fn update_election_event_presentation( Ok(()) } +/// Updates elections status by election event and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn update_elections_status_by_election_event( @@ -335,6 +376,12 @@ pub async fn update_elections_status_by_election_event( Ok(ids) } +/// Updates election event status and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn update_election_event_status( @@ -367,6 +414,12 @@ pub async fn update_election_event_status( Ok(()) } +/// Loads election event by election area from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_election_event_by_election_area( @@ -424,6 +477,12 @@ pub async fn get_election_event_by_election_area( .cloned() .ok_or(anyhow!("Election event not found")) } +/// Deletes election event and related data within the tenant scope. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn delete_election_event( @@ -509,6 +568,12 @@ pub async fn delete_election_event( Ok(()) } +/// Updates bulletin board for an election event and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn update_bulletin_board( @@ -546,6 +611,12 @@ pub async fn update_bulletin_board( Ok(()) } +/// Loads batch election events from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_batch_election_events( diff --git a/packages/windmill/src/postgres/keycloak_realm.rs b/packages/windmill/src/postgres/keycloak_realm.rs index b57986da1a..a1b84ee53a 100644 --- a/packages/windmill/src/postgres/keycloak_realm.rs +++ b/packages/windmill/src/postgres/keycloak_realm.rs @@ -11,6 +11,12 @@ use tokio_postgres::row::Row; use tokio_postgres::types::ToSql; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Get realm id for a given realm name from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(skip(keycloak_transaction), err)] pub async fn get_realm_id( diff --git a/packages/windmill/src/postgres/keys_ceremony.rs b/packages/windmill/src/postgres/keys_ceremony.rs index cd9dd92342..5c54d65d90 100644 --- a/packages/windmill/src/postgres/keys_ceremony.rs +++ b/packages/windmill/src/postgres/keys_ceremony.rs @@ -10,6 +10,7 @@ use tokio_postgres::row::Row; use tracing::{event, info, instrument, Level}; use uuid::Uuid; +/// Row representing Keys ceremony wrapper pub struct KeysCeremonyWrapper(pub KeysCeremony); impl TryFrom for KeysCeremonyWrapper { @@ -39,6 +40,12 @@ impl TryFrom for KeysCeremonyWrapper { })) } } +/// Get all keys ceremonies for a given tenant and election event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_keys_ceremonies( @@ -80,6 +87,12 @@ pub async fn get_keys_ceremonies( Ok(keys_ceremonies) } +/// Get keys ceremony by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip_all)] pub async fn get_keys_ceremony_by_id( @@ -127,6 +140,12 @@ pub async fn get_keys_ceremony_by_id( .cloned() .ok_or(anyhow!("Keys ceremony {keys_ceremony_id} not found")) } +/// Insert keys ceremony into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn insert_keys_ceremony( @@ -209,6 +228,12 @@ pub async fn insert_keys_ceremony( elements.first().cloned().ok_or(anyhow!("Row not inserted")) } +/// Updates keys ceremony status and returns the updated row when applicable. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction, status), err)] pub async fn update_keys_ceremony_status( @@ -257,6 +282,12 @@ pub async fn update_keys_ceremony_status( Ok(()) } +/// Lists keys ceremony with the given filters. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn list_keys_ceremony( diff --git a/packages/windmill/src/postgres/lock.rs b/packages/windmill/src/postgres/lock.rs index 6bf17e9aab..8336335ad9 100644 --- a/packages/windmill/src/postgres/lock.rs +++ b/packages/windmill/src/postgres/lock.rs @@ -19,6 +19,12 @@ impl TryFrom for PgLock { }) } } +/// Insert or update lock into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn upsert_lock( @@ -71,6 +77,12 @@ pub async fn upsert_lock( Err(anyhow!("Couldn't upsert lock")) } } +/// Delete lock from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn delete_lock( diff --git a/packages/windmill/src/postgres/maintenance.rs b/packages/windmill/src/postgres/maintenance.rs index a8ebe35ef9..e8b59b0783 100644 --- a/packages/windmill/src/postgres/maintenance.rs +++ b/packages/windmill/src/postgres/maintenance.rs @@ -7,6 +7,12 @@ use crate::types::error::{Error, Result}; use anyhow::{anyhow, Context}; use deadpool_postgres::{Client as DbClient, Transaction as _}; use tracing::{error, info, instrument}; +/// Perform vacuum analyze on the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if database connection fails, or if VACUUM ANALYZE fails. #[instrument(err)] pub async fn vacuum_analyze_direct() -> Result<()> { diff --git a/packages/windmill/src/postgres/mod.rs b/packages/windmill/src/postgres/mod.rs index e07691b8df..6f271f3391 100644 --- a/packages/windmill/src/postgres/mod.rs +++ b/packages/windmill/src/postgres/mod.rs @@ -2,40 +2,79 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! PostgreSQL access for Windmill against the `sequent_backend` schema exposed via Hasura. + +/// Enrollment applications and related filters for an election event. pub mod application; +/// Hierarchical areas and links to contests. pub mod area; +/// Join rows between areas and contests for a tenant and election event. pub mod area_contest; +/// Ballot publication records tied to publication workflows. pub mod ballot_publication; +/// Per-contest or per-election ballot styling stored in Postgres. pub mod ballot_style; +/// Candidate rows for contests within an election event. pub mod candidate; +/// Cast vote payloads and metadata persisted for auditing and tally. pub mod cast_vote; +/// Certificate authority material recorded during setup ceremonies. pub mod certificate_authority; +/// Contest definitions scoped to tenant and election event. pub mod contest; +/// Generated or uploaded documents (PDFs, exports) tracked in the backend. pub mod document; +/// Election records (parent of contests) for an election event. pub mod election; +/// Election event rows, dates, and presentation configuration. pub mod election_event; +/// Keycloak realm identifiers and linkage stored for automation tasks. pub mod keycloak_realm; +/// Trustee keys and ceremony progress for cryptographic setup. pub mod keys_ceremony; +/// Advisory locks to serialize concurrent maintenance on shared rows. pub mod lock; +/// Housekeeping queries (cleanup, backfills) run from Windmill. 
pub mod maintenance; +/// Public preview configuration blobs for the voting portal. pub mod preview; +/// Report render jobs and output paths associated with templates. pub mod render_report; +/// Report definitions and scheduling metadata in `sequent_backend.report`. pub mod reports; +/// Per-area, per-contest tally aggregates after a tally session. pub mod results_area_contest; +/// Candidate-level results under an area contest aggregation. pub mod results_area_contest_candidate; +/// Contest-wide tally totals independent of area breakdown. pub mod results_contest; +/// Candidate totals within a contest result snapshot. pub mod results_contest_candidate; +/// Election-wide rolled-up results for a tally session. pub mod results_election; +/// Area-scoped slices of election-wide results. pub mod results_election_area; +/// High-level result events (sessions, publication markers). pub mod results_event; +/// Cron-driven scheduled events stored for the worker. pub mod scheduled_event; +/// Opaque secret references persisted for tasks (not the plaintext values). pub mod secret; +/// Tally session lifecycle (start, status, completion) in Postgres. pub mod tally_session; +/// Contest-level rows inside an active tally session. pub mod tally_session_contest; +/// Per-step execution records while a tally session runs. pub mod tally_session_execution; +/// Resolution or cancellation markers when a tally session ends. pub mod tally_session_resolution; +/// Tally sheet definitions and uploaded sheet metadata. pub mod tally_sheet; +/// Celery task execution logs mirrored or queried from Postgres. pub mod tasks_execution; +/// Communication templates (email/SMS) stored for rendering. pub mod template; +/// Tenant-level rows used by cross-event maintenance tasks. pub mod tenant; +/// Trustee directory entries for an election event. 
pub mod trustee; diff --git a/packages/windmill/src/postgres/preview.rs b/packages/windmill/src/postgres/preview.rs index 8cf1c4f1f5..8c823f3cee 100644 --- a/packages/windmill/src/postgres/preview.rs +++ b/packages/windmill/src/postgres/preview.rs @@ -8,6 +8,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Row representing Preview wrapper pub struct PreviewWrapper(pub Preview); impl TryFrom for PreviewWrapper { @@ -26,6 +27,12 @@ })) } } +/// Insert preview into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(err, skip(hasura_transaction))] pub async fn insert_preview( diff --git a/packages/windmill/src/postgres/render_report.rs b/packages/windmill/src/postgres/render_report.rs index 15da79e2ae..17b00f358a 100644 --- a/packages/windmill/src/postgres/render_report.rs +++ b/packages/windmill/src/postgres/render_report.rs @@ -13,6 +13,11 @@ use crate::postgres::tenant::get_tenant_by_id; use crate::services::documents::upload_and_return_document; use crate::tasks::render_report::{FormatType, RenderTemplateBody}; use sequent_core::util::temp_path::write_into_named_temp_file; +/// Render report for a given template, tenant and election event. +/// +/// # Errors +/// +/// Returns an error if the report rendering fails. 
#[instrument(err, skip(hasura_transaction))] pub async fn render_report_task( diff --git a/packages/windmill/src/postgres/reports.rs b/packages/windmill/src/postgres/reports.rs index 4bf9782716..4dcc2c5168 100644 --- a/packages/windmill/src/postgres/reports.rs +++ b/packages/windmill/src/postgres/reports.rs @@ -19,44 +19,69 @@ use uuid::Uuid; use crate::services::reports::template_renderer::EReportEncryption; #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone, Default)] +/// Report cron config representation pub struct ReportCronConfig { #[serde(default)] + /// Is active. pub is_active: bool, #[serde(default)] + /// Last document produced. pub last_document_produced: Option, #[serde(default)] + /// Cron expression. pub cron_expression: String, #[serde(default)] + /// Email recipients. pub email_recipients: Vec, #[serde(default)] + /// Executer username. pub executer_username: String, } #[derive(Debug, Clone, Serialize, Deserialize)] +/// Report representation pub struct Report { + /// Primary identifier for this entity. pub id: String, + /// Election event this data belongs to. pub election_event_id: String, + /// Tenant that owns or scopes this record. pub tenant_id: String, + /// Parent election identifier when applicable. pub election_id: Option, + /// Report type. pub report_type: String, + /// Template alias. pub template_alias: Option, + /// Encryption policy. pub encryption_policy: EReportEncryption, + /// Cron config. pub cron_config: Option, + /// Creation timestamp from the database. pub created_at: DateTime, + /// Keycloak permission label resolved for access control. pub permission_label: Option>, } +/// Stored `report_type` discriminator matching `sequent_backend.report.report_type`. #[allow(non_camel_case_types)] #[derive(Display, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, EnumString)] pub enum ReportType { + /// Initialization / setup report generated after keys ceremony completes. 
INITIALIZATION_REPORT, + /// Published numerical results and placements. ELECTORAL_RESULTS, + /// Per-ballot image bundle for verification. BALLOT_IMAGES, + /// Voter receipt PDFs or similar artifacts. BALLOT_RECEIPT, + /// Operator activity audit export. ACTIVITY_LOGS, + /// Manual verification checklist output. MANUAL_VERIFICATION, } +/// Report wrapper pub struct ReportWrapper(pub Report); impl TryFrom for ReportWrapper { @@ -94,6 +119,11 @@ impl TryFrom for ReportWrapper { })) } } +/// Get all active reports from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_all_active_reports(hasura_transaction: &Transaction<'_>) -> Result> { @@ -125,6 +155,12 @@ pub async fn get_all_active_reports(hasura_transaction: &Transaction<'_>) -> Res .with_context(|| "Error converting rows into Report")?; Ok(reports) } +/// Updates report last document time and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn update_report_last_document_time( @@ -169,6 +205,12 @@ pub async fn update_report_last_document_time( Ok(()) } +/// Get report by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_report_by_id( @@ -213,6 +255,11 @@ pub async fn get_report_by_id( /// Returns ONLY THE FIRST the template_alias which matches these arguments, /// If there are multiple matches, the rest are ignored. +/// +/// # Errors +/// +/// Fails on invalid UUID strings, when preparing or executing either fallback query fails, or when +/// decoding the returned `template_alias` fails. 
#[instrument(skip(hasura_transaction), err)] pub async fn get_template_alias_for_report( hasura_transaction: &Transaction<'_>, @@ -304,6 +351,7 @@ pub async fn get_template_alias_for_report( } return Ok(None); } +/// Get reports by condition from the database. #[instrument(skip(hasura_transaction), err)] async fn get_reports_by_condition( @@ -347,6 +395,11 @@ async fn get_reports_by_condition( Ok(reports) } +/// Get reports by election event id from the database. +/// +/// # Errors +/// +/// Returns an error if `get_reports_by_condition` fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_reports_by_election_event_id( @@ -362,6 +415,11 @@ pub async fn get_reports_by_election_event_id( ) .await } +/// Get reports by election id from the database. +/// +/// # Errors +/// +/// Returns an error if `get_reports_by_condition` fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_reports_by_election_id( @@ -371,6 +429,12 @@ pub async fn get_reports_by_election_id( ) -> Result> { get_reports_by_condition(hasura_transaction, tenant_id, election_id, "election_id").await } +/// Insert reports into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_reports( @@ -434,6 +498,12 @@ pub async fn insert_reports( Ok(()) } +/// Get report by type from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(skip_all, err)] pub async fn get_report_by_type( diff --git a/packages/windmill/src/postgres/results_area_contest.rs b/packages/windmill/src/postgres/results_area_contest.rs index 85d2753964..8463c98dc5 100644 --- a/packages/windmill/src/postgres/results_area_contest.rs +++ b/packages/windmill/src/postgres/results_area_contest.rs @@ -17,6 +17,7 @@ use tokio_postgres::types::ToSql; use tracing::{info, instrument}; use uuid::Uuid; +/// Results area contest wrapper pub struct ResultsAreaContestWrapper(pub ResultsAreaContest); impl TryFrom for ResultsAreaContestWrapper { type Error = anyhow::Error; @@ -101,6 +102,12 @@ impl TryFrom for ResultsAreaContestWrapper { })) } } +/// Update results area contest documents +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn update_results_area_contest_documents( @@ -173,6 +180,12 @@ pub async fn update_results_area_contest_documents( Err(anyhow!("Rows not found in table results_area_contest")) } } +/// Get results area contest from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_results_area_contest( @@ -244,6 +257,12 @@ pub async fn get_results_area_contest( None => Ok(None), } } +/// Insert results area contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, area_contests))] pub async fn insert_results_area_contests( @@ -257,7 +276,9 @@ pub async fn insert_results_area_contests( return Ok(Vec::new()); } + /// JSON row shape fed into `jsonb_to_recordset` for bulk-inserting area/contest tallies. 
#[derive(Serialize)] + #[allow(clippy::missing_docs_in_private_items)] struct InsertAreaContestData { tenant_id: Uuid, election_event_id: Uuid, @@ -421,6 +442,12 @@ pub async fn insert_results_area_contests( Ok(values) } +/// Get event results area contest from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_event_results_area_contest( @@ -462,7 +489,9 @@ pub async fn get_event_results_area_contest( Ok(results) } +/// Full row mirror used with `jsonb_to_recordset` for [`insert_many_results_area_contests`]. #[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsAreaContest { id: Uuid, tenant_id: Uuid, @@ -492,6 +521,12 @@ struct InsertableResultsAreaContest { total_auditable_votes: Option, total_auditable_votes_percent: Option, } +/// Insert many results area contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, records))] pub async fn insert_many_results_area_contests( diff --git a/packages/windmill/src/postgres/results_area_contest_candidate.rs b/packages/windmill/src/postgres/results_area_contest_candidate.rs index 9fe71e5dbd..db7dda0c3c 100644 --- a/packages/windmill/src/postgres/results_area_contest_candidate.rs +++ b/packages/windmill/src/postgres/results_area_contest_candidate.rs @@ -16,6 +16,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Results area contest candidate wrapper pub struct ResultsAreaContestCandidateWrapper(pub ResultsAreaContestCandidate); impl TryFrom for ResultsAreaContestCandidateWrapper { @@ -59,6 +60,12 @@ impl TryFrom for ResultsAreaContestCandidateWrapper { )) } } +/// Get results area contest candidates from the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_results_area_contest_candidates( @@ -122,6 +129,12 @@ pub async fn get_results_area_contest_candidates( Ok(results_area_contest_candidate.first().cloned()) } +/// Insert results area contest candidates into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, contest_candidates))] pub async fn insert_results_area_contest_candidates( @@ -134,11 +147,15 @@ pub async fn insert_results_area_contest_candidates( if contest_candidates.is_empty() { return Ok(Vec::new()); } + + /// JSON-friendly row for `jsonb_to_recordset` when inserting per-candidate area results. #[derive(Debug, Serialize)] + #[allow(clippy::missing_docs_in_private_items)] pub struct InsertResultsAreaContestCandidate { pub tenant_id: Uuid, pub election_event_id: Uuid, pub results_event_id: Uuid, + /// Parent election identifier when applicable. pub election_id: Uuid, pub contest_id: Uuid, pub candidate_id: Uuid, @@ -237,6 +254,12 @@ pub async fn insert_results_area_contest_candidates( Ok(values) } +/// Get event results area contest candidates from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_event_results_area_contest_candidates( @@ -280,7 +303,9 @@ pub async fn get_event_results_area_contest_candidates( Ok(results_area_contest_candidate) } +/// Full row mirror for [`insert_many_results_area_contest_candidates`] JSON bulk insert. 
#[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsAreaContestCandidate { id: Uuid, tenant_id: Uuid, @@ -300,6 +325,12 @@ struct InsertableResultsAreaContestCandidate { cast_votes_percent: Option, documents: Option, } +/// Insert many results area contest candidates into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, candidates))] pub async fn insert_many_results_area_contest_candidates( diff --git a/packages/windmill/src/postgres/results_contest.rs b/packages/windmill/src/postgres/results_contest.rs index 8ce632a9b5..0a9e9ec065 100644 --- a/packages/windmill/src/postgres/results_contest.rs +++ b/packages/windmill/src/postgres/results_contest.rs @@ -16,6 +16,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Results contest wrapper pub struct ResultsContestWrapper(pub ResultsContest); impl TryFrom for ResultsContestWrapper { @@ -103,6 +104,12 @@ impl TryFrom for ResultsContestWrapper { })) } } +/// Updates results contest documents and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn update_results_contest_documents( @@ -169,6 +176,12 @@ pub async fn update_results_contest_documents( Err(anyhow!("Rows not found in table results_contest")) } } +/// Get results contest from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_results_contest( @@ -227,6 +240,12 @@ pub async fn get_results_contest( )) } } +/// Insert results contests into the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, contests))] pub async fn insert_results_contests( @@ -240,7 +259,9 @@ pub async fn insert_results_contests( return Ok(Vec::new()); } + /// JSON row for `jsonb_to_recordset` when inserting global (non-area) contest tallies. #[derive(Serialize)] + #[allow(clippy::missing_docs_in_private_items)] struct InsertContestData { tenant_id: Uuid, election_event_id: Uuid, @@ -412,6 +433,11 @@ pub async fn insert_results_contests( Ok(values) } +/// Get event results contest from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. #[instrument(skip(hasura_transaction), err)] pub async fn get_event_results_contest( @@ -454,7 +480,9 @@ pub async fn get_event_results_contest( Ok(results) } +/// Full contest-level tally row serialized for [`insert_many_results_contests`]. #[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsContest { id: Uuid, tenant_id: Uuid, @@ -486,6 +514,12 @@ struct InsertableResultsContest { total_auditable_votes: Option, total_auditable_votes_percent: Option, } +/// Insert many results contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(err, skip(hasura_transaction, results_contests))] pub async fn insert_many_results_contests( diff --git a/packages/windmill/src/postgres/results_contest_candidate.rs b/packages/windmill/src/postgres/results_contest_candidate.rs index 204d71d23a..468e4db379 100644 --- a/packages/windmill/src/postgres/results_contest_candidate.rs +++ b/packages/windmill/src/postgres/results_contest_candidate.rs @@ -16,6 +16,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Results contest candidate wrapper pub struct ResultsContestCandidateWrapper(pub ResultsContestCandidate); impl TryFrom for ResultsContestCandidateWrapper { @@ -56,6 +57,12 @@ impl TryFrom for ResultsContestCandidateWrapper { })) } } +/// Insert results contest candidates into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, contest_candidates))] pub async fn insert_results_contest_candidates( @@ -68,7 +75,9 @@ pub async fn insert_results_contest_candidates( if contest_candidates.is_empty() { return Ok(Vec::new()); } + /// JSON row for `jsonb_to_recordset` when inserting contest-wide candidate totals. #[derive(Debug, Serialize)] + #[allow(clippy::missing_docs_in_private_items)] pub struct InsertResultsContestCandidate { pub tenant_id: Uuid, pub election_event_id: Uuid, @@ -166,6 +175,12 @@ pub async fn insert_results_contest_candidates( Ok(values) } +/// Get event results contest candidates from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_event_results_contest_candidates( @@ -208,7 +223,9 @@ pub async fn get_event_results_contest_candidates( Ok(results_contest_candidate) } +/// Full row mirror for [`insert_many_results_contest_candidates`] JSON bulk insert. 
#[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsContestCandidate { id: Uuid, tenant_id: Uuid, @@ -227,6 +244,12 @@ struct InsertableResultsContestCandidate { cast_votes_percent: Option, documents: Option, } +/// Insert many results contest candidates into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, candidates))] pub async fn insert_many_results_contest_candidates( diff --git a/packages/windmill/src/postgres/results_election.rs b/packages/windmill/src/postgres/results_election.rs index ca4b6177a9..b30968d2ff 100644 --- a/packages/windmill/src/postgres/results_election.rs +++ b/packages/windmill/src/postgres/results_election.rs @@ -17,6 +17,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Results election wrapper pub struct ResultsElectionWrapper(pub ResultsElection); impl TryFrom for ResultsElectionWrapper { @@ -53,6 +54,12 @@ impl TryFrom for ResultsElectionWrapper { })) } } +/// Update results election documents into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip_all, err)] pub async fn update_results_election_documents( @@ -122,6 +129,12 @@ pub async fn update_results_election_documents( Err(anyhow!("Rows not found in table results_election")) } } +/// Insert results elections into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, elections))] pub async fn insert_results_elections( @@ -134,7 +147,9 @@ pub async fn insert_results_elections( if elections.is_empty() { return Ok(Vec::new()); } + /// JSON row for `jsonb_to_recordset` when persisting election-level turnout aggregates. 
#[derive(Debug, Serialize)] + #[allow(clippy::missing_docs_in_private_items)] pub struct InsertResultsElection { pub tenant_id: Uuid, pub election_event_id: Uuid, @@ -205,6 +220,12 @@ pub async fn insert_results_elections( Ok(values) } +/// Get election results from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err)] pub async fn get_election_results( @@ -256,6 +277,12 @@ pub async fn get_election_results( Ok(results) } +/// Get results election by results event id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err)] pub async fn get_results_election_by_results_event_id( @@ -310,6 +337,12 @@ pub async fn get_results_election_by_results_event_id( .cloned() .ok_or(anyhow!("Results election not found")) } +/// Get event results election from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction))] pub async fn get_event_results_election( @@ -354,7 +387,9 @@ pub async fn get_event_results_election( Ok(results) } +/// Full election-level results row for [`insert_many_results_elections`]. #[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsElection { id: Uuid, tenant_id: Uuid, @@ -371,6 +406,12 @@ struct InsertableResultsElection { annotations: Option, documents: Option, } +/// Insert many results elections into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(err, skip(hasura_transaction, results))] pub async fn insert_many_results_elections( diff --git a/packages/windmill/src/postgres/results_election_area.rs b/packages/windmill/src/postgres/results_election_area.rs index 17139053cc..73f99872e4 100644 --- a/packages/windmill/src/postgres/results_election_area.rs +++ b/packages/windmill/src/postgres/results_election_area.rs @@ -14,6 +14,7 @@ use tokio_postgres::types::ToSql; use tracing::instrument; use uuid::Uuid; +/// Results election area wrapper pub struct ResultsElectionAreaWrapper(pub ResultsElectionArea); impl TryFrom for ResultsElectionAreaWrapper { @@ -38,6 +39,12 @@ impl TryFrom for ResultsElectionAreaWrapper { })) } } +/// Insert results election area documents into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction, documents), err)] pub async fn insert_results_election_area_documents( @@ -96,6 +103,12 @@ pub async fn insert_results_election_area_documents( .map_err(|err| anyhow!("Error at inser into results_election_area {} ", err))?; Ok(()) } +/// Get event results election area from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err)] pub async fn get_event_results_election_area( @@ -140,7 +153,9 @@ pub async fn get_event_results_election_area( Ok(results) } +/// Area-scoped turnout snapshot row serialized for [`insert_many_results_elections_areas`]. #[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsElectionArea { id: Uuid, tenant_id: Uuid, @@ -153,6 +168,12 @@ struct InsertableResultsElectionArea { documents: Option, name: Option, } +/// Insert many results elections areas into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(err, skip(hasura_transaction, areas))] pub async fn insert_many_results_elections_areas( diff --git a/packages/windmill/src/postgres/results_event.rs b/packages/windmill/src/postgres/results_event.rs index 6c24c24b6d..aefb8e7b81 100644 --- a/packages/windmill/src/postgres/results_event.rs +++ b/packages/windmill/src/postgres/results_event.rs @@ -13,6 +13,7 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Results event wrapper pub struct ResultsEventWrapper(pub ResultsEvent); impl TryFrom for ResultsEventWrapper { @@ -36,6 +37,12 @@ impl TryFrom for ResultsEventWrapper { })) } } +/// Update results event documents into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn update_results_event_documents( @@ -92,6 +99,12 @@ pub async fn update_results_event_documents( Err(anyhow!("Rows not found in table results_event")) } } +/// Get results event by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction))] pub async fn get_results_event_by_id( @@ -139,6 +152,12 @@ pub async fn get_results_event_by_id( .cloned() .ok_or(anyhow!("Results event {results_event_id} not found")) } +/// Insert results event into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction), ret)] pub async fn insert_results_event( @@ -188,6 +207,12 @@ pub async fn insert_results_event( }; Ok(value.clone()) } +/// Get results event by event id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(err, skip(hasura_transaction))] pub async fn get_results_event_by_event_id( @@ -230,7 +255,9 @@ pub async fn get_results_event_by_event_id( Ok(results_events) } +/// Results publication / tally event header row for [`insert_many_results_events`]. #[derive(Debug, Serialize)] +#[allow(clippy::missing_docs_in_private_items)] struct InsertableResultsEvent { id: Uuid, tenant_id: Uuid, @@ -242,6 +269,12 @@ struct InsertableResultsEvent { labels: Option, documents: Option, } +/// Insert many results events into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, results_events))] pub async fn insert_many_results_events( diff --git a/packages/windmill/src/postgres/scheduled_event.rs b/packages/windmill/src/postgres/scheduled_event.rs index 783407b2cc..f322b44fda 100644 --- a/packages/windmill/src/postgres/scheduled_event.rs +++ b/packages/windmill/src/postgres/scheduled_event.rs @@ -16,6 +16,7 @@ use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Scheduled event wrapper pub struct ScheduledEventWrapper(pub ScheduledEvent); impl TryFrom for ScheduledEventWrapper { @@ -63,6 +64,11 @@ impl TryFrom for ScheduledEventWrapper { })) } } +/// Get all active events from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails. #[instrument(skip(hasura_transaction), err)] pub async fn find_all_active_events( @@ -95,6 +101,12 @@ pub async fn find_all_active_events( .with_context(|| "Error converting rows into ScheduledEvent")?; Ok(scheduled_events) } +/// Get scheduled event by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(skip(hasura_transaction), err)] pub async fn find_scheduled_event_by_id( @@ -150,6 +162,12 @@ pub async fn find_scheduled_event_by_id( Ok(scheduled_events.first().cloned()) } +/// Get scheduled event by task id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn find_scheduled_event_by_task_id( @@ -194,6 +212,12 @@ pub async fn find_scheduled_event_by_task_id( Ok(scheduled_events.first().cloned()) } +/// Stop scheduled event in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn stop_scheduled_event( @@ -228,6 +252,12 @@ pub async fn stop_scheduled_event( Ok(()) } +/// Archive scheduled event in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn archive_scheduled_event( @@ -262,6 +292,12 @@ pub async fn archive_scheduled_event( Ok(()) } +/// Update scheduled event in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn update_scheduled_event( @@ -299,6 +335,12 @@ pub async fn update_scheduled_event( Ok(()) } +/// Insert scheduled event into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_scheduled_event( @@ -386,6 +428,12 @@ pub async fn insert_scheduled_event( Err(anyhow!("Unexpected rows affected {}", rows.len())) } } +/// Get scheduled event by election event id from the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn find_scheduled_event_by_election_event_id( @@ -430,6 +478,12 @@ pub async fn find_scheduled_event_by_election_event_id( Ok(scheduled_events) } +/// Get scheduled event by election event id and event processor from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn find_scheduled_event_by_election_event_id_and_event_processor( @@ -476,6 +530,12 @@ pub async fn find_scheduled_event_by_election_event_id_and_event_processor( Ok(scheduled_events) } +/// Insert new scheduled event into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_new_scheduled_event( diff --git a/packages/windmill/src/postgres/secret.rs b/packages/windmill/src/postgres/secret.rs index 28c9fe1bb9..367201146b 100644 --- a/packages/windmill/src/postgres/secret.rs +++ b/packages/windmill/src/postgres/secret.rs @@ -12,15 +12,25 @@ use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; +/// Row from `sequent_backend.secret` holding an opaque key/value pair for workers. #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] +#[allow(missing_docs)] pub struct Secret { + /// Primary key as string UUID. pub id: String, + /// Owning tenant id as string UUID. pub tenant_id: String, + /// Optional election event scope when the secret is event-specific. pub election_event_id: Option, + /// Structured labels JSON. pub labels: Option, + /// Operator annotations JSON. pub annotations: Option, + /// Secret key. pub key: String, + /// Raw secret bytes. pub value: Vec, + /// Creation timestamp when present on the row.
pub created_at: Option>, } @@ -42,6 +52,12 @@ impl TryFrom for Secret { }) } } +/// Get secret by key from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_secret_by_key( @@ -93,6 +109,12 @@ pub async fn get_secret_by_key( } Ok(Some(secrets[0].clone())) } +/// Insert secret into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_secret( diff --git a/packages/windmill/src/postgres/tally_session.rs b/packages/windmill/src/postgres/tally_session.rs index 38d49dc19b..cb541c0762 100644 --- a/packages/windmill/src/postgres/tally_session.rs +++ b/packages/windmill/src/postgres/tally_session.rs @@ -17,6 +17,7 @@ use tokio_postgres::{row::Row, types::ToSql}; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Tally session wrapper pub struct TallySessionWrapper(pub TallySession); impl TryFrom for TallySessionWrapper { @@ -62,6 +63,12 @@ impl TryFrom for TallySessionWrapper { })) } } +/// Insert tally session into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_tally_session( @@ -149,6 +156,12 @@ pub async fn insert_tally_session( }; Ok(value.clone()) } +/// Get tally sessions that still have a pending post-tally task, filtered by election event id, from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails.
#[instrument(err, skip(hasura_transaction))] pub async fn get_tally_session_by_election_event_id_pending_post_tally_task( @@ -193,6 +206,12 @@ pub async fn get_tally_session_by_election_event_id_pending_post_tally_task( Ok(elements) } +/// Get tally sessions by election event id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction))] pub async fn get_tally_sessions_by_election_event_id( @@ -243,6 +262,12 @@ pub async fn get_tally_sessions_by_election_event_id( Ok(elements) } +/// Get tally session by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction))] pub async fn get_tally_session_by_id( @@ -290,6 +315,12 @@ pub async fn get_tally_session_by_id( .cloned() .ok_or(anyhow!("Tally Session {tally_session_id} not found")) } +/// Update tally session annotation in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip_all)] pub async fn update_tally_session_annotation( @@ -329,6 +360,12 @@ pub async fn update_tally_session_annotation( Ok(()) } +/// Get tally sessions by election id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction))] pub async fn get_tally_sessions_by_election_id( @@ -371,6 +408,12 @@ pub async fn get_tally_sessions_by_election_id( Ok(tally_sessions) } +/// Update tally session status in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(err, skip_all)] pub async fn update_tally_session_status( @@ -414,6 +457,12 @@ pub async fn update_tally_session_status( Ok(()) } +/// Set post tally task completed in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip_all)] pub async fn set_post_tally_task_completed( @@ -451,6 +500,12 @@ pub async fn set_post_tally_task_completed( Ok(()) } +/// Set tally session completed in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip_all)] pub async fn set_tally_session_completed( @@ -493,21 +548,40 @@ pub async fn set_tally_session_completed( Ok(()) } +/// Serialized tally session row for [`insert_many_tally_sessions`] bulk insert. #[derive(Debug, Serialize)] struct InsertableTallySession { + /// Owning tenant. tenant_id: Uuid, + /// Election event being tallied. election_event_id: Uuid, + /// Tally session primary key. id: Uuid, + /// Keys ceremony that produced the trustee material for this run. keys_ceremony_id: Uuid, + /// Elections included in this tally batch. election_ids: Vec, + /// Areas included in this tally batch. area_ids: Vec, + /// Serialized execution status for the tally worker state machine. execution_status: Option, + /// Trustee threshold configured for the mix/tally. threshold: i32, + /// JSON configuration snapshot for the tally engine. configuration: Option, + /// Tally algorithm identifier (`tally_type` column). tally_type: Option, + /// Arbitrary annotations JSON merged by tasks. annotations: Option, + /// Election permission labels permission_label: Option>, } +/// Insert many tally sessions into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(skip(hasura_transaction, sessions), err)] pub async fn insert_many_tally_sessions( diff --git a/packages/windmill/src/postgres/tally_session_contest.rs b/packages/windmill/src/postgres/tally_session_contest.rs index a54ecffdec..a1495e89c4 100644 --- a/packages/windmill/src/postgres/tally_session_contest.rs +++ b/packages/windmill/src/postgres/tally_session_contest.rs @@ -13,6 +13,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, warn, Level}; use uuid::Uuid; +/// Tally session contest wrapper pub struct TallySessionContestWrapper(pub TallySessionContest); impl TryFrom for TallySessionContestWrapper { @@ -37,6 +38,12 @@ impl TryFrom for TallySessionContestWrapper { })) } } +/// Update tally session contests annotations in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. pub async fn update_tally_session_contests_annotations( hasura_transaction: &Transaction<'_>, @@ -80,6 +87,12 @@ pub async fn update_tally_session_contests_annotations( Ok(()) } +/// Insert tally session contest into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn insert_tally_session_contest( @@ -143,6 +156,16 @@ pub async fn insert_tally_session_contest( }; Ok(value.clone()) } +/// Get tally session highest batch from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. +/// +/// # Panics +/// +/// Panics if incrementing the highest batch number overflows (`checked_add(1)`). #[instrument(skip(hasura_transaction), err)] pub async fn get_tally_session_highest_batch( @@ -191,6 +214,12 @@ pub async fn get_tally_session_highest_batch( .checked_add(1) .expect("tally session batch number overflow")) } +/// Get tally session contests from the database.
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_tally_session_contests( @@ -246,6 +275,12 @@ pub async fn get_tally_session_contests( Ok(values) } +/// Get event tally session contest from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_event_tally_session_contest( @@ -286,21 +321,40 @@ pub async fn get_event_tally_session_contest( Ok(values) } +/// One work unit (area × optional contest) inside a tally session, for bulk insert. #[derive(Debug, Serialize)] struct InsertableTallySessionContest { + /// Row primary key. id: Uuid, + /// Owning tenant. tenant_id: Uuid, + /// Election event scope. election_event_id: Uuid, + /// Area being processed in this slice. area_id: Uuid, + /// Optional contest when the slice is contest-specific. contest_id: Option, + /// Worker session index for ordering partial results. session_id: i32, + /// Creation timestamp when supplied. created_at: Option>, + /// Last update timestamp when supplied. last_updated_at: Option>, + /// Labels JSON. labels: Option, + /// Annotations JSON. annotations: Option, + /// Parent tally session identifier. tally_session_id: Uuid, + /// Parent election identifier. election_id: Uuid, } +/// Insert many tally session contests into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(skip(hasura_transaction, contests), err)] pub async fn insert_many_tally_session_contests( diff --git a/packages/windmill/src/postgres/tally_session_execution.rs b/packages/windmill/src/postgres/tally_session_execution.rs index a514300775..b8886d4faa 100644 --- a/packages/windmill/src/postgres/tally_session_execution.rs +++ b/packages/windmill/src/postgres/tally_session_execution.rs @@ -15,6 +15,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Tally session execution wrapper pub struct TallySessionExecutionWrapper(pub TallySessionExecution); impl TryFrom for TallySessionExecutionWrapper { @@ -40,6 +41,12 @@ impl TryFrom for TallySessionExecutionWrapper { })) } } +/// Insert tally session execution into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction, status), err)] pub async fn insert_tally_session_execution( @@ -118,6 +125,12 @@ pub async fn insert_tally_session_execution( }; Ok(value.clone()) } +/// Get tally session executions from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_tally_session_executions( @@ -163,6 +176,12 @@ pub async fn get_tally_session_executions( Ok(elements) } +/// Get last tally session execution from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_last_tally_session_execution( @@ -209,6 +228,12 @@ pub async fn get_last_tally_session_execution( Ok(elements.first().cloned()) } +/// Get event tally session executions from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
pub async fn get_event_tally_session_executions( hasura_transaction: &Transaction<'_>, @@ -250,21 +275,40 @@ pub async fn get_event_tally_session_executions( Ok(elements) } +/// Serialized tally execution state row for [`insert_many_tally_session_executions`]. #[derive(Debug, Serialize)] struct InsertableTallySessionExecution { + /// Execution row primary key. id: Uuid, + /// Owning tenant. tenant_id: Uuid, + /// Election event scope. election_event_id: Uuid, + /// Creation timestamp when supplied. created_at: Option>, + /// Last update timestamp when supplied. last_updated_at: Option>, + /// Labels JSON. labels: Option, + /// Annotations JSON. annotations: Option, + /// Last processed Celery/RabbitMQ message id for this execution. current_message_id: i32, + /// Parent tally session identifier. tally_session_id: Uuid, + /// Ordered list of child session ids participating in this execution. session_ids: Option>, + /// JSON blob describing sub-step status for the worker UI. status: Option, + /// Linked results publication once the tally publishes. results_event_id: Option, } +/// Insert many tally session executions into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip(hasura_transaction, executions))] pub async fn insert_many_tally_session_executions( @@ -343,6 +387,12 @@ pub async fn insert_many_tally_session_executions( Ok(inserted) } +/// Updates tally session execution documents and returns the updated row when applicable. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails, or if row mapping is inconsistent. 
#[instrument(err, skip_all)] pub async fn update_tally_session_execution_documents( diff --git a/packages/windmill/src/postgres/tally_session_resolution.rs b/packages/windmill/src/postgres/tally_session_resolution.rs index d22b1e25a6..1a88b3b733 100644 --- a/packages/windmill/src/postgres/tally_session_resolution.rs +++ b/packages/windmill/src/postgres/tally_session_resolution.rs @@ -2,6 +2,9 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Inserts and queries `sequent_backend.tally_session_resolution` rows through the Hasura +//! transaction connection. + use anyhow::{anyhow, Result}; use deadpool_postgres::Transaction; use sequent_core::types::ceremonies::{ @@ -12,6 +15,13 @@ use tokio_postgres::Row; use tracing::{info, instrument}; use uuid::Uuid; +/// Builds a [`TallySessionResolution`] from a `tally_session_resolution` SELECT row. +/// +/// # Errors +/// +/// Returns an error if UUID or JSON fields cannot be deserialized into the core types +/// (invalid stored `resolution_type` / `status` strings, malformed `resolution_data`, or +/// other serde failures). fn map_row_to_resolution(row: &Row) -> Result { let resolution_type_str: String = row.get(7); let status_str: String = row.get(8); @@ -36,7 +46,20 @@ fn map_row_to_resolution(row: &Row) -> Result { }) } -/// Create a new pending resolution +/// Inserts a new resolution row with `status = 'pending'` for the given contest in a tally +/// session. +/// +/// # Returns +/// +/// The database id of the new row as a string (UUID text). +/// +/// # Errors +/// +/// - Fails if any of `tenant_id`, `election_event_id`, `tally_session_id`, or `contest_id` is +/// not a valid UUID string. +/// - Propagates JSON serialization errors when encoding `resolution_data`. +/// - Propagates Postgres errors from `INSERT ... RETURNING id` (including constraint +/// violations). 
#[instrument(skip(hasura_transaction))] pub async fn create_tally_session_resolution( hasura_transaction: &Transaction<'_>, @@ -77,7 +100,15 @@ pub async fn create_tally_session_resolution( Ok(id.to_string()) } -/// Get pending resolutions for a tally session +/// Lists all `pending` resolutions for a tally session, oldest first. +/// +/// # Errors +/// +/// - Fails if any of `tenant_id`, `election_event_id`, or `tally_session_id` is not a valid +/// UUID string. +/// - Propagates Postgres query errors. +/// - Propagates row mapping errors from [`map_row_to_resolution`] if stored enum or JSON +/// values are invalid. #[instrument(skip(hasura_transaction))] pub async fn get_pending_resolutions( hasura_transaction: &Transaction<'_>, @@ -119,7 +150,15 @@ pub async fn get_pending_resolutions( Ok(resolutions) } -/// Get all resolutions for a tally session (both pending and resolved) +/// Lists every resolution row for a tally session (any status), oldest first. +/// +/// # Errors +/// +/// - Fails if any of `tenant_id`, `election_event_id`, or `tally_session_id` is not a valid +/// UUID string. +/// - Propagates Postgres query errors. +/// - Propagates row mapping errors from [`map_row_to_resolution`] if stored enum or JSON +/// values are invalid. #[instrument(skip(hasura_transaction))] pub async fn get_resolution_by_tally_session( hasura_transaction: &Transaction<'_>, @@ -160,7 +199,16 @@ pub async fn get_resolution_by_tally_session( Ok(resolutions) } -/// Submit a resolution for a pending item +/// Submit a resolution for a pending item. +/// +/// # Errors +/// +/// - Fails if `resolution_id`, `tenant_id`, `election_event_id`, or `resolved_by_user` is +/// not a valid UUID string. +/// - Propagates JSON serialization errors when encoding `resolution`. +/// - Propagates Postgres errors from the `UPDATE`. +/// - Returns `Err` with a not-found message when no row matched (wrong id, tenant/event +/// mismatch, or row was not `pending`). 
#[instrument(skip(hasura_transaction))] pub async fn submit_resolution( hasura_transaction: &Transaction<'_>, @@ -207,6 +255,15 @@ pub async fn submit_resolution( } /// Update the resolution decision for an already-resolved record +/// +/// # Errors +/// +/// - Fails if `resolution_id`, `tenant_id`, `election_event_id`, or `resolved_by_user` is +/// not a valid UUID string. +/// - Propagates JSON serialization errors when encoding `resolution`. +/// - Propagates Postgres errors from the `UPDATE`. +/// - Returns `Err` with a not-found message when no row matched the id and tenant/event +/// scope. #[instrument(skip(hasura_transaction))] pub async fn update_resolution( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/postgres/tally_sheet.rs b/packages/windmill/src/postgres/tally_sheet.rs index 9349659ff2..d74c87c0d9 100644 --- a/packages/windmill/src/postgres/tally_sheet.rs +++ b/packages/windmill/src/postgres/tally_sheet.rs @@ -14,6 +14,7 @@ use tokio_postgres::types::ToSql; use tracing::instrument; use uuid::Uuid; +/// Tally sheet wrapper pub struct TallySheetWrapper(pub TallySheet); impl TryFrom for TallySheetWrapper { @@ -41,6 +42,12 @@ impl TryFrom for TallySheetWrapper { })) } } +/// Get published tally sheets by event from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(err, skip_all)] pub async fn get_published_tally_sheets_by_event( @@ -84,6 +91,12 @@ pub async fn get_published_tally_sheets_by_event( Ok(election_events) } +/// Publish tally sheet in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
#[instrument(skip(hasura_transaction), err)] pub async fn publish_tally_sheet( diff --git a/packages/windmill/src/postgres/tasks_execution.rs b/packages/windmill/src/postgres/tasks_execution.rs index 353499bc29..a5aff65fac 100644 --- a/packages/windmill/src/postgres/tasks_execution.rs +++ b/packages/windmill/src/postgres/tasks_execution.rs @@ -16,9 +16,10 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Tasks execution wrapper pub struct TasksExecutionWrapper(pub TasksExecution); -// implements a conversion from a database row to that TasksExecutionWrapper structure +/// Implements a conversion from a database row to that TasksExecutionWrapper structure impl TryFrom for TasksExecutionWrapper { type Error = anyhow::Error; @@ -42,6 +43,12 @@ impl TryFrom for TasksExecutionWrapper { })) } } +/// Insert tasks execution into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(annotations, labels, logs), err)] pub async fn insert_tasks_execution( @@ -118,6 +125,12 @@ pub async fn insert_tasks_execution( Ok(task_execution) } +/// Update task execution status in the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. pub async fn update_task_execution_status( tenant_id: &str, @@ -174,6 +187,12 @@ pub async fn update_task_execution_status( Ok(()) } +/// Get task by id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(), err)] pub async fn get_task_by_id(task_id: &str) -> Result { @@ -212,6 +231,12 @@ pub async fn get_task_by_id(task_id: &str) -> Result { Ok(task_execution) } +/// Get tasks by election event id from the database. 
+/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(), err)] pub async fn get_tasks_by_election_event_id( diff --git a/packages/windmill/src/postgres/template.rs b/packages/windmill/src/postgres/template.rs index 16311411ff..701e5bda04 100644 --- a/packages/windmill/src/postgres/template.rs +++ b/packages/windmill/src/postgres/template.rs @@ -9,6 +9,7 @@ use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Template wrapper pub struct TemplateWrapper(pub Template); impl TryFrom for TemplateWrapper { @@ -29,7 +30,12 @@ impl TryFrom for TemplateWrapper { } } -/* Returns election */ +/// Get template by alias from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_template_by_alias( @@ -74,6 +80,12 @@ pub async fn get_template_by_alias( Ok(elections.first().cloned()) } +/// Get templates by tenant id from the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_templates_by_tenant_id( @@ -123,6 +135,12 @@ pub async fn get_templates_by_tenant_id( Ok(templates) } +/// Insert templates into the database. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. 
 #[instrument(err, skip_all)]
 pub async fn insert_templates(
diff --git a/packages/windmill/src/postgres/tenant.rs b/packages/windmill/src/postgres/tenant.rs
index 29729807ee..98610cf787 100644
--- a/packages/windmill/src/postgres/tenant.rs
+++ b/packages/windmill/src/postgres/tenant.rs
@@ -9,6 +9,7 @@ use tokio_postgres::row::Row;
 use tracing::{event, instrument, Level};
 use uuid::Uuid;
+/// Newtype wrapper around [`Tenant`] used to convert database rows into the core type.
 pub struct TenantWrapper(pub Tenant);
 impl TryFrom<Row> for TenantWrapper {
@@ -28,6 +29,12 @@ impl TryFrom<Row> for TenantWrapper {
         }))
     }
 }
+/// Get tenant by id from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(skip(hasura_transaction), err)]
 pub async fn get_tenant_by_id(
@@ -69,6 +76,12 @@ pub async fn get_tenant_by_id(
     let tenant = tenants.first().cloned().context("Error obtaining Tenant")?;
     Ok(tenant)
 }
+/// Update tenant in the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(skip(hasura_transaction), err)]
 pub async fn update_tenant(
@@ -127,6 +140,12 @@ pub async fn update_tenant(
     Ok(())
 }
+/// Insert tenant into the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(skip(hasura_transaction), err)]
 pub async fn insert_tenant(
@@ -160,6 +179,12 @@ pub async fn insert_tenant(
     Ok(())
 }
+/// Get tenant by id, if it exists, from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(skip(hasura_transaction), err)]
 pub async fn get_tenant_by_id_if_exist(
@@ -205,6 +230,12 @@ pub async fn get_tenant_by_id_if_exist(
     let tenant = tenants.first().cloned().context("Error obtaining Tenant")?;
     Ok(Some(tenant))
 }
+/// Get tenant by slug, if it exists, from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(skip(hasura_transaction), err)]
 pub async fn get_tenant_by_slug_if_exist(
diff --git a/packages/windmill/src/postgres/trustee.rs b/packages/windmill/src/postgres/trustee.rs
index a1ac2838f2..c8a17815b0 100644
--- a/packages/windmill/src/postgres/trustee.rs
+++ b/packages/windmill/src/postgres/trustee.rs
@@ -11,6 +11,7 @@ use tokio_postgres::row::Row;
 use tracing::{event, instrument, Level};
 use uuid::Uuid;
+/// Newtype wrapper around [`Trustee`] used to convert database rows into the core type.
 pub struct TrusteeWrapper(pub Trustee);
 impl TryFrom<Row> for TrusteeWrapper {
@@ -29,6 +30,12 @@ impl TryFrom<Row> for TrusteeWrapper {
         }))
     }
 }
+/// Get multiple trustees by id from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(err, skip(hasura_transaction))]
 pub async fn get_trustees_by_id(
@@ -66,6 +73,12 @@ pub async fn get_trustees_by_id(
         })
         .collect::<Result<Vec<_>>>()
 }
+/// Get multiple trustees by name from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(err, skip(hasura_transaction))]
 pub async fn get_trustees_by_name(
@@ -98,6 +111,12 @@ pub async fn get_trustees_by_name(
         })
         .collect::<Result<Vec<_>>>()
 }
+/// Get trustee by name from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// or if UUID or other parsing fails.
 #[instrument(err, skip(hasura_transaction))]
 pub async fn get_trustee_by_name(
@@ -113,6 +132,11 @@ pub async fn get_trustee_by_name(
         .cloned()
         .ok_or(anyhow!("Trustee {name} not found"))
 }
+/// Get all trustees from the database.
+///
+/// # Errors
+///
+/// Returns an error if SQL preparation or execution fails,
+/// if UUID or other parsing fails, or if row mapping is inconsistent.
#[instrument(err, skip(hasura_transaction))] pub async fn get_all_trustees( diff --git a/packages/windmill/src/services/application.rs b/packages/windmill/src/services/application.rs index 89c46cec05..55fe60bbae 100644 --- a/packages/windmill/src/services/application.rs +++ b/packages/windmill/src/services/application.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing application workflows and verification processes. + use super::users::{lookup_users, FilterOption, ListUsersFilter}; use crate::postgres::application::get_permission_label_from_post; use crate::postgres::area::get_event_areas; @@ -41,6 +44,8 @@ use unicode_normalization::char::decompose_canonical; #[allow(non_camel_case_types)] #[derive(Display, Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +/// Enumerates outcomes or modes for E card type. +#[allow(missing_docs)] pub enum ECardType { #[strum(serialize = "philSysID")] #[serde(rename = "philSysID")] @@ -61,16 +66,24 @@ pub enum ECardType { /// Struct for email/sms Accepted/Rejected Communication object. #[derive(Serialize, Deserialize, Debug, Clone)] +#[allow(clippy::missing_docs_in_private_items)] struct ApplicationCommunication { accepted: ApplicationCommunicationChannels, rejected: ApplicationCommunicationChannels, } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Application communication channels +#[allow(clippy::missing_docs_in_private_items)] struct ApplicationCommunicationChannels { email: EmailConfig, sms: SmsConfig, } +/// Verify application workflow. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(skip_all, err)] pub async fn verify_application( @@ -162,6 +175,7 @@ pub async fn verify_application( Ok(result) } +/// Get permission label and area from applicant data. 
#[instrument(err, skip_all)] async fn get_permission_label_and_area_from_applicant_data( hasura_transaction: &Transaction<'_>, @@ -190,7 +204,7 @@ async fn get_permission_label_and_area_from_applicant_data( ) .await; } - +/// Setup users filter from applicant data. #[instrument(err, skip_all)] fn get_filter_from_applicant_data( tenant_id: String, @@ -278,6 +292,7 @@ fn get_filter_from_applicant_data( }) } +/// Build manual verify reason. #[instrument(skip_all)] fn build_manual_verify_reason(fields_match: HashMap) -> String { let mismatch_fields = fields_match @@ -299,6 +314,8 @@ fn build_manual_verify_reason(fields_match: HashMap) -> String { } #[derive(Serialize, Deserialize, Debug)] +/// Application annotations +#[allow(clippy::missing_docs_in_private_items)] pub struct ApplicationAnnotations { session_id: Option, credentials: Option, @@ -317,6 +334,8 @@ pub struct ApplicationAnnotations { } #[derive(Serialize, Deserialize, Debug)] +/// Application verification result +#[allow(missing_docs)] pub struct ApplicationVerificationResult { pub user_id: Option, pub username: String, @@ -330,6 +349,7 @@ pub struct ApplicationVerificationResult { pub manual_verify_reason: Option, } +/// Automatic verification workflow. #[instrument(err, skip_all)] fn automatic_verification( users: Vec, @@ -489,7 +509,9 @@ fn automatic_verification( }) } +/// Type alias `VerificationMismatchSummary` to keep signatures readable. type VerificationMismatchSummary = (usize, usize, HashMap, HashMap); +/// Check mismatches workflow. #[instrument(err)] fn check_mismatches( @@ -684,6 +706,10 @@ async fn get_i18n_default_application_communication( } /// Get the accepted/rejected message from the internalization object in presentation. +/// +/// # Errors +/// +/// Returns an error if default templates cannot be loaded/parsed or the status is invalid. 
#[instrument(skip_all)] pub async fn get_i18n_application_communication( presentation: ElectionEventPresentation, @@ -734,6 +760,10 @@ pub async fn get_i18n_application_communication( } /// Get the accepted/rejected message if configured, otherwise the default. +/// +/// # Errors +/// +/// Returns an error if templates cannot be loaded/parsed from defaults or presentation i18n data. #[instrument(skip(presentation), err)] pub async fn get_application_response_communication( communication_method: Option, @@ -766,6 +796,12 @@ pub async fn get_application_response_communication( _ => Ok((None, None)), } } +/// Confirm application workflow. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails, +/// if UUID or other parsing fails. #[instrument(skip_all, err)] pub async fn confirm_application( @@ -923,6 +959,11 @@ pub async fn confirm_application( Ok((application, user)) } +/// Reject application workflow. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(skip(hasura_transaction), err)] pub async fn reject_application( @@ -1004,6 +1045,11 @@ pub async fn reject_application( Ok(application) } +/// Send application communication response. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err, skip_all)] pub async fn send_application_communication_response( @@ -1102,6 +1148,11 @@ pub async fn send_application_communication_response( Ok(()) } +/// Get group names from the keycloak database. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err, skip_all)] pub async fn get_group_names(realm: &str, user_id: &str) -> Result> { @@ -1125,6 +1176,7 @@ pub async fn get_group_names(realm: &str, user_id: &str) -> Result> Ok(group_names) } +/// Convert string to unaccented. 
#[instrument(skip_all)] fn string_to_unaccented(word: String) -> String { let mut unaccented_word = String::new(); @@ -1140,6 +1192,7 @@ fn string_to_unaccented(word: String) -> String { unaccented_word } +/// Convert to unaccented without hyphen. #[instrument(skip_all)] fn to_unaccented_without_hyphen(word: Option) -> Option { let word = match word { diff --git a/packages/windmill/src/services/ballot_styles/ballot_publication.rs b/packages/windmill/src/services/ballot_styles/ballot_publication.rs index 3f07b16b77..3e061f8d97 100644 --- a/packages/windmill/src/services/ballot_styles/ballot_publication.rs +++ b/packages/windmill/src/services/ballot_styles/ballot_publication.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + use crate::postgres::ballot_publication::{ get_ballot_publication_by_id, get_previous_publication, get_previous_publication_election, insert_ballot_publication, soft_delete_other_ballot_publications, update_ballot_publication, @@ -26,6 +27,15 @@ use tracing::{event, instrument, Level}; use super::ballot_style; +/// Resolves which election ids a new publication should cover. +/// +/// When `election_id_opt` is set, returns that single id; otherwise loads all elections for the +/// event from Postgres. +/// +/// # Errors +/// +/// Propagates errors from [`crate::postgres::election::get_elections_ids`] when no explicit +/// election id was supplied. #[instrument(skip(hasura_transaction), err)] async fn get_election_ids_for_publication( hasura_transaction: &Transaction<'_>, @@ -42,6 +52,17 @@ async fn get_election_ids_for_publication( Ok(elections_ids) } +/// Inserts a ballot publication row for the tenant/event, then queues a Celery task to rebuild +/// ballot styles for that publication. +/// +/// # Returns +/// +/// The new ballot publication id. +/// +/// # Errors +/// +/// - Postgres errors from election lookup, insert, or follow-up reads. 
+/// - Celery dispatch failures when sending `update_election_event_ballot_styles`. #[instrument(err)] pub async fn add_ballot_publication( hasura_transaction: &Transaction<'_>, @@ -87,6 +108,15 @@ pub async fn add_ballot_publication( Ok(ballot_publication.id.clone()) } +/// Marks a generated ballot publication as published: soft-deletes competing rows, stamps +/// `published_at`, flips election/event published flags, and writes an electoral-log entry. +/// +/// # Errors +/// +/// - Returns `Err` if the publication is missing, not yet generated, or already published (early +/// `Ok` when already published). +/// - Anyhow-wrapped failures from Postgres updates, status serialization, electoral log RPC, or +/// missing bulletin board configuration. #[instrument(err)] pub async fn update_publish_ballot( hasura_transaction: &Transaction<'_>, @@ -196,6 +226,14 @@ pub async fn update_publish_ballot( Ok(()) } +/// Loads ballot EML JSON values for a publication, optionally filtered to one election and capped +/// by `limit`, and returns them as a JSON array of deserialized values. +/// +/// # Errors +/// +/// - Postgres errors from [`crate::postgres::ballot_style::get_publication_ballot_styles`]. +/// - Deserialization failures when turning stored EML strings into JSON values, or `Err` if any +/// filtered row decodes to an empty style. #[instrument(skip(hasura_transaction), err)] pub async fn get_publication_json( hasura_transaction: &Transaction<'_>, @@ -235,18 +273,33 @@ pub async fn get_publication_json( Ok(serde_json::Value::Array(val_arr)) } +/// Snapshot of ballot styles JSON for one publication id. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PublicationStyles { + /// Publication row this snapshot refers to. ballot_publication_id: String, + /// JSON array of ballot style payloads from [`get_publication_json`] for that publication. ballot_styles: Value, } +/// Pair of style snapshots for comparing a publication to its chronologically previous one. 
#[derive(Serialize, Deserialize, Debug, Clone)] pub struct PublicationDiff { + /// Styles for the requested publication. current: PublicationStyles, + /// Styles for the prior publication in the same scope, if any. previous: Option, } +/// Builds [`PublicationDiff`] between `ballot_publication_id` and the latest earlier publication +/// for the same election (when scoped) or event-wide otherwise. +/// +/// # Errors +/// +/// - Missing publication row, Postgres failures, or failures while loading JSON via +/// [`get_publication_json`]. +/// - `Err` when an election-scoped publication has no discoverable predecessor (logged as +/// context in some branches but surfaced from inner helpers). #[instrument(err)] pub async fn get_ballot_publication_diff( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ballot_styles/ballot_style.rs b/packages/windmill/src/services/ballot_styles/ballot_style.rs index 8a11634342..78c5b4887d 100644 --- a/packages/windmill/src/services/ballot_styles/ballot_style.rs +++ b/packages/windmill/src/services/ballot_styles/ballot_style.rs @@ -41,21 +41,30 @@ use sequent_core::services::date::ISO8601; use sequent_core::services::area_tree::TreeNode; +/// File name used when uploading the public election-event presentation JSON to object storage. pub const EVENT_CONFIG_FILE_NAME: &str = "election_event_config.json"; +/// Serializable payload written to [`EVENT_CONFIG_FILE_NAME`] used by the voting portal. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ElectionEventConfig { + /// Unique id for this uploaded config revision. pub id: String, + /// Election event this presentation belongs to. pub election_event_id: String, + /// Tenant scope for the document path and access policy. pub tenant_id: String, + /// Presentation of the election event. 
pub election_event_presentation: ElectionEventPresentation, } -/** - * Returns a HashMap> with all - * the election ids and contest ids related to an area, - * taking into consideration the parent areas as well. - */ +/// Returns a HashMap> with all +/// the election ids and contest ids related to an area, +/// including contests linked via parent areas in `areas_tree`. +/// +/// # Errors +/// +/// - `Err` when the publication lists no election ids. +/// - `Err` when `area` is not present in `areas_tree`. pub fn get_elections_contests_map_for_area( area: &Area, areas_tree: &TreeNode, @@ -107,6 +116,14 @@ pub fn get_elections_contests_map_for_area( Ok(election_contest_map) } +/// Inserts one [`BallotStyle`] row per election that touches `area`, using `sequent_core` to build +/// the ballot JSON from elections, contests, candidates, dates, and optional joint public keys. +/// +/// # Errors +/// +/// - Errors from [`get_elections_contests_map_for_area`] when the area cannot be resolved. +/// - Missing election or contest rows, ballot construction errors, JSON serialization failures, +/// or Postgres insert errors from [`crate::postgres::ballot_style::insert_ballot_style`]. pub async fn create_ballot_style_postgres( transaction: &Transaction<'_>, area: &Area, @@ -200,6 +217,11 @@ pub async fn create_ballot_style_postgres( /// Creates a JSON file with the election event config with presentation data /// and uploads it to S3 public bucket. +/// +/// # Errors +/// +/// - Presentation parsing errors from the election event model. +/// - Temp file I/O, JSON serialization, or S3/document service failures. 
pub async fn create_public_election_event_config_file( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -241,6 +263,20 @@ pub async fn create_public_election_event_config_file( Ok(()) } +/// Regenerates ballot styles for every area under a publication: acquires a Postgres advisory lock, +/// reloads elections/contests/candidates/areas, writes ballot rows, marks the publication +/// generated, and refreshes the public config file. +/// +/// # Panics +/// +/// Panics if computing the lock expiry (`now + 60s`) overflows representable range (same as +/// `expect("lock expiration overflow")` on the timestamp arithmetic). +/// +/// # Errors +/// +/// - Pool, transaction, or query failures while loading Hasura-backed rows. +/// - Missing ballot publication, ballot style generation errors, commit failures, or lock +/// acquisition/release problems (surfaced as anyhow contexts). #[instrument(err)] pub async fn update_election_event_ballot_styles( tenant_id: &str, diff --git a/packages/windmill/src/services/ballot_styles/mod.rs b/packages/windmill/src/services/ballot_styles/mod.rs index 3e2de3ab99..56170ff60b 100644 --- a/packages/windmill/src/services/ballot_styles/mod.rs +++ b/packages/windmill/src/services/ballot_styles/mod.rs @@ -2,5 +2,9 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Ballot style generation, ballot publication lifecycle + +/// Handles ballot publication lifecycle. pub mod ballot_publication; +/// Handles ballot style generation. pub mod ballot_style; diff --git a/packages/windmill/src/services/cast_votes.rs b/packages/windmill/src/services/cast_votes.rs index b595d34496..bc46cac6e7 100644 --- a/packages/windmill/src/services/cast_votes.rs +++ b/packages/windmill/src/services/cast_votes.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing cast votes, ballots, and vote-related joins. 
+ use super::database::PgConfig; use super::sql_utils::escape_sql_literal; use crate::services::datafix::utils::{ @@ -27,6 +30,8 @@ use tracing::{debug, info, instrument}; use uuid::Uuid; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +/// Cast vote +#[allow(missing_docs)] pub struct CastVote { pub id: String, pub tenant_id: String, @@ -63,6 +68,15 @@ impl TryFrom for CastVote { }) } } +/// Get ballot content for a given area and write to a file. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. +/// +/// # Panics +/// +/// Panics only if internal SQL placeholder arithmetic overflows. #[instrument(err)] pub async fn find_area_ballots( @@ -126,6 +140,8 @@ pub async fn find_area_ballots( } #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +/// Votes per election. +#[allow(missing_docs)] pub struct ElectionCastVotes { pub election_id: String, pub census: i64, @@ -144,6 +160,8 @@ impl TryFrom for ElectionCastVotes { } #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +/// Votes per day. +#[allow(missing_docs)] pub struct CastVotesPerDay { pub day: String, pub day_count: i64, @@ -158,6 +176,11 @@ impl TryFrom for CastVotesPerDay { }) } } +/// Counts cast votes election matching the query constraints. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err)] pub async fn count_cast_votes_election( @@ -207,6 +230,11 @@ pub async fn count_cast_votes_election( Ok(count_data) } +/// Get count votes per day. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(skip(transaction), err)] pub async fn get_count_votes_per_day( @@ -289,6 +317,11 @@ pub async fn get_count_votes_per_day( Ok(cast_votes_by_day) } +/// Get users with vote info. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. 
#[instrument(skip(hasura_transaction, users), err)] pub async fn get_users_with_vote_info( @@ -424,6 +457,8 @@ pub async fn get_users_with_vote_info( } #[derive(Debug, Serialize, Deserialize)] +/// Votes count by ip for election. +#[allow(clippy::missing_docs_in_private_items)] pub struct CastVoteCountByIp { id: String, ip: Option, @@ -449,6 +484,8 @@ impl TryFrom for CastVoteCountByIp { } #[derive(Debug, PartialEq, Eq, Clone)] +/// List cast votes by ip filter. +#[allow(missing_docs)] pub struct ListCastVotesByIpFilter { pub limit: Option, pub offset: Option, @@ -456,6 +493,11 @@ pub struct ListCastVotesByIpFilter { pub country: Option, pub election_id: Option, } +/// Get top count votes by ip. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(skip(hasura_transaction), err)] pub async fn get_top_count_votes_by_ip( @@ -550,6 +592,11 @@ pub async fn get_top_count_votes_by_ip( Ok((cast_votes_by_ip, count)) } +/// Count ballots by election. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err)] pub async fn count_ballots_by_election( @@ -595,6 +642,11 @@ pub async fn count_ballots_by_election( Ok(vote_count) } +/// Count ballots by area id. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err)] pub async fn count_ballots_by_area_id( @@ -648,6 +700,11 @@ pub async fn count_ballots_by_area_id( Ok(vote_count) } +/// Count cast votes election event. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. #[instrument(err)] pub async fn count_cast_votes_election_event( diff --git a/packages/windmill/src/services/celery_app.rs b/packages/windmill/src/services/celery_app.rs index c862b21afc..165f8b7649 100644 --- a/packages/windmill/src/services/celery_app.rs +++ b/packages/windmill/src/services/celery_app.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // SPDX-License-Identifier: AGPL-3.0-only +//! 
Celery app construction, broker tuning, and per-deployment queue management. + use anyhow::{anyhow, Context, Result}; use async_once::AsyncOnce; use celery::prelude::Task; @@ -65,6 +67,7 @@ use crate::tasks::set_public_key::set_public_key; use crate::tasks::update_election_event_ballot_styles::update_election_event_ballot_styles; #[derive(AsRefStr, Debug)] +#[allow(missing_docs)] pub enum Queue { #[strum(serialize = "beat")] Beat, @@ -88,48 +91,63 @@ pub enum Queue { } impl Queue { + /// Get the queue name for the Celery app. pub fn queue_name(&self, slug: &str) -> String { format!("{}_{}", slug, self.as_ref()) } } +/// AMQP prefetch count used when building the Celery app. static mut PREFETCH_COUNT_S: u16 = 100; +/// Whether tasks are ACKed only after successful execution. static mut ACKS_LATE_S: bool = true; +/// Maximum number of retries configured for Celery tasks. static mut TASK_MAX_RETRIES: u32 = 4; +/// Global switch used to pause/resume the app workers. static mut IS_APP_ACTIVE: bool = true; +/// Max retries while establishing the broker connection. static mut BROKER_CONNECTION_MAX_RETRIES: u32 = 5; +/// AMQP heartbeat interval in seconds. static mut HEARTBEAT_SECS: u16 = 10; +/// Number of worker threads used by the Celery runtime. static mut WORKER_THREADS: usize = 1; +/// Explicit queue names to consume from, when configured. static mut QUEUES: Vec = vec![]; +/// Set the prefetch count for the Celery app. pub fn set_prefetch_count(new_val: u16) { unsafe { PREFETCH_COUNT_S = new_val; } } +/// Set the number of worker threads for the Celery app. pub fn set_worker_threads(new_val: usize) { unsafe { WORKER_THREADS = new_val; } } +/// Get the number of worker threads for the Celery app. pub fn get_worker_threads() -> usize { unsafe { WORKER_THREADS } } +/// Set whether tasks are ACKed late for the Celery app. pub fn set_acks_late(new_val: bool) { unsafe { ACKS_LATE_S = new_val; } } +/// Set the maximum number of retries for tasks for the Celery app. 
pub fn set_task_max_retries(new_val: u32) { unsafe { TASK_MAX_RETRIES = new_val; } } +/// Set the queues for the Celery app. pub fn set_queues(new_val: Vec) { unsafe { QUEUES = new_val; @@ -137,28 +155,33 @@ pub fn set_queues(new_val: Vec) { } #[instrument] +/// Set whether the Celery app is active. pub fn set_is_app_active(new_val: bool) { unsafe { IS_APP_ACTIVE = new_val; } } +/// Set the maximum number of retries for broker connections for the Celery app. pub fn set_broker_connection_max_retries(new_val: u32) { unsafe { BROKER_CONNECTION_MAX_RETRIES = new_val; } } +/// Set the heartbeat interval for the Celery app. pub fn set_heartbeat(new_val: u16) { unsafe { HEARTBEAT_SECS = new_val; } } +/// Get whether the Celery app is active. pub fn get_is_app_active() -> bool { unsafe { IS_APP_ACTIVE } } +/// Get the queues for the Celery app. pub fn get_queues() -> Vec { unsafe { QUEUES.clone() } } @@ -180,6 +203,11 @@ pub async fn get_celery_app() -> Arc { CELERY_APP.get().await.clone() } +/// Establish an AMQP connection and store it for reuse. +/// +/// # Errors +/// +/// Returns an error if the AMQP address is missing or connection attempts fail. #[instrument] async fn create_connection() -> Result<(Arc, String)> { // you can use "amqp://rabbitmq2:5672,amqp://rabbitmq:5672" for $AMQP_ADDR to configure multiple nodes, separated by comma @@ -213,6 +241,12 @@ async fn create_connection() -> Result<(Arc, String)> { Err(last_error.unwrap_or(anyhow!("Failed to connect to any AMQP server"))) } +/// Build and configure the global Celery application for this worker. +/// +/// # Errors +/// +/// Returns an error if required environment variables are missing, broker connection fails, +/// or task/plugin initialization fails. #[instrument] pub async fn generate_celery_app() -> Result> { let prefetch_count: u16; @@ -355,10 +389,15 @@ pub async fn generate_celery_app() -> Result> { .map_err(|err| anyhow!("{:?}", err)) } +/// Cached AMQP connection used by the worker process. 
static CELERY_CONNECTION: RwLock>> = RwLock::const_new(None); /// Returns a reused AMQP connection wrapped in an Arc. /// If no connection exists (or if it’s disconnected), a new connection is created and stored. +/// +/// # Errors +/// +/// Returns an error if a new broker connection cannot be established. #[instrument] pub async fn get_celery_connection() -> Result> { let conn_guard = CELERY_CONNECTION.read().await; diff --git a/packages/windmill/src/services/ceremonies/encrypter.rs b/packages/windmill/src/services/ceremonies/encrypter.rs index d4eca0d436..4640b964e7 100644 --- a/packages/windmill/src/services/ceremonies/encrypter.rs +++ b/packages/windmill/src/services/ceremonies/encrypter.rs @@ -16,11 +16,20 @@ use std::path::{Path, PathBuf}; use tracing::{info, instrument}; use walkdir::WalkDir; +/// File name for multi-contest ballot image exports. pub const MC_BALLOT_IMAGES_FILE_NAME: &str = "mcballots_images"; +/// File name for single-contest ballot image exports. pub const BALLOT_IMAGES_FILE_NAME: &str = "ballot_images"; +/// File name for trustee initialization PDFs. pub const INITIALIZATION_REPORT_FILE_NAME: &str = "INITIALIZATION_REPORT"; +/// File name for aggregated electoral results reports. pub const ELECTORAL_RESULTS_FILE_NAME: &str = "ELECTORAL_RESULTS"; +/// Maps a file name to a [`ReportType`]. +/// +/// # Errors +/// +/// This helper only returns `Ok` variants today; errors are reserved for future filename parsing. #[instrument(err, skip_all)] pub fn get_file_report_type(file_name: &str) -> Result> { if file_name.contains(MC_BALLOT_IMAGES_FILE_NAME) || file_name.contains(BALLOT_IMAGES_FILE_NAME) @@ -35,7 +44,17 @@ pub fn get_file_report_type(file_name: &str) -> Result> { } } -// returns a map from the report id to the secret password +/// Walks `folder_path`, finds report-like files, extracts embedded election UUIDs from each path +/// string, and loads configured-password secrets from the vault keyed by matching [`Report`] rows. 
+/// +/// # Panics +/// +/// Panics if the static election-id regular expression fails to compile (a programmer error). +/// +/// # Errors +/// +/// - `Err` when `folder_path` is not a directory. +/// - Vault read failures, missing passwords, or I/O errors while traversing files. #[instrument(err, skip_all)] pub async fn traversal_find_secrets_for_files( hasura_transaction: &Transaction<'_>, @@ -115,7 +134,17 @@ pub async fn traversal_find_secrets_for_files( Ok(report_secrets_map) } -/// Encrypts all eligible files in a directory +/// Encrypts every password-protected file under `folder_path` using secrets from +/// `report_secrets_map`. +/// +/// # Panics +/// +/// Panics if the static election-id regular expression fails to compile (a programmer error). +/// +/// # Errors +/// +/// - `Err` when `folder_path` is not a directory. +/// - Encryption failures bubbled up from [`encrypt_directory_contents`]. #[instrument(err, skip_all)] pub async fn traversal_encrypt_files( report_secrets_map: HashMap, @@ -167,6 +196,13 @@ pub async fn traversal_encrypt_files( Ok(()) } +/// Encrypts `old_path` when `report_type` matches a report configured with a configured password, +/// returning either the encrypted path or the original path when encryption does not apply. +/// +/// # Errors +/// +/// - Missing report match, missing vault secret, AES encryption errors, or filesystem errors while +/// replacing the plaintext file. #[instrument(err, skip(hasura_transaction, election_ids, all_reports, old_path))] pub async fn encrypt_directory_contents_sql( hasura_transaction: &Transaction<'_>, @@ -222,6 +258,13 @@ pub async fn encrypt_directory_contents_sql( Ok(upload_path) } +/// Same selection rules as [`encrypt_directory_contents_sql`] but reads the password from an +/// in-memory map produced by [`traversal_find_secrets_for_files`]. 
+/// +/// # Errors +/// +/// - Missing password entry for a matched report, encryption failures, or filesystem errors when +/// removing the plaintext source file. #[instrument(err, skip(report_secrets_map, election_ids, all_reports, old_path))] pub async fn encrypt_directory_contents( report_secrets_map: &HashMap, @@ -267,6 +310,11 @@ pub async fn encrypt_directory_contents( Ok(upload_path) } +/// Writes `old_path` to `{old_path}.enc` with AES-256-CBC and deletes the plaintext copy. +/// +/// # Errors +/// +/// Propagates encryption or deletion failures from the consolidation helper and filesystem APIs. #[instrument(err, skip_all)] pub fn encrypt_file_inner(old_path: &str, encryption_password: &str) -> Result { let new_path = format!("{old_path}.enc"); @@ -280,6 +328,12 @@ pub fn encrypt_file_inner(old_path: &str, encryption_password: &str) -> Result, diff --git a/packages/windmill/src/services/ceremonies/insert_ballots.rs b/packages/windmill/src/services/ceremonies/insert_ballots.rs index 236ce454f9..b4c3c492d2 100644 --- a/packages/windmill/src/services/ceremonies/insert_ballots.rs +++ b/packages/windmill/src/services/ceremonies/insert_ballots.rs @@ -47,6 +47,13 @@ use deadpool_postgres::Client as DbClient; use std::sync::Arc; // Add this import +/// Loads trustees, derives ciphertext batches for each `tally_session_contest`, optionally posts +/// them to the configured bulletin board, and returns the updated contest rows. +/// +/// # Errors +/// +/// Trustee lookup/deserialization failures, protocol manager errors, CSV or crypto errors, pool +/// acquisition failures, or any `?` bubbled from board/network helpers inside the parallel tasks. #[instrument(skip_all, err)] pub async fn insert_ballots_messages( hasura_transaction: &Transaction<'_>, @@ -326,6 +333,12 @@ pub async fn insert_ballots_messages( Ok(tally_session_contests_updated) } +/// Returns each election’s configured end dates by parsing presentation JSON from Postgres. 
+/// +/// # Errors +/// +/// Database errors, JSON deserialization failures for presentation blobs, or invalid ISO8601 date +/// strings stored in election presentation data. #[instrument(skip_all, err)] pub async fn get_elections_end_dates( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/keys_ceremony.rs b/packages/windmill/src/services/ceremonies/keys_ceremony.rs index de62319e85..3b2d1af9d7 100644 --- a/packages/windmill/src/services/ceremonies/keys_ceremony.rs +++ b/packages/windmill/src/services/ceremonies/keys_ceremony.rs @@ -30,7 +30,12 @@ use tracing::instrument; use tracing::{event, info, Level}; use uuid::Uuid; -// returns (board_name, election_id), where the election_id might be None for an event Board +// Returns (board_name, election_id), where the election_id might be None for an event Board +/// +/// # Errors +/// +/// Missing election event rows, missing bulletin board references, missing `ENV_SLUG`, missing +/// elections linked to the ceremony, or protocol manager failures when resolving election boards. #[instrument(skip(transaction), err)] pub async fn get_keys_ceremony_board( transaction: &Transaction<'_>, @@ -69,6 +74,13 @@ pub async fn get_keys_ceremony_board( } } +/// Returns the trustee’s encrypted private key blob after validating JWT trustee claims, ceremony +/// execution state, and membership; updates ceremony logs/status in Postgres. +/// +/// # Errors +/// +/// Missing trustee claim, invalid ceremony status, trustee not listed, board/protocol errors when +/// fetching ciphertext material, or Postgres update failures. #[instrument(err)] pub async fn get_private_key( transaction: &Transaction<'_>, @@ -173,6 +185,12 @@ pub async fn get_private_key( Ok(encrypted_private_key) } +/// Fetches the encrypted private key for `trustee_name`. +/// +/// # Errors +/// +/// Board resolution failures, missing trustees/public keys, or immudb/protocol errors when reading +/// ciphertext from the board. 
#[instrument(skip(transaction), err)] pub async fn find_trustee_private_key( transaction: &Transaction<'_>, @@ -194,6 +212,13 @@ pub async fn find_trustee_private_key( get_trustee_encrypted_private_key(board_name.as_str(), trustee_public_key.as_str()).await } +/// Verifies `private_key_base64` matches the ciphertext stored for the authenticated trustee. +/// +/// # Errors +/// +/// Same class of failures as [`get_private_key`], plus explicit `Err` when ceremony status or +/// trustee state does not allow verification, when the key material mismatches, or when status +/// JSON cannot be serialized for persistence. #[instrument(err)] pub async fn check_private_key( transaction: &Transaction<'_>, @@ -310,6 +335,14 @@ pub async fn check_private_key( Ok(true) } +/// Inserts a new keys ceremony row, wires elections to it, records initial trustee states, and posts +/// an electoral log entry. +/// +/// # Errors +/// +/// Trustee lookup mismatches, invalid thresholds, duplicate default ceremonies, conflicting +/// per-election ceremonies, serialization failures, Postgres insert failures, or electoral log +/// write errors. #[instrument(err)] pub async fn create_keys_ceremony( transaction: &Transaction<'_>, @@ -476,6 +509,13 @@ pub async fn create_keys_ceremony( Ok(keys_ceremony_id) } +/// Ensures every permission label required by the targeted election(s) appears in the caller’s +/// Keycloak permission JSON payload. +/// +/// # Errors +/// +/// Database failures loading election labels, missing user label payloads, JSON parse errors when +/// interpreting the trimmed Keycloak structure, or deserialization failures. 
#[instrument(skip(hasura_transaction), err)] pub async fn validate_permission_labels( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/mod.rs b/packages/windmill/src/services/ceremonies/mod.rs index e848964fc8..f8d72e6f77 100644 --- a/packages/windmill/src/services/ceremonies/mod.rs +++ b/packages/windmill/src/services/ceremonies/mod.rs @@ -2,15 +2,29 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Keys ceremony, tally ceremony, Velvet-backed tally runs, and results persistence helpers. + +/// Encryption helpers used when packaging ceremony outputs. pub mod encrypter; +/// Builds trustee ballot batches and posts ciphertexts to the bulletin board. pub mod insert_ballots; +/// Trustee key download/check flows and creating keys ceremonies in Postgres. pub mod keys_ceremony; +/// Safe renaming of Velvet output folders using election and contest display names. pub mod renamer; +/// Uploads tally PDF/JSON/HTML artifacts and mirrors document ids on results tables. pub mod result_documents; +/// Handles tally results persistence. pub mod results; +/// Human-readable ceremony log lines derived from board messages. pub mod serialize_logs; +/// Tally session creation, trustee reconnect handling, and execution status updates. pub mod tally_ceremony; +/// Handles tally progress. pub mod tally_progress; +/// IRV tie-break detection, resolution records, and validation against tally state. pub mod tally_resolution; +/// Handles tally session errors. pub mod tally_session_error; +/// Handles running tally pipes. pub mod velvet_tally; diff --git a/packages/windmill/src/services/ceremonies/renamer.rs b/packages/windmill/src/services/ceremonies/renamer.rs index f3a138683f..5a19676495 100644 --- a/packages/windmill/src/services/ceremonies/renamer.rs +++ b/packages/windmill/src/services/ceremonies/renamer.rs @@ -1,6 +1,10 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! 
String helpers and depth-first directory renames used when exporting Velvet result folders with +//! human-readable prefixes. + use anyhow::{anyhow, Context, Result}; use std::collections::HashMap; use std::fs; @@ -8,8 +12,15 @@ use std::path::PathBuf; use tracing::{info, instrument}; use walkdir::{DirEntry, WalkDir}; +/// Maximum length retained from the right side of a sanitized folder name (UUID suffix excluded). pub const FOLDER_MAX_CHARS: usize = 200; +/// Walks `folder_path` deepest-first and renames each directory whose name contains keys in +/// `replacements`, applying [`sanitize_filename`] so exports stay filesystem-safe. +/// +/// # Errors +/// +/// Propagates `std::io::Error` from `rename` when a target path cannot be created. #[instrument(skip_all, err)] pub fn rename_folders(replacements: &HashMap, folder_path: &PathBuf) -> Result<()> { // Collect directories and sort by depth in descending order @@ -40,6 +51,7 @@ pub fn rename_folders(replacements: &HashMap, folder_path: &Path Ok(()) } +/// Returns up to the last `n` Unicode characters of `s`. pub fn take_last_n_chars(s: &str, n: usize) -> String { s.chars() .rev() @@ -50,11 +62,13 @@ pub fn take_last_n_chars(s: &str, n: usize) -> String { .collect() } +/// Returns up to the first `n` Unicode characters of `s`. pub fn take_first_n_chars(s: &str, n: usize) -> String { s.chars().take(n).collect() } -// Function to sanitize filenames +/// Sanitizes a filename by replacing cross-platform reserved characters +/// and trimming trailing dots/spaces. 
fn sanitize_filename(filename: &str) -> String { let sanitized = filename .replace("/", "_") // Linux and macOS directory separator diff --git a/packages/windmill/src/services/ceremonies/result_documents.rs b/packages/windmill/src/services/ceremonies/result_documents.rs index 0977cb28b5..4e179181b0 100644 --- a/packages/windmill/src/services/ceremonies/result_documents.rs +++ b/packages/windmill/src/services/ceremonies/result_documents.rs @@ -50,12 +50,23 @@ use velvet::pipes::generate_reports::{ }; use velvet::pipes::pipe_inputs::PREFIX_ALL_AREAS; +/// MIME type recorded when persisting tally PDFs. pub const MIME_PDF: &str = "application/pdf"; +/// MIME type recorded when persisting tally JSON exports. pub const MIME_JSON: &str = "application/json"; +/// MIME type recorded when persisting tally HTML exports. pub const MIME_HTML: &str = "text/html"; +/// Re-export of [`ResultDocuments`] describing optional filesystem paths for each rendered artifact. pub type ResultDocumentPaths = ResultDocuments; +/// Uploads every populated path on `document_paths`, applying encryption when the active tally type +/// maps to a password-protected report definition. +/// +/// # Errors +/// +/// Report listing failures, encryption errors, file size lookups, or document upload failures from +/// [`process_and_upload_document`]. #[instrument(err, skip_all)] async fn generic_save_documents( document_paths: &ResultDocumentPaths, @@ -141,7 +152,12 @@ async fn generic_save_documents( Ok(documents) } -// Helper function for processing and uploading a document +/// Encrypts `path_option` when `report_type` matches a configured-password report, uploads the bytes +/// to object storage, and returns the new document id string. +/// +/// # Errors +/// +/// Encryption helper failures, missing files, size detection errors, or Hasura document insert errors. 
#[instrument(err, skip(hasura_transaction, all_reports))] async fn process_and_upload_document( hasura_transaction: &Transaction<'_>, @@ -189,8 +205,16 @@ async fn process_and_upload_document( Ok(None) } +/// Implemented by Velvet result aggregates to expose filesystem layout and persist uploaded document ids. pub trait GenerateResultDocuments { + /// Resolves filesystem paths for each artifact type, optionally scoped to `area_id`. fn get_document_paths(&self, area_id: Option, base_path: &Path) -> ResultDocumentPaths; + /// Uploads artifacts described by `document_paths`, updates results tables, and optionally mirrors + /// ids into SQLite. + /// + /// # Errors + /// + /// Implementation-defined failures from encryption, uploads, or database updates. async fn save_documents( &self, hasura_transaction: &Transaction<'_>, @@ -204,7 +228,9 @@ pub trait GenerateResultDocuments { ) -> Result; } +/// Event-wide export stores a tarball path covering all contests in one archive. impl GenerateResultDocuments for Vec { + /// Points to the Velvet output directory root (tarball export mode). #[instrument(skip_all, name = "Vec::get_document_paths")] fn get_document_paths(&self, area_id: Option, base_path: &Path) -> ResultDocumentPaths { ResultDocumentPaths { @@ -219,6 +245,12 @@ impl GenerateResultDocuments for Vec { } } + /// Create event related documents and update the results_event table. + /// + /// # Errors + /// + /// Tar creation, encryption traversal, secret discovery, uploads, folder rename operations, or + /// Postgres/SQLite updates performed inside this implementation. #[instrument( skip(self, rename_map), err, @@ -409,7 +441,9 @@ impl GenerateResultDocuments for Vec { } } +/// Per-election aggregate export (JSON/PDF/HTML under the election’s Velvet folder). impl GenerateResultDocuments for ElectionReportDataComputed { + /// Locates standard Velvet report filenames for this election’s output directory. 
fn get_document_paths( &self, _area_id: Option, @@ -459,6 +493,12 @@ impl GenerateResultDocuments for ElectionReportDataComputed { } } + /// Uploads election-level artifacts, records JSON content hash, and updates `results_election`. + /// + /// # Errors + /// + /// Missing report metadata, filesystem/hash errors, [`generic_save_documents`] failures, or + /// Postgres/SQLite update errors. #[instrument( err, skip(self, hasura_transaction), @@ -542,7 +582,13 @@ impl GenerateResultDocuments for ElectionReportDataComputed { } } +/// Per-contest (optionally per-area) export paths under Velvet’s hierarchical output layout. impl GenerateResultDocuments for ReportDataComputed { + /// Resolves JSON/PDF/HTML paths under `output/velvet-generate-reports/...`. + /// + /// # Panics + /// + /// Panics when `self.contest` is missing (`expect("report is missing contest")`). fn get_document_paths(&self, area_id: Option, base_path: &Path) -> ResultDocumentPaths { let contest = self.contest.as_ref().expect("report is missing contest"); @@ -585,6 +631,12 @@ impl GenerateResultDocuments for ReportDataComputed { } } + /// Uploads contest (or area-contest) documents and updates the corresponding results tables. + /// + /// # Errors + /// + /// Hashing or IO errors when reading JSON proofs, [`generic_save_documents`] failures, or database + /// updates for contest/area-contest rows (Hasura and optional SQLite). #[instrument(err, skip(self), name = "ReportDataComputed::save_documents")] async fn save_documents( &self, @@ -664,6 +716,12 @@ impl GenerateResultDocuments for ReportDataComputed { } } +/// Builds a map from raw UUID ids to shortened `"{name}__{uuid}"` folder tokens used when renaming +/// Velvet export directories. +/// +/// # Errors +/// +/// Reserved for future validation failures; currently always returns `Ok`. 
#[instrument(skip(results, areas), err)] pub fn generate_ids_map( results: &[ElectionReportDataComputed], @@ -713,6 +771,13 @@ pub fn generate_ids_map( Ok(rename_map) } +/// Uploads top-level tally artifacts plus per-election and per-contest exports, including optional +/// per-area bundles derived from `results`. +/// +/// # Errors +/// +/// Failures from [`generate_ids_map`], missing report data during saves, encryption/upload errors, or +/// any database update returned by [`GenerateResultDocuments::save_documents`]. #[instrument(skip(hasura_transaction, results, areas), err)] pub async fn save_result_documents( hasura_transaction: &Transaction<'_>, @@ -813,6 +878,7 @@ pub async fn save_result_documents( Ok(()) } +/// Builds [`ResultDocumentPaths`] for a single area’s Velvet `generate-reports` subdirectory. fn get_area_document_paths( area_id: String, election_id: String, @@ -850,6 +916,11 @@ fn get_area_document_paths( } } +/// Persists per-area report uploads and inserts `results_election_area` document references. +/// +/// # Errors +/// +/// Propagates failures from [`generic_save_documents`], Hasura inserts, or SQLite mirror updates. #[instrument(err, skip(hasura_transaction))] async fn save_area_documents( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/results.rs b/packages/windmill/src/services/ceremonies/results.rs index 81e58d564f..ec29a1fd2f 100644 --- a/packages/windmill/src/services/ceremonies/results.rs +++ b/packages/windmill/src/services/ceremonies/results.rs @@ -38,6 +38,17 @@ use velvet::pipes::generate_db::DATABASE_FILENAME; use velvet::pipes::generate_reports::ElectionReportDataComputed; use velvet::pipes::pipe_name::PipeNameOutputDir; +/// Inserts contest, area-contest, election, and candidate result rows for `results_event_id` from +/// Velvet’s computed [`ElectionReportDataComputed`] vector. 
+/// +/// # Panics +/// +/// Panics if serializing extended contest metrics to JSON fails (`expect` on `serde_json::to_value`). +/// +/// # Errors +/// +/// Percent-to-fraction conversions that fail `try_into`, or any Postgres insert error from the +/// `insert_results_*` helpers. #[instrument(skip_all)] pub async fn save_results( hasura_transaction: &Transaction<'_>, @@ -289,6 +300,12 @@ pub async fn save_results( Ok(()) } +/// When `force_new_id` is set or the tally gained new session batches, inserts a new `results_event` +/// row (sourced from SQLite when a transaction is supplied) so later writes target a fresh id. +/// +/// # Errors +/// +/// SQLite lookup failures, missing results-event metadata, or Postgres insert failures. #[instrument(skip_all)] pub async fn generate_results_id_if_necessary( hasura_transaction: &Transaction<'_>, @@ -328,6 +345,11 @@ pub async fn generate_results_id_if_necessary( } } +/// Persists aggregates and report documents for the optional new `results_event_id`; +/// otherwise returns the previous execution’s event id. +/// +/// # Errors +/// Should never return an error. #[instrument(skip_all)] pub async fn process_results_tables( hasura_transaction: &Transaction<'_>, @@ -387,6 +409,13 @@ pub async fn process_results_tables( } } +/// Updates the SQLite results database tables and uploads the artifact to object storage, +/// and returns the active `results_event_id` and document handles. +/// +/// # Errors +/// +/// SQLite open/transaction failures, async errors propagated through `block_in_place`, document +/// upload failures, or missing filesystem paths when preparing uploads. 
#[instrument(skip(hasura_transaction, state_opt, previous_execution, areas))] pub async fn populate_results_tables( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/serialize_logs.rs b/packages/windmill/src/services/ceremonies/serialize_logs.rs index 84d7dd0aee..fdb95e8e14 100644 --- a/packages/windmill/src/services/ceremonies/serialize_logs.rs +++ b/packages/windmill/src/services/ceremonies/serialize_logs.rs @@ -7,6 +7,12 @@ use sequent_core::services::date::ISO8601; use sequent_core::types::ceremonies::Log; use tracing::{event, instrument, Level}; +/// Builds a [`Log`] describing who posted `message`, which statement kind it carries, and its batch. +/// +/// # Panics +/// +/// Panics if converting the on-board timestamp to milliseconds would overflow `u64` multiplication +/// by 1000 (`expect("timestamp millis overflow")`). pub fn message_to_log(message: &Message) -> Log { let batch_number = message.statement.get_batch_number(); let timestamp = message @@ -27,6 +33,11 @@ pub fn message_to_log(message: &Message) -> Log { } } +/// Emits each derived log line through the tracing pipeline. +/// +/// # Errors +/// +/// Always returns `Ok`; reserved for future filtering failures. #[instrument(skip(messages), err)] pub fn print_messages(messages: &[Message], board_name: &str) -> Result<()> { let logs: Vec = messages.iter().map(message_to_log).collect(); @@ -40,6 +51,12 @@ pub fn print_messages(messages: &[Message], board_name: &str) -> Result<()> { Ok(()) } +/// Filters `messages` to those at or after `next_timestamp` with batch ids in `batch_ids`, maps +/// them with [`message_to_log`], and returns them sorted by [`sort_logs`]. +/// +/// # Errors +/// +/// Always returns `Ok`; reserved for future validation errors. 
#[instrument(skip(messages, batch_ids), err)] pub fn generate_logs( messages: &[Message], @@ -60,6 +77,7 @@ pub fn generate_logs( Ok(sort_logs(&logs)) } +/// Seed log line emitted when a tally ceremony row is first created for `election_ids`. #[instrument] pub fn generate_tally_initial_log(election_ids: &Vec) -> Vec { vec![Log { @@ -68,6 +86,7 @@ pub fn generate_tally_initial_log(election_ids: &Vec) -> Vec { }] } +/// Returns a time-ordered copy of `logs`. #[instrument(skip_all)] pub fn sort_logs(logs: &[Log]) -> Vec { let mut sorted = logs.to_owned(); @@ -81,6 +100,7 @@ pub fn sort_logs(logs: &[Log]) -> Vec { sorted } +/// Seed log line emitted when a keys ceremony is created listing participating trustees. #[instrument] pub fn generate_keys_initial_log(trustee_names: &Vec) -> Vec { vec![Log { @@ -89,6 +109,7 @@ pub fn generate_keys_initial_log(trustee_names: &Vec) -> Vec { }] } +/// Appends a “restored private key” line for `trustee_name` during tally trustee reconnect. #[instrument(skip(current_logs))] pub fn append_tally_trustee_log(current_logs: &[Log], trustee_name: &str) -> Vec { let mut logs: Vec = current_logs.to_owned(); @@ -99,6 +120,7 @@ pub fn append_tally_trustee_log(current_logs: &[Log], trustee_name: &str) -> Vec sort_logs(&logs) } +/// Appends a keys-ceremony log entry when a trustee downloads their encrypted private key material. #[instrument(skip(current_logs))] pub fn append_keys_trustee_download_log(current_logs: &[Log], trustee_name: &str) -> Vec { let mut logs: Vec = current_logs.to_owned(); @@ -109,6 +131,7 @@ pub fn append_keys_trustee_download_log(current_logs: &[Log], trustee_name: &str sort_logs(&logs) } +/// Appends a keys-ceremony log entry when a trustee confirms their key matches the board copy. 
#[instrument(skip(current_logs))] pub fn append_keys_trustee_check_log(current_logs: &[Log], trustee_name: &str) -> Vec { let mut logs: Vec = current_logs.to_owned(); @@ -119,6 +142,7 @@ pub fn append_keys_trustee_check_log(current_logs: &[Log], trustee_name: &str) - sort_logs(&logs) } +/// Appends a log line when tally processing completes for `election_ids`. #[instrument(skip(current_logs))] pub fn append_tally_finished(current_logs: &[Log], election_ids: &[String]) -> Vec { let mut logs: Vec = current_logs.to_owned(); @@ -129,6 +153,7 @@ pub fn append_tally_finished(current_logs: &[Log], election_ids: &[String]) -> V sort_logs(&logs) } +/// Appends a log line when tally ceremony metadata is refreshed for `election_ids`. #[instrument(skip(current_logs))] pub fn append_tally_updated(current_logs: &[Log], election_ids: &[String]) -> Vec { let mut logs: Vec = current_logs.to_owned(); @@ -139,6 +164,7 @@ pub fn append_tally_updated(current_logs: &[Log], election_ids: &[String]) -> Ve sort_logs(&logs) } +/// Appends the standard message recorded when tally execution resumes after an IRV tie resolution. #[instrument(skip(current_logs))] pub fn append_tally_resumed_after_resolution(current_logs: &[Log]) -> Vec { let mut logs: Vec = current_logs.to_owned(); diff --git a/packages/windmill/src/services/ceremonies/tally_ceremony.rs b/packages/windmill/src/services/ceremonies/tally_ceremony.rs index db28ec3f19..f2f25d8c08 100644 --- a/packages/windmill/src/services/ceremonies/tally_ceremony.rs +++ b/packages/windmill/src/services/ceremonies/tally_ceremony.rs @@ -50,6 +50,7 @@ use std::str::FromStr; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Tuple containing the latest execution, owning session, contests, and ballot styles for the requested elections. 
type LastTallySessionExecutionBundle = ( TallySessionExecution, TallySession, @@ -57,6 +58,12 @@ type LastTallySessionExecutionBundle = ( Vec, ); +/// Loads the latest execution plus tally session metadata, contests, and ballot styles for +/// `tally_session_id` scoped to `election_ids`. +/// +/// # Errors +/// +/// Postgres failures from any of the underlying `get_*` queries. #[instrument(skip(hasura_transaction), err)] pub async fn find_last_tally_session_execution_and_all_related_data( hasura_transaction: &Transaction<'_>, @@ -115,6 +122,11 @@ pub async fn find_last_tally_session_execution_and_all_related_data( ))) } +/// Deserializes the JSON blob stored on a tally execution row into [`TallyCeremonyStatus`]. +/// +/// # Errors +/// +/// Returns `Err` when `input` is `None` or JSON deserialization fails. #[instrument(skip_all, err)] pub fn get_tally_ceremony_status(input: Option) -> Result { input @@ -126,6 +138,12 @@ pub fn get_tally_ceremony_status(input: Option) -> Result, @@ -164,6 +182,8 @@ pub async fn find_keys_ceremony( Ok(keys_ceremony) } +/// Builds the initial [`TallyCeremonyStatus`] snapshot seeded from keys-ceremony trustees and the +/// requested election ids (all elections start in `WAITING` with zero progress). #[instrument] fn generate_initial_tally_status( election_ids: &Vec, @@ -191,6 +211,18 @@ fn generate_initial_tally_status( } } +/// Inserts `tally_session_contest` rows for every `relevant_area_contest`, allocating increasing +/// batch numbers after the current session maximum. +/// +/// # Panics +/// +/// Panics if incrementing the local `BatchNumber` counter overflows (`expect("overflow")`)—only +/// possible with a pathological number of contests in one session. +/// +/// # Errors +/// +/// Postgres failures from `get_tally_session_highest_batch` / `insert_tally_session_contest`, or +/// `Err` when a referenced contest id is missing from `contests_map`.
#[instrument(err, skip(hasura_transaction))] pub async fn insert_tally_session_contests( hasura_transaction: &Transaction<'_>, @@ -255,6 +287,8 @@ pub async fn insert_tally_session_contests( Ok(()) } +/// Collects [`AreaContest`] rows whose contests belong to any id in `election_ids`, using the area +/// tree to match descendant contests. fn get_area_contests_for_election_ids( contests_map: &HashMap, area_contests_tree: &TreeNode, @@ -268,6 +302,13 @@ fn get_area_contests_for_election_ids( area_contests_tree.get_contest_matches(&contest_ids) } +/// Validates published elections and permission labels, creates the tally session + first execution, +/// inserts contests, and logs `post_key_insertion_start` to the electoral board. +/// +/// # Errors +/// +/// Permission/publish validation failures, area tree construction errors, keys ceremony issues, +/// Postgres insert failures, missing bulletin boards, or electoral log write errors. #[instrument(err, skip(transaction))] pub async fn create_tally_ceremony( transaction: &Transaction<'_>, @@ -468,6 +509,13 @@ pub async fn create_tally_ceremony( Ok(tally_session_id.clone()) } +/// Validates `new_execution_status` transitions (cancel/resume rules) +/// and updates the tally session execution status. +/// +/// # Errors +/// +/// Illegal transitions, missing prior executions (returns `Ok` early), JSON parsing failures, +/// insufficient trustees when not cancelling, missing `ENV_SLUG`, or electoral log failures. #[instrument(err, skip(hasura_transaction))] pub async fn update_tally_ceremony( hasura_transaction: &Transaction<'_>, @@ -586,6 +634,13 @@ pub async fn update_tally_ceremony( Ok(()) } +/// Verifies the trustee-supplied ciphertext matches the board copy, appends trustee logs, may bump +/// the session to `CONNECTED` once the threshold is met, and posts `post_key_insertion`. 
+/// +/// # Errors +/// +/// Missing sessions/executions, invalid execution states, unknown trustees, mismatched ciphertext +/// (returns `Ok(false)`), Postgres failures, missing bulletin boards, or electoral log errors. #[instrument(err, skip(transaction))] pub async fn set_private_key( transaction: &Transaction<'_>, @@ -763,6 +818,12 @@ pub async fn set_private_key( Ok(true) } +/// Marks the tally session successful in Postgres when possible and adds an electoral log entry. +/// +/// # Errors +/// +/// Ignores DB errors from the completion update (no `?` on that call), but still returns `Err` from +/// follow-up reads, missing bulletin boards, or electoral log failures when the update succeeds. #[instrument(err, skip(hasura_transaction))] pub async fn set_tally_session_completed( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/tally_progress.rs b/packages/windmill/src/services/ceremonies/tally_progress.rs index e9736a60fb..621422599c 100644 --- a/packages/windmill/src/services/ceremonies/tally_progress.rs +++ b/packages/windmill/src/services/ceremonies/tally_progress.rs @@ -10,6 +10,7 @@ use sequent_core::types::{ use std::collections::{HashMap, HashSet}; use tracing::{event, instrument, Level}; +/// Collects distinct batch numbers from `messages` whose statement kind matches `kind`. #[instrument(skip_all)] fn get_session_ids_by_type(messages: &[Message], kind: StatementType) -> Vec { let mut plaintext_batch_ids: Vec = messages @@ -28,6 +29,15 @@ fn get_session_ids_by_type(messages: &[Message], kind: StatementType) -> Vec, } -/// Scans `results_area_contest` rows for the given `results_event_id` and -/// returns any contests whose annotations contain a `pending_tie_resolution`. +/// Scans `results_contest` rows for the given `results_event_id` and returns any contests whose +/// annotations contain a `pending_tie_resolution`. 
+/// +/// # Errors +/// +/// Invalid UUID parameters, Postgres query failures, or JSON deserialization errors when reading +/// stored `pending_tie_resolution` blobs into [`TallySessionResolutionData`]. pub async fn check_for_tie_resolutions( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -129,6 +134,11 @@ pub async fn check_for_tie_resolutions( /// `tally_paused_pending_resolution` entry to the electoral log. /// /// Returns the IDs of all pending resolution records (empty if no ties detected). +/// +/// # Errors +/// +/// Any failure from [`check_for_tie_resolutions`], resolution insert helpers, missing bulletin +/// board configuration, electoral log construction, or posting `tally_paused_pending_resolution`. pub async fn handle_pending_irv_resolutions( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -216,6 +226,17 @@ pub async fn handle_pending_irv_resolutions( /// Submit multiple tally resolutions for a paused tally (batch operation). /// Returns the number of resolutions processed. +/// +/// # Panics +/// +/// Panics if incrementing the processed-resolution counter overflows `usize` (practically +/// unreachable unless the batch size exceeds platform limits). +/// +/// # Errors +/// +/// Missing tally session or execution status, validation failures from [`validate_resolution_allowed`], +/// missing resolutions, invalid candidate selections, Postgres update failures, or electoral log +/// errors while recording tie outcomes. pub async fn submit_tally_resolution( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -409,6 +430,12 @@ pub async fn submit_tally_resolution( /// Returns `Err` if the tally is not awaiting input AND at least one of the /// requested contest IDs does not yet have a resolved record in `all_resolutions`. 
+/// +/// # Errors +/// +/// Returns `Err((Status::BadRequest, message))` when the session is not in +/// [`TallyExecutionStatus::AWAITING_INPUT`] and at least one `input_contest_ids` entry lacks a +/// resolved [`TallySessionResolution`] in `all_resolutions`. pub fn validate_resolution_allowed( execution_status: &TallyExecutionStatus, input_contest_ids: &[&str], @@ -432,6 +459,11 @@ pub fn validate_resolution_allowed( } /// Extracts the list of tied candidate IDs from a resolution's `resolution_data` field. +/// +/// # Errors +/// +/// Returns `Err((Status::BadRequest, ...))` when `resolution_data` is missing or does not carry +/// `tied_candidate_ids`. pub fn extract_tied_candidate_ids( resolution: &TallySessionResolution, contest_id: &str, diff --git a/packages/windmill/src/services/ceremonies/tally_session_error.rs b/packages/windmill/src/services/ceremonies/tally_session_error.rs index 02cc21af44..8004d1ef1d 100644 --- a/packages/windmill/src/services/ceremonies/tally_session_error.rs +++ b/packages/windmill/src/services/ceremonies/tally_session_error.rs @@ -1,8 +1,9 @@ -use crate::postgres::tally_session_execution::get_tally_session_executions; -use crate::postgres::tally_session_execution::insert_tally_session_execution; // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +use crate::postgres::tally_session_execution::get_tally_session_executions; +use crate::postgres::tally_session_execution::insert_tally_session_execution; use crate::services::database::get_hasura_pool; use anyhow::{anyhow, Context, Result}; use deadpool_postgres::Client as DbClient; @@ -13,6 +14,15 @@ use tracing::{event, info, instrument, Level}; use super::tally_ceremony::get_tally_ceremony_status; +/// Records `error` on the most recent tally execution for `tally_session_id`, deduplicating +/// identical consecutive messages by refreshing their timestamp instead of appending twice. 
+/// +/// If there is no prior execution row, logs and returns `Ok` without writing. +/// +/// # Errors +/// +/// - Pool acquisition, transaction, Postgres query/insert, or commit failures. +/// - Status JSON parsing failures from [`get_tally_ceremony_status`]. #[instrument(err)] pub async fn handle_tally_session_error( error: &str, diff --git a/packages/windmill/src/services/ceremonies/velvet_tally.rs b/packages/windmill/src/services/ceremonies/velvet_tally.rs index c73675ca69..c64e3a1b39 100644 --- a/packages/windmill/src/services/ceremonies/velvet_tally.rs +++ b/packages/windmill/src/services/ceremonies/velvet_tally.rs @@ -70,7 +70,9 @@ use velvet::pipes::pipe_inputs::{ }; use velvet::pipes::pipe_name::PipeName; +/// Inputs for one area/contest pair. #[derive(Debug, Clone)] +#[allow(missing_docs)] pub struct AreaContestDataType { pub plaintexts: Vec<::P>, pub last_tally_session_execution: TallySessionContest, @@ -81,6 +83,8 @@ pub struct AreaContestDataType { pub auditable_votes: u64, } +/// Converts each plaintext vector to the decimal big-integer string Velvet expects in `ballots.csv`, +/// logging decode failures instead of failing the whole batch. #[instrument(skip_all)] fn decode_plaintexts_to_biguints( plaintexts: &Vec<::P>, @@ -118,6 +122,13 @@ fn decode_plaintexts_to_biguints( .collect::>() } +/// Writes ballots CSV, area/contest JSON configs, and optional tally-sheet files under +/// `base_tempdir/input` for a single [`AreaContestDataType`]. +/// +/// # Errors +/// +/// Filesystem errors, JSON serialization failures, UUID parse errors in config builders, or CSV I/O +/// issues while appending multi-contest ballots. 
#[instrument(skip_all, err)] pub fn prepare_tally_for_area_contest( base_tempdir: PathBuf, @@ -245,6 +256,12 @@ pub fn prepare_tally_for_area_contest( Ok(()) } +/// Aggregates [`AreaContestDataType`] rows into deduplicated [`ElectionConfig`] structures and writes +/// `election-config.json` for each election under `base_tempdir/input/default/configs`. +/// +/// # Errors +/// +/// UUID parsing, date computation failures, or filesystem/serialization errors while writing JSON. #[instrument(skip_all, err)] pub fn create_election_configs_blocking( base_tempdir: PathBuf, @@ -369,6 +386,12 @@ pub fn create_election_configs_blocking( Ok(()) } +/// Loads elections and scheduled events, +/// and runs `create_election_configs_blocking` in a blocking task. +/// +/// # Errors +/// +/// Pool/transaction acquisition failures, export query errors, or join errors from the blocking task. #[instrument(skip_all, err)] pub async fn create_election_configs( base_tempdir: PathBuf, @@ -443,6 +466,11 @@ pub async fn create_election_configs( handle.await? } +/// Generates an initial Velvet state for the given pipe id. +/// +/// # Errors +/// +/// CLI validation errors or failures constructing Velvet runtime state. #[instrument(err)] pub fn generate_initial_state(base_tally_path: &PathBuf, pipe_id: &str) -> Result { let cli = CliRun { @@ -458,6 +486,11 @@ pub fn generate_initial_state(base_tally_path: &PathBuf, pipe_id: &str) -> Resul State::new(&cli, &config).map_err(|err| anyhow!("{err}")) } +/// Repeatedly runs Velvet `exec_next` stages on a blocking pool until the pipeline reports completion. +/// +/// # Errors +/// +/// Missing Velvet stages, spawn/join failures, or errors returned from Velvet execution steps. 
#[instrument(err)] pub async fn call_velvet(base_tally_path: PathBuf, pipe_id: &str) -> Result { let mut state_opt = Some(generate_initial_state(&base_tally_path, pipe_id)?); @@ -500,13 +533,23 @@ pub async fn call_velvet(base_tally_path: PathBuf, pipe_id: &str) -> Result Vec { /// Parses a single PEM-encoded X.509 certificate and extracts its metadata /// using OpenSSL command-line tools. +/// +/// # Errors +/// +/// Returns an error if one of the operations fails. pub fn parse_certificate_pem(pem: &str) -> Result { let cert_temp_file = generate_temp_file("cert", ".pem").with_context(|| "Error creating temp PEM file")?; @@ -91,6 +100,7 @@ pub fn parse_certificate_pem(pem: &str) -> Result { parse_openssl_x509_output(&output, pem) } +/// Parses openssl x509 output from external representation. fn parse_openssl_x509_output(output: &str, pem: &str) -> Result { let mut subject = String::new(); let mut issuer = String::new(); @@ -158,6 +168,10 @@ pub fn extract_cn(rdns: &str) -> Option { } /// Parses the OpenSSL date format: "Jan 1 00:00:00 2020 GMT" +/// +/// # Errors +/// +/// Returns an error if parsing the date fails. pub fn parse_openssl_date(date_str: &str) -> Result> { let dt = NaiveDateTime::parse_from_str(date_str.trim(), "%b %e %H:%M:%S %Y %Z") .with_context(|| format!("Unrecognised date format: '{date_str}'"))?; diff --git a/packages/windmill/src/services/cloudflare.rs b/packages/windmill/src/services/cloudflare.rs index 11b87df271..065df88e77 100644 --- a/packages/windmill/src/services/cloudflare.rs +++ b/packages/windmill/src/services/cloudflare.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Cloudflare API helpers for DNS records and custom WAF rulesets. 
+ use reqwest::Client; use sequent_core::serialization::deserialize_with_path::deserialize_str; use serde::{Deserialize, Serialize}; @@ -8,9 +11,12 @@ use std::error::Error; use std::fmt; use tracing::{info, instrument}; +/// Cloudflare WAF ruleset phase. pub const WAF_RULESET_PHASE: &str = "http_request_firewall_custom"; #[derive(Debug, Deserialize)] +/// API response envelope for Cloudflare. +#[allow(missing_docs)] pub struct ApiResponse { pub success: bool, pub result: T, @@ -19,6 +25,9 @@ pub struct ApiResponse { } #[derive(Debug, Deserialize)] +/// Cloudflare ruleset. +#[allow(clippy::missing_docs_in_private_items)] +#[allow(missing_docs)] pub struct Ruleset { description: String, pub id: String, @@ -31,6 +40,8 @@ pub struct Ruleset { } #[derive(Debug, Deserialize)] +/// Get rulesets response. +#[allow(clippy::missing_docs_in_private_items)] pub struct GetRulesetsResponse { description: String, id: String, @@ -42,6 +53,8 @@ pub struct GetRulesetsResponse { } #[derive(Debug, Deserialize, Serialize)] +/// Create ruleset request. +#[allow(clippy::missing_docs_in_private_items)] pub struct CreateRulesetRequest { description: String, name: String, @@ -51,6 +64,8 @@ pub struct CreateRulesetRequest { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Cloudflare rule. +#[allow(missing_docs)] pub struct Rule { pub id: Option, pub expression: String, @@ -61,6 +76,8 @@ pub struct Rule { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Create custom rule request. +#[allow(missing_docs)] pub struct CreateCustomRuleRequest { pub expression: String, pub description: String, @@ -68,11 +85,14 @@ pub struct CreateCustomRuleRequest { } #[derive(Debug)] +/// Cloudflare error. +#[allow(missing_docs)] pub struct CloudflareError { pub details: String, } impl CloudflareError { + /// Create a new Cloudflare error.
pub fn new(msg: &str) -> CloudflareError { CloudflareError { details: msg.to_string(), @@ -89,6 +109,11 @@ impl fmt::Display for CloudflareError { impl Error for CloudflareError {} #[instrument] +/// Get Cloudflare variables. +/// +/// # Errors +/// +/// Returns an error if required environment variables are missing. pub fn get_cloudflare_vars() -> Result<(String, String), Box> { let cloudflare_zone = std::env::var("CLOUDFLARE_ZONE") .map_err(|_e| "Missing cloudflare env variable".to_string())?; @@ -99,6 +124,10 @@ pub fn get_cloudflare_vars() -> Result<(String, String), Box> { } #[instrument] +/// List Cloudflare rulesets for a given zone. +/// # Errors +/// +/// Returns an error if the request fails or the response cannot be parsed. pub async fn list_rulesets( api_key: &str, zone_id: &str, @@ -132,6 +161,10 @@ pub async fn list_rulesets( } #[instrument] +/// Get Cloudflare ruleset by id. +/// # Errors +/// +/// Returns an error if the request fails or the response cannot be parsed. pub async fn get_ruleset_by_id( api_key: &str, zone_id: &str, @@ -166,6 +199,10 @@ pub async fn get_ruleset_by_id( } #[instrument] +/// Get Cloudflare ruleset by phase. +/// # Errors +/// +/// Returns an error if listing or fetching rulesets fails. pub async fn get_ruleset_by_phase( api_key: &str, zone_id: &str, @@ -192,6 +229,10 @@ pub async fn get_ruleset_by_phase( } #[instrument] +/// Create a new Cloudflare ruleset. +/// # Errors +/// +/// Returns an error if the request fails or the response cannot be parsed. pub async fn create_ruleset( api_key: &str, zone_id: &str, @@ -236,6 +277,10 @@ pub async fn create_ruleset( } #[instrument] +/// Create a new Cloudflare rule in a ruleset. +/// # Errors +/// +/// Returns an error if the request fails. pub async fn create_ruleset_rule( api_key: &str, zone_id: &str, @@ -269,6 +314,10 @@ pub async fn create_ruleset_rule( } #[instrument] +/// Update a Cloudflare rule in a ruleset. +/// # Errors +/// +/// Returns an error if the request fails. 
pub async fn update_ruleset_rule( api_key: &str, zone_id: &str, @@ -303,6 +352,10 @@ pub async fn update_ruleset_rule( } #[instrument] +/// Delete a Cloudflare rule in a ruleset. +/// # Errors +/// +/// Returns an error if the request fails. pub async fn delete_ruleset_rule( api_key: &str, zone_id: &str, diff --git a/packages/windmill/src/services/compress.rs b/packages/windmill/src/services/compress.rs index 44c29357dc..ca10d65100 100644 --- a/packages/windmill/src/services/compress.rs +++ b/packages/windmill/src/services/compress.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Folder compression and decompression helpers. + use crate::types::error::Result; use anyhow::Context; use flate2::read::GzDecoder; @@ -17,6 +20,10 @@ use tracing::{event, instrument, Level}; /// # Arguments /// * `folder_path` - The path to the folder to be archived. /// * `compress` - If true, creates a compressed .tar.gz file; otherwise, creates an uncompressed .tar file. +/// +/// # Errors +/// +/// Returns an error if the folder doesn't exist, temporary file creation fails, or archiving fails. #[instrument(err)] pub fn create_archive_from_folder( folder_path: &Path, @@ -82,6 +89,10 @@ pub fn create_archive_from_folder( /// * `file_path` - The path to the .tar.gz or .tar file to be decompressed/extracted. /// * `is_compressed` - If true, assumes the file is a .tar.gz and decompresses it; /// otherwise, assumes it's a .tar file and extracts it directly. +/// +/// # Errors +/// +/// Returns an error if the archive cannot be opened, decompressed, or unpacked. 
#[instrument(err)] pub fn extract_archive_to_temp_dir(file_path: &Path, is_compressed: bool) -> Result { let temp_dir = diff --git a/packages/windmill/src/services/consolidation/acm_json.rs b/packages/windmill/src/services/consolidation/acm_json.rs index 218069bb2c..b5c5600819 100644 --- a/packages/windmill/src/services/consolidation/acm_json.rs +++ b/packages/windmill/src/services/consolidation/acm_json.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Loads or creates per-event ECIES key pairs and builds ACM JSON sidecars for encrypted packages. + use super::{ eml_generator::{ find_miru_annotation, MiruAreaAnnotations, MiruElectionAnnotations, @@ -24,29 +27,43 @@ use sequent_core::{ use std::env; use tracing::instrument; +/// `strftime`-style format for human-readable timestamps embedded in ACM JSON. const ACM_JSON_FORMAT: &str = "%m/%d/%Y %I:%M:%S %p"; +/// Default Miru device id when `MIRU_DEVICE_ID` is unset. const DEFAULT_MIRU_DEVICE_ID: &str = "SQUNT420535311"; +/// Default Miru hardware serial when `MIRU_SERIAL_NUMBER` is unset. const DEFAULT_MIRU_SERIAL_NUMBER: &str = "SEQ-NT-52706782"; //const DEFAULT_MIRU_STATION_NAME: &str = "2094A,5346A,6588A,7474A,1489A"; +/// Default placeholder LAN address for ACM metadata when `MIRU_IP_ADDRESS` is unset. const DEFAULT_MIRU_IP_ADDRESS: &str = "192.168.1.67"; +/// Default placeholder MAC when `_MIRU_MAC_ADDRESS` is unset. const DEFAULT_MIRU_MAC_ADDRESS: &str = "3C:7E:5A:89:4D:2F"; +/// Reads `MIRU_DEVICE_ID` or falls back to [`DEFAULT_MIRU_DEVICE_ID`]. pub fn get_miru_device_id() -> String { env::var("MIRU_DEVICE_ID").unwrap_or(DEFAULT_MIRU_DEVICE_ID.to_string()) } +/// Reads `MIRU_SERIAL_NUMBER` or falls back to [`DEFAULT_MIRU_SERIAL_NUMBER`]. pub fn get_miru_serial_number() -> String { env::var("MIRU_SERIAL_NUMBER").unwrap_or(DEFAULT_MIRU_SERIAL_NUMBER.to_string()) } +/// Reads `MIRU_IP_ADDRESS` or falls back to [`DEFAULT_MIRU_IP_ADDRESS`]. 
pub fn get_miru_ip_address() -> String { env::var("MIRU_IP_ADDRESS").unwrap_or(DEFAULT_MIRU_IP_ADDRESS.to_string()) } +/// Reads `_MIRU_MAC_ADDRESS` or falls back to [`DEFAULT_MIRU_MAC_ADDRESS`]. pub fn get_miru_mac_address() -> String { env::var("_MIRU_MAC_ADDRESS").unwrap_or(DEFAULT_MIRU_MAC_ADDRESS.to_string()) } +/// Loads the tenant/event ECIES key pair from the vault, or generates and stores a new one. +/// +/// # Errors +/// +/// Vault I/O, JSON (de)serialization, or key generation failures. #[instrument(err)] pub async fn get_acm_key_pair( hasura_transaction: &Transaction<'_>, @@ -83,6 +100,11 @@ pub async fn get_acm_key_pair( } } +/// Fills an [`ACMJson`] struct for a completed transmission or log package using Miru env defaults. +/// +/// # Errors +/// +/// Timestamp formatting failures from [`generate_timestamp`]. #[instrument(skip(election_event_annotations), err)] pub fn generate_acm_json( sha256_hash: &str, diff --git a/packages/windmill/src/services/consolidation/acm_transaction.rs b/packages/windmill/src/services/consolidation/acm_transaction.rs index 28e7694f7a..c62b3fa0d1 100644 --- a/packages/windmill/src/services/consolidation/acm_transaction.rs +++ b/packages/windmill/src/services/consolidation/acm_transaction.rs @@ -1,14 +1,22 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Deterministic pseudo-transaction ids for Miru EML headers. + use chrono::Utc; use chrono::{Datelike, Timelike}; use rand::Rng; -// the random part comes from dividing 9999999999999 / (24*365*24*3600) +/// The random part comes from dividing 9999999999999 / (24*365*24*3600) const RANDOM_PART: u64 = 13212; -// generate a 13 digit number like 1721184531864 +/// Builds a 13-digit number like 1721184531864 +/// +/// # Panics +/// +/// Panics only if arithmetic around year/hour/second components or the final product overflows +/// (should not occur for real wall-clock times). 
pub fn generate_transaction_id() -> u64 { let now = Utc::now(); let year = (now.year() as u64) diff --git a/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs b/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs index 1f392fcad1..ea9dd8bf8c 100644 --- a/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs +++ b/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! File encryption compatible with `openssl enc -aes-256-cbc` (MD5 KDF). + use anyhow::{anyhow, Context, Result}; use sequent_core::signatures::shell::run_shell_command; use std::process::Command; @@ -9,6 +12,11 @@ use tracing::{info, instrument}; // used to recreate this command: // openssl enc -aes-256-cbc -e -in $input_file_path -out $output_file_path -pass pass:$password -md md5 +/// Encrypts a file in place on disk using AES-256-CBC via `openssl enc`. +/// +/// # Errors +/// +/// Missing `openssl`, I/O errors, or non-zero exit status from `openssl`. #[instrument(skip(password), err)] pub fn encrypt_file_aes_256_cbc( input_file_path: &str, @@ -38,6 +46,11 @@ pub fn encrypt_file_aes_256_cbc( Ok(()) } +/// Decrypts a file produced by [`encrypt_file_aes_256_cbc`]. +/// +/// # Errors +/// +/// Same as encryption: `openssl` failures or I/O errors. 
#[instrument(skip(password), err)] pub fn decrypt_file_aes_256_cbc( input_file_path: &str, diff --git a/packages/windmill/src/services/consolidation/create_transmission_package_service.rs b/packages/windmill/src/services/consolidation/create_transmission_package_service.rs index f0f9d23093..720af2ef3e 100644 --- a/packages/windmill/src/services/consolidation/create_transmission_package_service.rs +++ b/packages/windmill/src/services/consolidation/create_transmission_package_service.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Builds transmission zip bundles from tally artifacts and updates tally annotations. + use super::acm_json::get_acm_key_pair; use super::acm_transaction::generate_transaction_id; use super::eml_generator::{ @@ -53,6 +56,11 @@ use tracing::{info, instrument}; use uuid::Uuid; use velvet::pipes::generate_reports::ReportData; +/// Resolves the latest tally execution’s `TarGzOriginal` result document and downloads it to a temp file. +/// +/// # Errors +/// +/// DB lookup failures, missing execution or document, or download errors. #[instrument(skip(hasura_transaction), err)] pub async fn download_tally_tar_gz_to_file( hasura_transaction: &Transaction<'_>, @@ -103,6 +111,11 @@ pub async fn download_tally_tar_gz_to_file( get_document_as_temp_file(tenant_id, &document).await } +/// Replaces one area/election entry in tally-session MIRU transmission data and persists annotations. +/// +/// # Errors +/// +/// Deserialization, missing annotations, or DB update failures. #[instrument(skip(hasura_transaction), err)] pub async fn update_transmission_package_annotations( hasura_transaction: &Transaction<'_>, @@ -145,6 +158,11 @@ pub async fn update_transmission_package_annotations( Ok(()) } +/// For each CCS server, writes `er_` / optional `al_` zips under a temp tree, then uploads `all_servers.zip`. 
+/// +/// # Errors +/// +/// ACM key load, package build, zip, upload, or filesystem errors. #[instrument(skip_all, err)] pub async fn generate_all_servers_document( hasura_transaction: &Transaction<'_>, @@ -229,6 +247,12 @@ pub async fn generate_all_servers_document( Ok(document) } +/// End-to-end: load event/area/tally state, render EML, sign, upload documents, +/// and update MIRU annotations unless `force` is false and transmission package already exists. +/// +/// # Errors +/// +/// Any failure in annotation validation, Velvet/tally prep, document upload, or persistence. #[instrument(err)] pub async fn create_transmission_package_service( tenant_id: &str, diff --git a/packages/windmill/src/services/consolidation/eml_generator.rs b/packages/windmill/src/services/consolidation/eml_generator.rs index 5be66c73cd..81e87c2335 100644 --- a/packages/windmill/src/services/consolidation/eml_generator.rs +++ b/packages/windmill/src/services/consolidation/eml_generator.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Maps Velvet report rows and Miru-prefixed Hasura annotations into EML-shaped JSON. + use super::eml_types::*; use crate::types::miru_plugin::*; use anyhow::{anyhow, Context, Result}; @@ -21,57 +24,93 @@ use strum_macros::{Display, EnumString, ToString}; use tracing::{info, instrument}; use velvet::pipes::{do_tally::ContestResult, generate_reports::ReportData}; +/// Namespace prefix for MIRU plugin keys in [`Annotations`] maps (`miru:…`). pub const MIRU_PLUGIN_PREPEND: &str = "miru"; +/// Annotation suffix: election event id string. pub const MIRU_ELECTION_EVENT_ID: &str = "election-event-id"; +/// Annotation suffix: election event display name. pub const MIRU_ELECTION_EVENT_NAME: &str = "election-event-name"; +/// Annotation suffix: election id (within event). const MIRU_ELECTION_ID: &str = "election-id"; +/// Annotation suffix: election display name. 
const MIRU_ELECTION_NAME: &str = "election-name"; +/// Annotation suffix: contest id. const MIRU_CONTEST_ID: &str = "contest-id"; +/// Annotation suffix: contest display name. const MIRU_CONTEST_NAME: &str = "contest-name"; +/// Annotation suffix: candidate id. const MIRU_CANDIDATE_ID: &str = "candidate-id"; +/// Annotation suffix: candidate display name. const MIRU_CANDIDATE_NAME: &str = "candidate-name"; +/// Annotation suffix: candidate status/setting code. const MIRU_CANDIDATE_SETTING: &str = "candidate-setting"; +/// Annotation suffix: affiliation id for the candidate’s party. const MIRU_CANDIDATE_AFFILIATION_ID: &str = "candidate-affiliation-id"; +/// Annotation suffix: registered affiliation name. const MIRU_CANDIDATE_AFFILIATION_REGISTERED_NAME: &str = "candidate-affiliation-registered-name"; +/// Annotation suffix: party label. const MIRU_CANDIDATE_AFFILIATION_PARTY: &str = "candidate-affiliation-party"; +/// Annotation suffix: JSON list of [`MiruCcsServer`] destinations. pub const MIRU_AREA_CCS_SERVERS: &str = "area-ccs-servers"; +/// Annotation suffix: precinct / station id string. pub const MIRU_AREA_STATION_ID: &str = "area-station-id"; +/// Annotation suffix: station display name. pub const MIRU_AREA_STATION_NAME: &str = "area-station-name"; +/// Annotation suffix: numeric threshold string for MIRU policy. pub const MIRU_AREA_THRESHOLD: &str = "area-threshold"; +/// Annotation suffix: JSON list of SBEI usernames allowed for this area. pub const MIRU_AREA_TRUSTEE_USERS: &str = "area-trustee-users"; +/// Annotation suffix: country code or name for the area. pub const MIRU_AREA_COUNTRY: &str = "area-country"; +/// Annotation suffix: registered voter count for the precinct. pub const MIRU_AREA_REGISTERED_VOTERS: &str = "registered-voters"; +/// Annotation suffix: JSON [`MiruTallySessionData`] blob on the tally session. pub const MIRU_TALLY_SESSION_DATA: &str = "tally-session-data"; +/// Annotation suffix: trustee id (legacy / display). 
pub const MIRU_TRUSTEE_ID: &str = "trustee-id"; +/// Annotation suffix: trustee display name. pub const MIRU_TRUSTEE_NAME: &str = "trustee-name"; +/// Annotation suffix: JSON list of [`MiruSbeiUser`] on the election event. pub const MIRU_SBEI_USERS: &str = "sbei-users"; +/// Annotation suffix: PEM root CA for optional client cert validation. pub const MIRU_ROOT_CA: &str = "root-ca"; +/// Annotation suffix: intermediate CA bundle text. pub const MIRU_INTERMEDIATE_CAS: &str = "intermediate-cas"; +/// Annotation suffix: `"true"` / `"false"` — validate client certs against CA store. pub const MIRU_USE_ROOT_CA: &str = "use-root-ca"; +/// `chrono`-style format for EML `issue_date`. const ISSUE_DATE_FORMAT: &str = "%Y-%m-%dT%H:%M:%S"; +/// Date-only format for official status timestamp in EML. const OFFICIAL_STATUS_DATE_FORMAT: &str = "%Y-%m-%d"; -/*COMELEC ELECTION DATA -> to be change if revice different keys */ +// COMELEC-style geographic keys; adjust if a different jurisdiction’s EML mapping is needed. +/// Annotation suffix: geographical region label for the election post. pub const MIRU_GEOGRAPHICAL_REGION: &str = "geographical-region"; +/// Annotation suffix: voting center / post name. pub const MIRU_VOTING_CENTER: &str = "voting-center"; +/// Annotation suffix: precinct code. pub const MIRU_PRECINCT_CODE: &str = "precinct-code"; +/// Map key (no `miru:` prefix in code paths): poll center code. pub const MIRU_POLLCENTER_CODE: &str = "pollcenter_code"; -/**/ +/// EML official-status enumeration (serialized lowercase). #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, EnumString, Display)] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] pub enum OfficialStatus { + /// Final official results (not provisional). OFFICIAL, } +/// Builds COMELEC-style [`EMLCountMetric`] rows from a Velvet contest result. pub trait GetMetrics { + /// Fills standard metric ids (over/under votes, registered voters, etc.) for one contest. 
fn get_metrics(&self, registered_voters: i64) -> Vec; } -// TODO: review impl GetMetrics for ContestResult { + /// Builds the fixed set of COMELEC count metrics (over/under votes, RV, valid, etc.). #[instrument(skip_all, name = "ContestResult::get_metrics")] fn get_metrics(&self, registered_voters: i64) -> Vec { let extended_metrics = self.extended_metrics.unwrap_or_default(); @@ -166,15 +205,32 @@ impl GetMetrics for ContestResult { } } +/// Parses MIRU-prefixed keys from a [`sequent_core::ballot::Annotations`] map into a typed `Item`. pub trait ValidateAnnotations { + /// Strongly typed view produced from annotations (event, area, election, …). type Item; + /// Requires all expected MIRU keys; returns an error if any are missing or JSON is invalid. + /// + /// # Errors + /// + /// Missing annotation map, missing keys, or deserialize failures. fn get_annotations(&self) -> Result; + /// Like [`get_annotations`](Self::get_annotations) but fills defaults when the map or keys are absent. + /// + /// # Errors + /// + /// Parse errors on present-but-invalid values (implementation-defined). fn get_annotations_or_empty_values(&self) -> Result { self.get_annotations() } } +/// Returns `Err` if any `keys` are absent from `annotations`. +/// +/// # Errors +/// +/// The first missing key produces an error. #[instrument(err, skip(annotations))] fn check_annotations_exist(keys: Vec, annotations: &Annotations) -> Result<()> { for key in keys { @@ -185,19 +241,29 @@ fn check_annotations_exist(keys: Vec, annotations: &Annotations) -> Resu Ok(()) } +/// MIRU fields stored on the election event (ids, SBEI roster, optional TLS trust material). #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct MiruElectionEventAnnotations { + /// Election event id (annotation `miru:election-event-id`). pub event_id: String, + /// Election event display name. pub event_name: String, + /// Configured SBEI users for signing and CCS workflow. 
pub sbei_users: Vec, + /// PEM root CA text when validating client certificates. pub root_ca: String, + /// Intermediate CA PEM(s) or bundle text. pub intermediate_cas: String, + /// Whether to enforce CA validation for P12 uploads. pub use_root_ca: bool, } impl ValidateAnnotations for ElectionEvent { type Item = MiruElectionEventAnnotations; + /// # Errors + /// + /// Missing or invalid election-event annotations or embedded JSON lists. #[instrument(skip_all, err, name = "ElectionEvent::get_annotations")] fn get_annotations(&self) -> Result { let annotations_js = self @@ -270,6 +336,10 @@ impl ValidateAnnotations for ElectionEvent { use_root_ca: "true" == use_root_ca.as_str(), }) } + + /// # Errors + /// + /// Deserialize failures when partial annotation values are malformed. #[instrument(err, skip_all)] fn get_annotations_or_empty_values(&self) -> Result { let annotations_js = self @@ -308,19 +378,29 @@ impl ValidateAnnotations for ElectionEvent { } } +/// Geographic and naming metadata for one election post (COMELEC-oriented fields). #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct MiruElectionAnnotations { + /// Election id within the event. pub election_id: String, + /// Election display name. pub election_name: String, + /// Region / geographical label. pub geographical_area: String, + /// Voting center / post label. pub post: String, + /// Precinct code string. pub precinct_code: String, + /// Poll center code (unprefixed map key in annotations). pub pollcenter_code: String, } impl ValidateAnnotations for core::Election { type Item = MiruElectionAnnotations; + /// # Errors + /// + /// Missing election annotations or required MIRU keys (including poll center). 
#[instrument(skip_all, err, name = "Election::get_annotations")] fn get_annotations(&self) -> Result { let annotations_js = self @@ -387,6 +467,9 @@ impl ValidateAnnotations for core::Election { }) } + /// # Errors + /// + /// JSON deserialization errors when the annotation map is non-empty but invalid. #[instrument(err, skip_all)] fn get_annotations_or_empty_values(&self) -> Result { let annotations_js = self @@ -427,20 +510,31 @@ impl ValidateAnnotations for core::Election { } } +/// Precinct-level MIRU config: CCS endpoints, station ids, SBEI allowlist, registered voter count. #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct MiruAreaAnnotations { + /// Destinations for `er_` / `al_` uploads. pub ccs_servers: Vec, + /// Station / precinct id string. pub station_id: String, + /// Human-readable station name. pub station_name: String, + /// Policy threshold parsed from annotations. pub threshold: i64, - pub sbei_ids: Vec, // the miru id of the sbei user, the election event has their annotations + /// Miru ids of SBEI users permitted for this area (cross-reference event `sbei_users`). + pub sbei_ids: Vec, + /// Country field for EML / reporting. pub country: String, - pub registered_voters: i64, // registered voters at a given precinct id + /// Registered voters for metric generation in EML. + pub registered_voters: i64, } impl ValidateAnnotations for core::Area { type Item = MiruAreaAnnotations; + /// # Errors + /// + /// Missing area annotations, parse errors for numeric fields, or invalid embedded JSON lists. #[instrument(skip_all, err, name = "Area::get_annotations")] fn get_annotations(&self) -> Result { let annotations_js = self @@ -524,6 +618,9 @@ impl ValidateAnnotations for core::Area { }) } + /// # Errors + /// + /// JSON or numeric parse errors when optional fields are present but malformed. 
#[instrument(err, skip_all)] fn get_annotations_or_empty_values(&self) -> Result { let annotations_js = self @@ -578,6 +675,9 @@ impl ValidateAnnotations for core::Area { impl ValidateAnnotations for core::TallySession { type Item = MiruTallySessionData; + /// # Errors + /// + /// Missing tally annotations or invalid `miru:tally-session-data` JSON. #[instrument(skip_all, err, name = "TallySession::get_annotations")] fn get_annotations(&self) -> Result { let annotations_js = self @@ -600,6 +700,9 @@ impl ValidateAnnotations for core::TallySession { Ok(tally_session_data) } + /// # Errors + /// + /// Rare: fails if an empty-default path deserializes invalidly (normally returns an empty vec). #[instrument(err, skip_all)] fn get_annotations_or_empty_values(&self) -> Result { let annotations_js = self @@ -616,15 +719,21 @@ impl ValidateAnnotations for core::TallySession { } } +/// Display id and title for a contest row in EML. #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct MiruContestAnnotations { + /// Contest title from MIRU annotations. pub contest_name: String, + /// Contest id from MIRU annotations. pub contest_id: String, } impl ValidateAnnotations for Contest { type Item = MiruContestAnnotations; + /// # Errors + /// + /// Missing contest annotations or required MIRU keys. #[instrument(skip_all, err, name = "Contest::get_annotations")] fn get_annotations(&self) -> Result { let annotations = self @@ -657,19 +766,29 @@ impl ValidateAnnotations for Contest { } } +/// Candidate and party fields mirrored into EML from MIRU contest annotations. #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct MiruCandidateAnnotations { + /// Display name. pub candidate_name: String, + /// Candidate id string. pub candidate_id: String, + /// Status / setting code for the row. pub candidate_setting: String, + /// Party id string. pub candidate_affiliation_id: String, + /// Registered party name. 
pub candidate_affiliation_registered_name: String, + /// Party label / acronym. pub candidate_affiliation_party: String, } impl ValidateAnnotations for Candidate { type Item = MiruCandidateAnnotations; + /// # Errors + /// + /// Missing candidate annotations or any required affiliation keys. #[instrument(skip_all, err, name = "Candidate::get_annotations")] fn get_annotations(&self) -> Result { let annotations = self @@ -742,11 +861,16 @@ impl ValidateAnnotations for Candidate { } } +/// Returns `miru:{data}` for use as an [`Annotations`] map key. #[instrument] pub fn prepend_miru_annotation(data: &str) -> String { format!("{MIRU_PLUGIN_PREPEND}:{data}") } +/// Looks up `miru:{data}`. +/// # Errors +/// +/// Missing key in `annotations`. #[instrument(err, skip(annotations))] pub fn find_miru_annotation(data: &str, annotations: &Annotations) -> Result { let key = prepend_miru_annotation(data); @@ -756,12 +880,26 @@ pub fn find_miru_annotation(data: &str, annotations: &Annotations) -> Result Result> { let key = prepend_miru_annotation(data); Ok(annotations.get(&key).cloned()) } +/// Maps one Velvet [`ReportData`] row into an [`EMLContest`] (metrics + selections). +/// +/// # Panics +/// +/// If `report.contest` or `report.contest_result` is `None` (caller must supply a complete report). +/// +/// # Errors +/// +/// Contest or candidate annotation validation failures, or errors mapping candidate rows. #[instrument(err, skip_all)] pub fn render_eml_contest( report: &ReportData, @@ -831,6 +969,11 @@ pub fn render_eml_contest( Ok(contests) } +/// Assembles the root [`EMLFile`] for a tally: header timestamps, event/election ids, and all contests. +/// +/// # Errors +/// +/// Failures from [`render_eml_contest`] when iterating `reports`. 
#[instrument(err, skip(election_event_annotations, election_annotations, reports))] pub fn render_eml_file( tally_id: &str, diff --git a/packages/windmill/src/services/consolidation/eml_types.rs b/packages/windmill/src/services/consolidation/eml_types.rs index 7cbfa9cc2a..7c4c4f9a8f 100644 --- a/packages/windmill/src/services/consolidation/eml_types.rs +++ b/packages/windmill/src/services/consolidation/eml_types.rs @@ -1,115 +1,181 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Serializable shapes for ACM sidecar JSON and the EML tally XML payload (Philippines COMELEC-style fields). + use serde::{Deserialize, Serialize}; +/// Trustee line in ACM `members`: id, display name, and optional signature material. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct ACMTrustee { + /// Stable trustee / SBEI identifier in MIRU. pub id: String, + /// Base64 or PEM signature over the payload, if present. #[serde(skip_serializing_if = "Option::is_none")] pub signature: Option, + /// Public key material for verification, if present. #[serde(skip_serializing_if = "Option::is_none")] pub publickey: Option, + /// Human-readable trustee name. pub name: String, } +/// ACM JSON metadata shipped beside encrypted election-results or audit-log zips. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct ACMJson { + /// Device identifier from station configuration. pub device_id: String, + /// Hardware serial number string. pub serial_number: String, + /// Precinct / station id. pub station_id: String, + /// Station display name. pub station_name: String, + /// Election event id (COMELEC event scope). pub event_id: String, + /// Election event title. pub event_name: String, + /// Uppercase hex SHA-256 of the cleartext payload (e.g. EML or logs JSON). pub sha256_hash: String, + /// ECIES-wrapped symmetric key for the `.exz` blob (base64). 
pub encrypted_key: String, + /// Co-signers / trustees for this package. pub members: Vec, + /// Reported IP of the sending station. pub ip_address: String, + /// Reported MAC of the sending station. pub mac_address: String, + /// Timestamp string for the election-results moment (local policy format). pub er_datetime: String, + /// Station signature over the ACM JSON (or related payload). pub signature: String, + /// Station public key PEM associated with `signature`. pub publickey: String, + /// Transfer window start timestamp string. pub transfer_start: String, } +/// EML header sub-node: official vs provisional and when that status was recorded. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLOfficialStatusDetail { + /// Status label (e.g. official). pub official_status: String, + /// Date-only or timestamp string for the status. pub status_date: String, } +/// EML document header: ids, issue time, and official status. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLHeader { + /// ACM / transaction id tying the EML to a submission. pub transaction_id: String, + /// Issue datetime string in configured format. pub issue_date: String, + /// Official vs provisional block. pub official_status_detail: EMLOfficialStatusDetail, } +/// Generic id + name pair used throughout EML (election, contest, candidate, …). #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLIdentifier { + /// External id number. pub id_number: String, + /// Display name. pub name: String, } +/// One contest block: id and aggregated vote metrics. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLContest { + /// Contest id and title. pub identifier: EMLIdentifier, + /// Per-contest totals and per-candidate breakdown. pub total_votes: EMLTotalVotes, } +/// Single status flag on a candidate row (e.g. setting code). 
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLStatusItem { + /// Opaque setting key or label from source data. pub setting: String, } +/// Party / organization line for a candidate. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLAffiliation { + /// Party identifier block. pub identifier: EMLIdentifier, + /// Party short name or acronym. pub party: String, } +/// Candidate row under a selection with status and affiliation. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLCandidate { + /// Candidate id block. pub identifier: EMLIdentifier, + /// Additional status fields from source annotations. pub status_details: Vec, + /// Party affiliation subtree. pub affiliation: EMLAffiliation, } +/// Ballot selection: one or more candidates and valid vote count for that row. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLSelection { + /// Candidates listed for this selection (often one). pub candidates: Vec, + /// Valid votes attributed to this selection. pub valid_votes: i64, } +/// Named integer metric in the contest total (over/under votes, registered voters, …). #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLCountMetric { + /// Human-readable metric title. pub kind: String, + /// Short code (e.g. `OV`, `RV`). pub id: String, + /// Metric value. pub datum: i64, } +/// Contest-level totals: roll-up metrics plus per-candidate selections. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLTotalVotes { + /// Aggregate count metrics for the contest. pub count_metrics: Vec, + /// Per-candidate (or per-selection) valid vote lines. pub selections: Vec, } +/// Election subtree: identifier and its contests. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLElection { + /// Election id block. pub identifier: EMLIdentifier, + /// Contests within this election. 
pub contests: Vec, } +/// Region / aggregation node containing one or more elections (e.g. event-level count). #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLCount { + /// Region or reporting unit identifier. pub identifier: EMLIdentifier, + /// Elections reported under this count node. pub elections: Vec, } +/// Root EML document: id, header, and hierarchical counts. #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] pub struct EMLFile { + /// Document id (often tally session id). pub id: String, + /// Standard EML header fields. pub header: EMLHeader, + /// Top-level count regions. pub counts: Vec, } diff --git a/packages/windmill/src/services/consolidation/logs.rs b/packages/windmill/src/services/consolidation/logs.rs index 491e3cc40e..3476885941 100644 --- a/packages/windmill/src/services/consolidation/logs.rs +++ b/packages/windmill/src/services/consolidation/logs.rs @@ -1,11 +1,15 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Human-readable [`sequent_core::types::ceremonies::Log`] lines for transmission lifecycle events. + use chrono::{DateTime, Local}; use sequent_core::services::date::ISO8601; use sequent_core::types::ceremonies::Log; use tracing::{info, instrument}; +/// Log line recorded when a transmission package XML is generated for an election/area pair. #[instrument(skip_all)] pub fn create_transmission_package_log( datetime: &DateTime, @@ -23,6 +27,7 @@ pub fn create_transmission_package_log( } } +/// Log line when audit logs are POSTed to a CCS server. #[instrument(skip_all)] pub fn send_logs_to_ccs_log( datetime: &DateTime, @@ -42,6 +47,7 @@ pub fn send_logs_to_ccs_log( } } +/// Log line when the transmission package zip is sent to CCS, listing signing trustees. 
#[instrument(skip_all)] pub fn send_transmission_package_to_ccs_log( datetime: &DateTime, @@ -63,6 +69,7 @@ pub fn send_transmission_package_to_ccs_log( } } +/// Log line when sending audit logs to CCS fails. #[instrument(skip_all)] pub fn error_sending_logs_to_ccs_log( datetime: &DateTime, @@ -84,6 +91,7 @@ pub fn error_sending_logs_to_ccs_log( } } +/// Log line when sending the transmission package to CCS fails. #[instrument(skip_all)] pub fn error_sending_transmission_package_to_ccs_log( datetime: &DateTime, @@ -106,6 +114,7 @@ pub fn error_sending_transmission_package_to_ccs_log( } } +/// Log line when an SBEI finishes signing the transmission package XML. #[instrument(skip_all)] pub fn sign_transmission_package_log( datetime: &DateTime, diff --git a/packages/windmill/src/services/consolidation/mod.rs b/packages/windmill/src/services/consolidation/mod.rs index e2c896d5f1..49249cb8a7 100644 --- a/packages/windmill/src/services/consolidation/mod.rs +++ b/packages/windmill/src/services/consolidation/mod.rs @@ -2,6 +2,9 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Miru / ACM transmission packages: EML generation, AES and RSA helpers, zip/xz, trustee +//! signatures, and CCS upload flows. + pub mod acm_json; pub mod acm_transaction; pub mod aes_256_cbc_encrypt; diff --git a/packages/windmill/src/services/consolidation/rsa.rs b/packages/windmill/src/services/consolidation/rsa.rs index 88f86fd30d..489d9f916e 100644 --- a/packages/windmill/src/services/consolidation/rsa.rs +++ b/packages/windmill/src/services/consolidation/rsa.rs @@ -1,13 +1,20 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! RSA key generation and PKCS#12-driven signing via the bundled ECIES Java helper. 
+ use anyhow::{Context, Result}; use openssl::rsa::{Padding, Rsa}; use sequent_core::signatures::ecies_encrypt::ECIES_TOOL_PATH; use sequent_core::signatures::shell::run_shell_command; use tracing::{info, instrument}; -// Function to generate RSA public/private key pair in PEM format +/// Generates a 2048-bit RSA key pair and returns PEM-encoded public and private strings. +/// +/// # Errors +/// +/// OpenSSL key generation or UTF-8 conversion failures. #[instrument(skip_all, err)] pub fn generate_rsa_keys() -> Result<(String, String)> { // Generate a 2048-bit RSA key pair @@ -30,7 +37,11 @@ pub fn generate_rsa_keys() -> Result<(String, String)> { Ok((public_key_pem, private_key_pem)) } -// Function to encrypt data using the RSA private key extracted from a private key PEM string +/// Encrypt data using the RSA private key extracted from a private key PEM string +/// +/// # Errors +/// +/// PEM parse errors or OpenSSL encryption failures. #[instrument(skip_all, err)] pub fn encrypt_with_rsa_private_key(private_key_pem: &str, data: &[u8]) -> Result> { // Parse the private key PEM string to get the RSA structure @@ -51,6 +62,11 @@ pub fn encrypt_with_rsa_private_key(private_key_pem: &str, data: &[u8]) -> Resul Ok(encrypted_data) } +/// Extracts a PEM public key from a `.p12` via the ECIES helper JAR. +/// +/// # Errors +/// +/// Shell command failures from [`run_shell_command`]. pub fn derive_public_key_from_p12(pk12_file_path_string: &str, password: &str) -> Result { let command = format!( "java -jar {} public-key {} {}", @@ -64,6 +80,11 @@ pub fn derive_public_key_from_p12(pk12_file_path_string: &str, password: &str) - Ok(public_pem) } +/// Signs `data_path` with the private key in the PKCS#12 file (RSA branch of the signing tool). +/// +/// # Errors +/// +/// Shell or tool failures; returns base64 signature string on success. 
#[instrument(skip_all, err)] pub fn rsa_sign_data( pk12_file_path_string: &str, diff --git a/packages/windmill/src/services/consolidation/send_transmission_package_service.rs b/packages/windmill/src/services/consolidation/send_transmission_package_service.rs index cb7f7da986..ef1e5eb661 100644 --- a/packages/windmill/src/services/consolidation/send_transmission_package_service.rs +++ b/packages/windmill/src/services/consolidation/send_transmission_package_service.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! HTTP delivery of transmission and audit-log zip files to configured CCS endpoints. + use super::{ create_transmission_package_service::update_transmission_package_annotations, eml_generator::{ @@ -49,10 +52,17 @@ use std::{cmp::Ordering, path::Path}; use tempfile::{tempdir, NamedTempFile}; use tracing::{info, instrument}; +/// Relative path on the CCS server for uploading election-results zip payloads. const SEND_ELECTION_RESULTS_API_PATH: &str = "/api/receiver/v1/acm/election-results"; +/// Relative path on the CCS server for uploading audit-log zip payloads. const SEND_LOGS_API_PATH: &str = "/api/receiver/v1/acm/audit-logs"; +/// Multipart POST of a zip to the given CCS base URL. +/// +/// # Errors +/// +/// I/O, HTTP client, non-success HTTP status, or response body read failures. #[instrument(err)] async fn send_package_to_ccs_server( transmission_package_path: &Path, @@ -107,6 +117,7 @@ async fn send_package_to_ccs_server( Ok(()) } +/// Picks the document with the latest `created_at` among `input_documents`. #[instrument(skip_all)] pub fn get_latest_miru_document(input_documents: &[MiruDocument]) -> Option { let mut documents = input_documents.to_owned(); @@ -128,6 +139,12 @@ pub fn get_latest_miru_document(input_documents: &[MiruDocument]) -> Option // // SPDX-License-Identifier: AGPL-3.0-only + +//! PKCS#12 parsing, ECDSA/RSA signing wrappers, and openssl certificate verification. 
+ use anyhow::{anyhow, Context, Result}; use openssl::pkcs12::Pkcs12; use openssl::pkey::PKey; @@ -12,6 +15,11 @@ use std::io::Read; use tempfile::{tempdir, NamedTempFile, TempPath}; use tracing::{info, instrument}; +/// Returns the OpenSSL [`openssl::pkey::Id`] for the private key inside a PKCS#12 file. +/// +/// # Errors +/// +/// File I/O, PKCS#12 parse errors, or missing private key material. #[instrument(skip_all, err)] pub fn get_pk12_id(p12_path: &str, password: &str) -> Result { // Read the .p12 file @@ -28,6 +36,11 @@ pub fn get_pk12_id(p12_path: &str, password: &str) -> Result Ok(pkey.id()) } +/// Signs `data_path` with ECDSA via the ECIES helper JAR (`sign-ec`). +/// +/// # Errors +/// +/// Shell command failures. #[instrument(skip_all, err)] pub fn ecdsa_sign_data( pk12_file_path_string: &str, @@ -45,6 +58,11 @@ pub fn ecdsa_sign_data( Ok(encrypted_base64) } +/// Writes the leaf certificate from PKCS#12 into a temp `.pem` file via `openssl pkcs12`. +/// +/// # Errors +/// +/// Temp file or `openssl` command failures. pub fn get_p12_cert(p12_file: &NamedTempFile, password: &str) -> Result { let p12_file_path = p12_file.path().to_string_lossy().to_string(); let cert_temp_file = @@ -60,6 +78,11 @@ pub fn get_p12_cert(p12_file: &NamedTempFile, password: &str) -> Result Result { let cert_temp_path_string = p12_cert_path.to_string_lossy().to_string(); @@ -72,6 +95,11 @@ pub fn get_p12_fingerprint(p12_cert_path: &TempPath) -> Result { Ok(fingerprint) } +/// Verifies `p12_cert_path` chains to `root_ca` with `intermediate_cas` as untrusted intermediates. +/// +/// # Errors +/// +/// Tempdir/setup failures, `openssl verify` non-OK output, or shell errors. 
#[instrument(skip_all, err)] pub fn check_certificate_cas( p12_cert_path: &TempPath, diff --git a/packages/windmill/src/services/consolidation/transmission_package.rs b/packages/windmill/src/services/consolidation/transmission_package.rs index fe0173922b..3861f40b41 100644 --- a/packages/windmill/src/services/consolidation/transmission_package.rs +++ b/packages/windmill/src/services/consolidation/transmission_package.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Compresses EML, encrypts payloads, and assembles `er_` / `al_` zip packages for CCS. + use super::{ acm_json::generate_acm_json, aes_256_cbc_encrypt::encrypt_file_aes_256_cbc, @@ -36,9 +39,14 @@ use tempfile::NamedTempFile; use tracing::{info, instrument}; use velvet::pipes::generate_reports::ReportData; +/// Handlebars template object key under public assets used to wrap rendered EML XML. pub const PUBLIC_ASSETS_EML_BASE_TEMPLATE: &str = "eml_base.hbs"; -// returns (base_compressed_xml, eml, eml_hash) +/// Compresses `eml` and returns bytes plus uppercase hex SHA-256 of the original string. +/// +/// # Errors +/// +/// Hashing or xz compression errors. #[instrument(skip_all, err)] pub fn compress_hash_eml(eml: &str) -> Result<(Vec, String)> { let rendered_xml_hash = hash_sha256(eml.as_bytes()) @@ -52,6 +60,11 @@ pub fn compress_hash_eml(eml: &str) -> Result<(Vec, String)> { Ok((compressed_xml, rendered_xml_hash)) } +/// Renders full EML via public-asset template, then xz-compresses and hashes it. +/// +/// # Errors +/// +/// Template fetch/render failures, JSON errors, or [`compress_hash_eml`] errors. #[instrument(skip(reports), err)] pub async fn generate_base_compressed_xml( tally_id: &str, @@ -86,6 +99,11 @@ pub async fn generate_base_compressed_xml( Ok((compressed_xml, render_xml, rendered_xml_hash)) } +/// Writes xz to temp, AES-encrypts to `.exz`, and returns ECIES-wrapped random passphrase (base64). 
+/// +/// # Errors +/// +/// Temp file, openssl encrypt, or ECIES encrypt errors. #[instrument(skip(compressed_xml), err)] async fn generate_encrypted_compressed_xml( compressed_xml: Vec, @@ -108,6 +126,11 @@ async fn generate_encrypted_compressed_xml( Ok((exz_temp_file, encrypted_random_pass_base64)) } +/// Writes `.exz` + `.json` siblings for a station id and zips the temp folder to `output_file_path`. +/// +/// # Errors +/// +/// Filesystem, JSON serialization, or [`compress_folder_to_zip`] errors. #[instrument(skip_all, err)] fn generate_er_final_zip( exz_temp_file_bytes: Vec, @@ -145,6 +168,11 @@ fn generate_er_final_zip( Ok(()) } +/// Builds the audit-log zip (`al_*.zip`) for one CCS destination: logs JSON → xz → encrypt → ACM JSON → zip. +/// +/// # Errors +/// +/// Serialization, encryption, signing, or zip errors in the pipeline. #[instrument(skip(acm_key_pair), err)] pub async fn create_logs_package( time_zone: TimeZone, @@ -207,6 +235,11 @@ pub async fn create_logs_package( Ok(()) } +/// Builds the election-results zip (`er_*.zip`): encrypted xz tally payload plus ACM metadata and signatures. +/// +/// # Errors +/// +/// Same pipeline failures as [`create_logs_package`], but signing the raw EML body instead of logs JSON. #[instrument(skip(compressed_xml, acm_key_pair), err)] pub async fn create_transmission_package( eml_hash: &str, diff --git a/packages/windmill/src/services/consolidation/upload_signature_service.rs b/packages/windmill/src/services/consolidation/upload_signature_service.rs index 50ea7a15fa..b6cdb31907 100644 --- a/packages/windmill/src/services/consolidation/upload_signature_service.rs +++ b/packages/windmill/src/services/consolidation/upload_signature_service.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! SBEI P12 verification, RSA/ECDSA signing of EML payloads, and merging signatures into MIRU transmission state. 
+ use super::{ create_transmission_package_service::{ generate_all_servers_document, update_transmission_package_annotations, @@ -65,6 +68,11 @@ use std::collections::HashMap; use tempfile::NamedTempFile; use tracing::{info, instrument}; +/// Writes updated SBEI user list (with fingerprint) into election-event MIRU annotations. +/// +/// # Errors +/// +/// Missing annotations, JSON errors, or Hasura update failures. #[instrument(skip_all, err)] async fn update_election_event_sbei_users( hasura_transaction: &Transaction<'_>, @@ -102,6 +110,11 @@ async fn update_election_event_sbei_users( .await } +/// Replaces or adds one [`MiruSignature`] and rebuilds the parallel [`ACMTrustee`] list for ACM JSON. +/// +/// # Errors +/// +/// Missing election event, annotations, or SBEI user for the signature’s `sbei_miru_id`. #[instrument(skip_all, err)] async fn update_signatures( hasura_transaction: &Transaction<'_>, @@ -153,6 +166,11 @@ async fn update_signatures( Ok((acm_trustees, new_miru_signatures)) } +/// Extracts the public key PEM from a password-protected PKCS#12 file. +/// +/// # Errors +/// +/// OpenSSL / PKCS#12 parse errors from [`derive_public_key_from_p12`]. #[instrument(skip_all, err)] pub fn derive_public_key_from_private_key( private_key_temp_file: &NamedTempFile, @@ -163,6 +181,12 @@ pub fn derive_public_key_from_private_key( derive_public_key_from_p12(&pk12_file_path_string, password) } +/// Ensures the P12 fingerprint is unique across posts, matches any stored +/// fingerprint for the SBEI, and optionally validates CA chain. +/// +/// # Errors +/// +/// Cert extraction, duplicate use in another election, fingerprint mismatch, or CA validation errors. #[instrument(skip_all, err)] pub fn check_sbei_certificate( transmission_data: &MiruTallySessionData, @@ -217,6 +241,11 @@ pub fn check_sbei_certificate( Ok(input_pk_fingerprint) } +/// Signs the EML temp file with the SBEI’s P12 (RSA or EC) and returns a [`MiruSignature`] bundle. 
+/// +/// # Errors +/// +/// Unsupported key type, or signing / PKCS#12 errors from the OpenSSL stack. #[instrument(skip_all, err)] pub fn create_server_signature( eml_data: NamedTempFile, @@ -253,6 +282,12 @@ pub fn create_server_signature( }) } +/// Verifies the user’s P12, signs the current transmission EML, +/// merges the signature, re-uploads packages, and updates annotations. +/// +/// # Errors +/// +/// Auth/lookup failures, certificate checks, signing, document pipeline, or persistence errors. #[instrument(err)] pub async fn upload_transmission_package_signature_service( tenant_id: &str, diff --git a/packages/windmill/src/services/consolidation/xz_compress.rs b/packages/windmill/src/services/consolidation/xz_compress.rs index 1408079b7c..8bd02fb8c3 100644 --- a/packages/windmill/src/services/consolidation/xz_compress.rs +++ b/packages/windmill/src/services/consolidation/xz_compress.rs @@ -1,14 +1,23 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! In-memory xz compression at a fixed high level. + use anyhow::{Context, Result}; use std::io::prelude::*; use std::io::Cursor; use tracing::instrument; use xz2::read::{XzDecoder, XzEncoder}; +/// xz compression preset passed to [`XzEncoder`]. const XZ_COMPRESSION_LEVEL: u32 = 9; +/// Returns the xz-compressed bytes of `data`. +/// +/// # Errors +/// +/// Compression I/O failures from the `xz2` encoder. #[instrument(skip_all, err)] pub fn xz_compress(data: &[u8]) -> Result> { // Create a cursor for the input data diff --git a/packages/windmill/src/services/consolidation/zip.rs b/packages/windmill/src/services/consolidation/zip.rs index a0ac40f3e4..951c404af9 100644 --- a/packages/windmill/src/services/consolidation/zip.rs +++ b/packages/windmill/src/services/consolidation/zip.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Zip create/extract helpers for folder trees and archives. 
+ use anyhow::{Context, Result}; use std::fs; use std::fs::File; @@ -11,6 +14,11 @@ use walkdir::WalkDir; use zip::read::ZipArchive; use zip::write::{FileOptions, SimpleFileOptions}; +/// Recursively zips `src_dir` into `dst_file` with deflated entries. +/// +/// # Errors +/// +/// Walkdir, zip writer, or filesystem errors. #[instrument(skip_all, err)] pub fn compress_folder_to_zip(src_dir: &Path, dst_file: &Path) -> Result<()> { let path = src_dir.clone(); @@ -48,6 +56,11 @@ pub fn compress_folder_to_zip(src_dir: &Path, dst_file: &Path) -> Result<()> { Ok(()) } +/// Extracts `src_file` into `dst_dir`, preserving basic directory structure. +/// +/// # Errors +/// +/// Archive read errors or filesystem failures while writing members. #[instrument(skip_all, err)] pub fn unzip_file(src_file: &Path, dst_dir: &Path) -> Result<()> { let file = File::open(src_file) diff --git a/packages/windmill/src/services/custom_url.rs b/packages/windmill/src/services/custom_url.rs index 34de8cd661..500e418992 100644 --- a/packages/windmill/src/services/custom_url.rs +++ b/packages/windmill/src/services/custom_url.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Managing custom URLs and hostname mappings. + use super::cloudflare::{get_cloudflare_vars, ApiResponse, CloudflareError}; use reqwest::Client; use rocket::futures::stream::Forward; @@ -12,79 +14,125 @@ use std::fmt; use tracing::{error, info, instrument}; #[derive(Debug, Deserialize)] +/// Cloudflare page rule as returned by the API. pub struct PageRule { + /// Page rule identifier. pub id: String, + /// Match targets for this page rule. pub targets: Vec, + /// Actions applied when targets match. pub actions: Vec, } #[derive(Debug, Serialize, Deserialize)] +/// Cloudflare page rule target entry. pub struct Target { + /// Target type (e.g. "url"). pub target: String, + /// Constraint defining the match condition. 
pub constraint: Constraint, } #[derive(Debug, Serialize, Deserialize)] +/// Constraint used in a page rule target. pub struct Constraint { + /// Match operator (e.g. "matches"). pub operator: String, + /// Operator argument value. pub value: String, } #[derive(Debug, Serialize, Deserialize)] +/// Request body for creating a Cloudflare page rule. struct CreatePageRuleRequest { + /// Match targets (typically URL constraints) for the page rule. targets: Vec, + /// Actions applied when the targets match. actions: Vec, + /// Page rule status ("active"/"disabled"). status: String, } #[derive(Debug, Serialize, Deserialize)] +/// Previously configured custom URLs for tenant endpoints. pub struct PreviousCustomUrls { + /// Login URL prefix. pub login: String, + /// Enrollment URL prefix. pub enrollment: String, + /// SAML URL prefix. pub saml: String, } #[derive(Serialize, Deserialize, Debug)] +/// Request body for creating a DNS record via Cloudflare. pub struct CreateDNSRecordRequest { #[serde(rename = "type")] + /// DNS record type ("A", "CNAME", ...). record_type: String, + /// DNS record name. name: String, + /// DNS record content (IP/hostname). content: String, + /// DNS record TTL in seconds. ttl: u64, + /// Whether DNS proxying is enabled for this record. proxied: bool, } #[derive(Debug, Clone, Serialize, Deserialize)] +/// Forwarding configuration for a URL action. struct ForwardURL { + /// Destination URL. url: String, + /// HTTP status code for the redirect. status_code: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] +/// Heterogeneous action payload values. enum ActionValue { + /// String action value. String(String), + /// Integer action value. Integer(i64), + /// Forwarding URL action value. ForwardURL(ForwardURL), } #[derive(Debug, Serialize, Deserialize)] +/// Cloudflare page rule action entry. struct Action { + /// Action identifier (e.g. "forwarding_url"). id: String, + /// Action payload. 
value: ActionValue, } #[derive(Debug, Serialize, Deserialize)] +/// DNS record entry as returned by Cloudflare. struct DnsRecord { #[serde(rename = "type")] + /// DNS record type ("A", "CNAME", ...). record_type: String, + /// DNS record name. name: String, + /// DNS record content (IP/hostname). content: String, + /// DNS record TTL in seconds. ttl: u32, + /// Whether Cloudflare proxying is enabled for this record. proxied: bool, + /// Cloudflare record identifier. id: String, } #[instrument] +/// Get a Cloudflare page rule by target value. +/// +/// # Errors +/// +/// Returns an error if Cloudflare page rules cannot be retrieved. pub async fn get_page_rule(target_value: &str) -> Result, Box> { info!("target_value {:?}", target_value); let page_rules = get_all_page_rules().await?; @@ -93,12 +141,25 @@ pub async fn get_page_rule(target_value: &str) -> Result, Box Result, Box> { let dns_records = get_all_dns_records().await?; Ok(find_matching_dns_record(dns_records, record_name)) } #[instrument] +/// Set a custom URL for a given origin. +/// +/// # Errors +/// +/// Returns an error if fetching/updating DNS records or page rules fails. +/// +/// # Panics +/// +/// Panics if `key` is not one of: `"login"`, `"enrollment"`, or `"saml"`. pub async fn set_custom_url( origin: &str, redirect_to: &str, @@ -183,6 +244,11 @@ pub async fn set_custom_url( } #[instrument] +/// Retrieve all page rules for the configured Cloudflare zone. +/// +/// # Errors +/// +/// Returns an error if the request fails. async fn get_all_page_rules() -> Result, Box> { let (zone_id, api_key) = get_cloudflare_vars()?; info!("zone_id {:?}", zone_id); @@ -220,6 +286,11 @@ async fn get_all_page_rules() -> Result, Box> { } #[instrument] +/// Retrieve all DNS records for the configured Cloudflare zone. +/// +/// # Errors +/// +/// Returns an error if the request fails. 
async fn get_all_dns_records() -> Result, Box> { let (zone_id, api_key) = get_cloudflare_vars()?; info!("zone_id {:?}", zone_id); @@ -257,6 +328,7 @@ async fn get_all_dns_records() -> Result, Box> { } #[instrument] +/// Find a DNS record whose first label matches `expected_name`. fn find_matching_dns_record(records: Vec, expected_name: &str) -> Option { info!("find_matching_dns_record expected_name:{}", expected_name); for record in records { @@ -275,6 +347,7 @@ fn find_matching_dns_record(records: Vec, expected_name: &str) -> Opt } #[instrument] +/// Find a page rule whose redirect URL matches `expected_redirect_url`. fn find_matching_target(rules: Vec, expected_redirect_url: &str) -> Option { for rule in rules { for action in &rule.actions { @@ -289,6 +362,7 @@ fn find_matching_target(rules: Vec, expected_redirect_url: &str) -> Op } #[instrument] +/// Create a payload for a Cloudflare page rule. fn create_payload(origin: &str, redirect_to: &str) -> CreatePageRuleRequest { let targets = vec![Target { constraint: Constraint { @@ -314,6 +388,7 @@ fn create_payload(origin: &str, redirect_to: &str) -> CreatePageRuleRequest { } #[instrument] +/// Create a payload for a Cloudflare DNS record. fn create_dns_payload(origin: &str) -> CreateDNSRecordRequest { let cloudflare_ip_dns_content = std::env::var("CUSTOM_URLS_IP_DNS_CONTENT") .unwrap_or_else(|_| "default.ip.address".to_string()); @@ -327,6 +402,11 @@ fn create_dns_payload(origin: &str) -> CreateDNSRecordRequest { } } +/// Create a Cloudflare DNS record. +/// +/// # Errors +/// +/// Returns an error if the request fails. pub async fn create_dns_record(redirect_to: &str, dns_prefix: &str) -> Result<(), Box> { let client = Client::new(); let (zone_id, api_key) = match get_cloudflare_vars() { @@ -373,6 +453,11 @@ pub async fn create_dns_record(redirect_to: &str, dns_prefix: &str) -> Result<() } } +/// Update a Cloudflare DNS record. +/// +/// # Errors +/// +/// Returns an error if the request fails. 
pub async fn update_dns_record( id: &str, redirect_to: &str, @@ -423,6 +508,11 @@ pub async fn update_dns_record( } } +/// Update a Cloudflare page rule that forwards `origin` to `redirect_to`. +/// +/// # Errors +/// +/// Returns an error if the request fails. async fn update_page_rule( rule_id: &str, redirect_to: &str, @@ -456,6 +546,11 @@ async fn update_page_rule( } } +/// Create a new Cloudflare page rule that forwards `origin` to `redirect_to`. +/// +/// # Errors +/// +/// Returns an error if the request fails. async fn create_page_rule(redirect_to: &str, origin: &str) -> Result<(), Box> { let (zone_id, api_key) = get_cloudflare_vars()?; let client = Client::new(); diff --git a/packages/windmill/src/services/database.rs b/packages/windmill/src/services/database.rs index ca180d72a6..f63369a305 100644 --- a/packages/windmill/src/services/database.rs +++ b/packages/windmill/src/services/database.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Postgres connection pools for Hasura-tracked data and for the Keycloak database. + use anyhow::{anyhow, Result}; use async_once::AsyncOnce; use celery::export::Arc; @@ -20,11 +22,17 @@ use openssl::ssl::{SslConnector, SslMethod}; use postgres_openssl::MakeTlsConnector; #[derive(Debug, Deserialize)] +/// Postgres connectivity and query limit configuration loaded from the environment. pub struct PgConfig { + /// Deadpool configuration for the Keycloak database. pub keycloak_db: deadpool_postgres::Config, + /// Deadpool configuration for the Hasura database. pub hasura_db: deadpool_postgres::Config, + /// Low limit used for queries that may return many rows. pub low_sql_limit: i32, + /// Default limit used for paginated queries. pub default_sql_limit: i32, + /// Default batch size used for bulk operations. pub default_sql_batch_size: i32, } @@ -41,6 +49,11 @@ impl Default for PgConfig { } impl PgConfig { + /// Load Postgres configuration from environment variables. 
+ /// + /// # Errors + /// + /// Returns an error if configuration cannot be built from the environment or deserialization fails. pub fn from_env() -> Result { Config::builder() .add_source(Environment::default().separator("__")) @@ -52,6 +65,11 @@ impl PgConfig { } #[instrument(err)] +/// Generate a pool for the Keycloak database. +/// +/// # Errors +/// +/// Returns an error if configuration is missing/invalid or the pool cannot be created. pub async fn generate_keycloak_pool() -> Result> { let config = PgConfig::from_env()?; @@ -104,6 +122,11 @@ pub async fn generate_keycloak_pool() -> Result> { } #[instrument(err)] +/// Generate a pool for the Hasura database. +/// +/// # Errors +/// +/// Returns an error if configuration is missing/invalid or the pool cannot be created. pub async fn generate_hasura_pool() -> Result> { let config = PgConfig::from_env()?; @@ -172,10 +195,12 @@ lazy_static! { }); } +/// Return the process-wide Keycloak Postgres pool. pub async fn get_keycloak_pool() -> Arc { KEYCLOAK_POOL.get().await.clone() } +/// Return the process-wide Hasura Postgres pool. pub async fn get_hasura_pool() -> Arc { HASURA_POOL.get().await.clone() } diff --git a/packages/windmill/src/services/datafix/api_datafix.rs b/packages/windmill/src/services/datafix/api_datafix.rs index 825392befa..20a7f0993d 100644 --- a/packages/windmill/src/services/datafix/api_datafix.rs +++ b/packages/windmill/src/services/datafix/api_datafix.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Datafix operations: resolve a datafix-scoped election event, then mutate the +//! matching Keycloak voter (create, update, disable, mark voted, PIN rotation) using admin APIs. use super::types::*; use super::utils::*; @@ -22,6 +24,11 @@ use std::env; use tracing::{error, info, instrument, warn}; /// Disable the voter, datafix users are not actually deleted but just disabled. 
/// Note: voter_id in Datafix API represents the username in Keycloak/Sequent´s system. +/// +/// # Errors +/// +/// Returns a JSON [`DatafixResponse`] with `4xx`/`5xx` when the datafix event cannot be resolved, +/// Keycloak admin calls fail, or the user cannot be edited. #[instrument(skip(hasura_transaction, keycloak_transaction))] pub async fn disable_datafix_voter( hasura_transaction: &Transaction<'_>, @@ -69,7 +76,13 @@ pub async fn disable_datafix_voter( Ok(DatafixResponse::new(Status::Ok)) } -/// Note: voter_id in Datafix API represents the username in Keycloak/Sequent´s system. +/// Creates a new voter in the event realm. +/// Note: `voter_id` in Datafix API represents the username in Keycloak/Sequent´s system. +/// +/// # Errors +/// +/// Returns a JSON error payload when the datafix event is unknown, the area name cannot be matched, +/// birthdate format is invalid, `KEYCLOAK_VOTER_GROUP_NAME` is unset, or Keycloak rejects creation. #[instrument(skip(hasura_transaction))] pub async fn add_datafix_voter( hasura_transaction: &Transaction<'_>, @@ -135,6 +148,10 @@ pub async fn add_datafix_voter( /// There are 2 things that can be updated, the area and the birthdate. /// Note: voter_id in Datafix API represents the username in Keycloak/Sequent´s system. +/// +/// # Errors +/// +/// Same failure modes as [`add_datafix_voter`], plus errors when the user id cannot be resolved or Keycloak update fails. #[instrument(skip(hasura_transaction, keycloak_transaction))] pub async fn update_datafix_voter( hasura_transaction: &Transaction<'_>, @@ -201,6 +218,10 @@ pub async fn update_datafix_voter( /// Mark a voter as having voted via a given channel /// Also disables the voter so it cannot vote online +/// +/// # Errors +/// +/// Returns JSON errors when the datafix event cannot be resolved, Keycloak admin calls fail, or the user cannot be located. 
#[instrument(skip(hasura_transaction, keycloak_transaction))] pub async fn mark_as_voted_via_channel( hasura_transaction: &Transaction<'_>, @@ -252,6 +273,10 @@ pub async fn mark_as_voted_via_channel( /// Unmark a voter as having voted, set the attribute to None /// Also enables the voter +/// +/// # Errors +/// +/// Returns JSON errors when the datafix event cannot be resolved, Keycloak admin calls fail, or the user cannot be located. #[instrument(skip(hasura_transaction, keycloak_transaction))] pub async fn unmark_voter_as_voted( hasura_transaction: &Transaction<'_>, @@ -304,6 +329,11 @@ pub async fn unmark_voter_as_voted( } /// Generate a new password. +/// +/// # Errors +/// +/// Returns JSON errors when the datafix metadata cannot be loaded, the user is disabled/not unique, +/// Keycloak admin calls fail, or password application fails. #[instrument(skip(hasura_transaction, keycloak_transaction))] pub async fn replace_voter_pin( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/datafix/mod.rs b/packages/windmill/src/services/datafix/mod.rs index 860db8c646..84f8b61dde 100644 --- a/packages/windmill/src/services/datafix/mod.rs +++ b/packages/windmill/src/services/datafix/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Datafix service modules: datafix HTTP endpoints and the VoterView SOAP bridge. + pub mod api_datafix; pub mod types; pub mod utils; diff --git a/packages/windmill/src/services/datafix/types.rs b/packages/windmill/src/services/datafix/types.rs index 592f1f301b..4df19a56b6 100644 --- a/packages/windmill/src/services/datafix/types.rs +++ b/packages/windmill/src/services/datafix/types.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! JSON bodies, annotation payloads, and small enums used by the datafix HTTP API. 
use super::utils::{DATAFIX_ID_KEY, DATAFIX_PSW_POLICY_KEY, DATAFIX_VOTERVIEW_REQ_KEY}; use anyhow::{anyhow, Result}; use rand::{distr, Rng}; @@ -14,31 +15,47 @@ use tracing::{instrument, warn}; use crate::postgres::election_event::ElectionEventDatafix; use crate::services::consolidation::eml_generator::ValidateAnnotations; + +/// Request body used to create or update a voter through the datafix API. #[derive(Deserialize, Debug)] pub struct VoterInformationBody { + /// Keycloak username for the voter. pub voter_id: String, + /// Ward segment used to build the canonical area name. pub ward: String, + /// Optional school-board segment appended after the ward when present. pub schoolboard: Option, + /// Optional poll segment appended as the final suffix of the composed area name. pub poll: Option, + /// Optional `YYYY-MM-DD` birthdate. pub birthdate: Option, + /// Whether to enable or disable the account. pub enabled: Option, } +/// Request body to mark a voter as voted via an external channel. #[derive(Deserialize, Debug)] pub struct MarkVotedBody { + /// Keycloak username for the voter. pub voter_id: String, + /// Channel string stored in the `VOTED_CHANNEL` user attribute. pub channel: String, } +/// Standard JSON error response used by datafix endpoints. #[derive(Serialize, Deserialize, Debug)] pub struct DatafixResponse { + /// HTTP status code echoed to API consumers. pub code: u16, + /// Reason phrase for the status. pub message: String, } +/// Convenience alias for JSON-encoded datafix errors. pub type JsonErrorResponse = Json; impl DatafixResponse { + /// Wraps a Rocket [`Status`] inside [`Json`] for uniform error responses. #[instrument] pub fn new(status: Status) -> JsonErrorResponse { Json(DatafixResponse { @@ -48,51 +65,71 @@ impl DatafixResponse { } } +/// VoterView SOAP connection details embedded in election event annotations. 
#[derive(Deserialize, Serialize, Debug)] pub struct VoterviewRequest { + /// SOAP endpoint base URL configured per election event. pub url: String, + /// MVV web-service username. pub usr: String, + /// MVV web-service password. pub psw: String, + /// County/municipality code required by the VoterView SOAP actions. pub county_mun: String, } +/// Structured datafix annotation payloads attached to an election event. #[derive(Deserialize, Serialize, Debug)] pub struct DatafixAnnotations { + /// Opaque id advertised to datafix API clients. pub id: String, + /// Rules for generating replacement passwords. pub password_policy: PasswordPolicy, + /// Credentials and endpoint data for outbound VoterView synchronization. pub voterview_request: VoterviewRequest, } +/// How generated passwords are combined with the voter id. #[derive(Default, Display, Serialize, Deserialize, Debug, Clone, EnumString)] pub enum BasePolicy { + /// Concatenate the voter id with the generated secret (used for legacy PIN formats). #[strum(serialize = "id-password-concatenated")] #[serde(rename = "id-password-concatenated")] IdPswConcat, + /// Use only the generated secret without prefixing the voter id. #[default] #[strum(serialize = "password-only")] #[serde(rename = "password-only")] PswOnly, } +/// Character set used when generating random passwords. #[derive(Default, Display, Serialize, Deserialize, Debug, Clone, EnumString)] pub enum CharactersPolicy { + /// Digits-only secret. #[strum(serialize = "numeric")] #[serde(rename = "numeric")] Numeric, + /// Alphanumeric secret. #[default] #[strum(serialize = "alphanumeric")] #[serde(rename = "alphanumeric")] Alphanumeric, } +/// Password generation policy stored in election event annotations. #[derive(Deserialize, Serialize, Debug)] pub struct PasswordPolicy { + /// Whether the voter id is prefixed onto the generated token. base: BasePolicy, + /// Length of the randomly generated portion (digits or alphanumeric run). 
size: usize, + /// Character set policy for the generated portion. characters: CharactersPolicy, } impl PasswordPolicy { + /// Builds a credential string for `voter_id` following `base` and `characters` rules. #[instrument] pub fn generate_password(self, voter_id: &str) -> String { let pin = match self.characters { @@ -151,8 +188,11 @@ impl ValidateAnnotations for ElectionEventDatafix { } } +/// Supported SOAP request types for the VoterView integration. #[derive(Display, Debug, Clone)] pub enum SoapRequest { + /// `SetVoted` SOAP action after an internet ballot is accepted. SetVoted, + /// `SetNotVoted` SOAP action when a vote must be rolled back in VoterView. SetNotVoted, } diff --git a/packages/windmill/src/services/datafix/utils.rs b/packages/windmill/src/services/datafix/utils.rs index b90592a9e4..dc94369fd9 100644 --- a/packages/windmill/src/services/datafix/utils.rs +++ b/packages/windmill/src/services/datafix/utils.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Shared helpers and constants for datafix flows. use super::types::*; use crate::postgres::area::get_event_areas; use crate::postgres::election_event::get_election_event_by_id; @@ -18,8 +19,11 @@ use sequent_core::types::keycloak::{ use std::collections::HashMap; use tracing::{error, info, instrument, warn}; +/// Annotation key storing the opaque datafix id that API clients present. pub const DATAFIX_ID_KEY: &str = "datafix:id"; +/// Annotation key storing JSON for password generation rules. pub const DATAFIX_PSW_POLICY_KEY: &str = "datafix:password_policy"; +/// Annotation key storing JSON credentials for the VoterView SOAP integration. 
pub const DATAFIX_VOTERVIEW_REQ_KEY: &str = "datafix:voterview_request"; /// Returns true if the voter has voted via Sequent´s system - @@ -46,6 +50,11 @@ pub fn voted_via_not_internet_channel(attributes: &HashMap>) } } /// Gets the election_event_id and the DatafixAnnotations of the event that has the datafix id in its annotations. +/// +/// # Errors +/// +/// Returns an HTTP-shaped [`JsonErrorResponse`] when election events cannot be loaded, no matching +/// datafix id is found, or annotation deserialization fails for the matching event. #[instrument(skip(hasura_transaction))] pub async fn get_event_id_and_datafix_annotations( hasura_transaction: &Transaction<'_>, @@ -97,6 +106,11 @@ pub async fn get_event_id_and_datafix_annotations( } /// Returns the UserArea object. If it cannot find the area id by name returns an error. +/// +/// # Errors +/// +/// Returns [`JsonErrorResponse`] when event areas cannot be loaded or when the composed area name +/// does not exist for the election event. #[instrument(skip_all)] pub async fn find_user_area_by_name( hasura_transaction: &Transaction<'_>, @@ -143,7 +157,12 @@ pub async fn find_user_area_by_name( } } -/// Get user id by username +/// Get user id by username. +/// +/// # Errors +/// +/// Returns [`JsonErrorResponse`] when the query fails, no user exists, or more than one user +/// matches the username filter. #[instrument(skip(keycloak_transaction))] pub async fn get_user_id( keycloak_transaction: &Transaction<'_>, @@ -171,6 +190,10 @@ pub async fn get_user_id( } /// Get the ElectionEvent and check if its a datafix election event (has datafix:id annotations). +/// +/// # Errors +/// +/// Propagates errors from [`get_election_event_by_id`]. 
#[instrument(skip(hasura_transaction), err)] pub async fn is_datafix_election_event_by_id( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/datafix/voterview_requests.rs b/packages/windmill/src/services/datafix/voterview_requests.rs index c67b8d8f85..1043864bed 100644 --- a/packages/windmill/src/services/datafix/voterview_requests.rs +++ b/packages/windmill/src/services/datafix/voterview_requests.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! SOAP request formatting and sending for the VoterView integration. + use super::types::*; use crate::postgres::election_event::ElectionEventDatafix; use crate::services::consolidation::eml_generator::ValidateAnnotations; @@ -12,6 +14,7 @@ use sequent_core::util::date_time::generate_timestamp; use tracing::{info, instrument}; impl SoapRequest { + /// Builds the SOAP XML envelope for a `SetNotVoted` request. fn get_set_not_voted_body( annotations: &DatafixAnnotations, voter_id: &str, @@ -36,6 +39,7 @@ impl SoapRequest { "# ) } + /// Builds the SOAP XML envelope for a `SetVoted` request. fn get_set_voted_body( annotations: &DatafixAnnotations, voter_id: &str, @@ -62,6 +66,7 @@ impl SoapRequest { ) } + /// Returns the SOAP body for the request type. pub fn get_body( &self, annotations: &DatafixAnnotations, @@ -77,6 +82,12 @@ impl SoapRequest { } } +/// Sends a VoterView SOAP request for the given `req_type` using event annotations for endpoint and credentials. +/// +/// # Errors +/// +/// Returns an error if `username` is `None`, annotations cannot be parsed, the HTTP request fails, +/// or the endpoint replies with a non-success status. #[instrument(skip(election_event), err)] pub async fn send( req_type: SoapRequest, @@ -153,6 +164,7 @@ pub async fn send( } } +/// Parses a tag from the response text. 
pub fn parse_tag(open_tag: &str, close_tag: &str, response_txt: &str) -> Option { match response_txt.split(open_tag).collect::>() { after if after.len() > 1 => match after[1].split(close_tag).collect::>() { diff --git a/packages/windmill/src/services/delete_election_event.rs b/packages/windmill/src/services/delete_election_event.rs index e350014e90..bbf273d9cc 100644 --- a/packages/windmill/src/services/delete_election_event.rs +++ b/packages/windmill/src/services/delete_election_event.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Coordinated deletion of an election event and dependent rows. + use super::jwks::remove_realm_jwks; use super::protocol_manager::{get_b3_pgsql_client, get_election_board}; use crate::postgres::election::get_elections; @@ -16,6 +19,11 @@ use tracing::info; use tracing::{event, instrument, Level}; #[instrument(err)] +/// Delete a Keycloak realm if it exists. +/// +/// # Errors +/// +/// Returns an error if Keycloak cannot be reached or realm deletion fails. pub async fn delete_keycloak_realm(realm: &str) -> Result<()> { let client = KeycloakAdminClient::new().await?; remove_realm_jwks(realm).await?; @@ -39,6 +47,11 @@ pub async fn delete_keycloak_realm(realm: &str) -> Result<()> { } #[instrument(err)] +/// Delete B3 boards for an election event and its elections. +/// +/// # Errors +/// +/// Returns an error if the B3 client cannot be created or boards cannot be deleted. pub async fn delete_event_b3( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -60,6 +73,11 @@ pub async fn delete_event_b3( } #[instrument(err)] +/// Delete B3 boards for an election event and the provided election ids, if present. +/// +/// # Errors +/// +/// Returns an error if environment configuration is missing or B3 operations fail. 
pub async fn delete_election_event_b3( tenant_id: &str, election_event_id: &str, @@ -88,6 +106,11 @@ pub async fn delete_election_event_b3( } #[instrument(err)] +/// Delete the immudb database associated with an election event, if it exists. +/// +/// # Errors +/// +/// Returns an error if immudb cannot be reached or database deletion fails. pub async fn delete_election_event_immudb(tenant_id: &str, election_event_id: &str) -> Result<()> { let mut client = get_immudb_client().await?; let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; @@ -110,6 +133,11 @@ pub async fn delete_election_event_immudb(tenant_id: &str, election_event_id: &s } #[instrument(err)] +/// Delete S3 documents associated with an election event (private and public prefixes). +/// +/// # Errors +/// +/// Returns an error if bucket resolution fails or S3 deletion fails. pub async fn delete_election_event_related_documents( tenant_id: &str, election_event_id: &str, diff --git a/packages/windmill/src/services/documents.rs b/packages/windmill/src/services/documents.rs index 6a105486c0..ab3a212ee3 100644 --- a/packages/windmill/src/services/documents.rs +++ b/packages/windmill/src/services/documents.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Document uploads, storage keys, and public document URL generation. + use crate::postgres::document::insert_document; use crate::services::database::{get_hasura_pool, get_keycloak_pool}; use anyhow::{anyhow, Context, Result as AnyhowResult}; @@ -19,6 +21,11 @@ use sequent_core::services::date::ISO8601; use sequent_core::services::s3; #[instrument(skip(hasura_transaction), err)] +/// Upload a document to S3 and insert the document record into the database. +/// +/// # Errors +/// +/// Returns an error if inserting the document record or uploading to S3 fails. 
pub async fn upload_and_return_document( hasura_transaction: &Transaction<'_>, file_path: &str, @@ -79,6 +86,10 @@ pub async fn upload_and_return_document( /// The document is associated with the given election event ID and tenant ID. /// The Document path does not include the document ID and will be used /// for when the UI does not have access to the document ID. +/// +/// # Errors +/// +/// Returns an error if inserting the document record or uploading to S3 fails. #[instrument(skip(hasura_transaction), err)] pub async fn upload_and_return_public_event_document( hasura_transaction: &Transaction<'_>, @@ -123,6 +134,11 @@ pub async fn upload_and_return_public_event_document( } #[instrument(skip(hasura_transaction), err)] +/// Insert a document record into the database and generate an upload URL. +/// +/// # Errors +/// +/// Returns an error if the document record cannot be inserted or an upload URL cannot be generated. pub async fn get_upload_url( hasura_transaction: &Transaction<'_>, name: &str, @@ -161,6 +177,11 @@ pub async fn get_upload_url( } #[instrument(err)] +/// Get the URL for a document from S3. +/// +/// # Errors +/// +/// Returns an error if the document cannot be loaded or its S3 URL cannot be generated. pub async fn get_document_url( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -207,6 +228,11 @@ pub async fn get_document_url( } #[instrument(err)] +/// Get a document from S3 and return it as a temporary file. +/// +/// # Errors +/// +/// Returns an error if the object cannot be retrieved from S3 into a temporary file. 
pub async fn get_document_as_temp_file( tenant_id: &str, document: &Document, diff --git a/packages/windmill/src/services/election.rs b/packages/windmill/src/services/election.rs index ecb9554a13..8656fe7f9e 100644 --- a/packages/windmill/src/services/election.rs +++ b/packages/windmill/src/services/election.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Election record updates, presentation payloads, and helpers. + use anyhow::{anyhow, Context, Result}; use deadpool_postgres::Client as DbClient; use deadpool_postgres::Transaction; @@ -16,6 +19,8 @@ use uuid::Uuid; use crate::postgres::election::get_elections; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +/// Lightweight election data used by list and lookup endpoints. +#[allow(missing_docs)] pub struct ElectionHead { pub id: String, pub name: String, @@ -38,6 +43,11 @@ impl TryFrom for ElectionHead { } #[instrument(err)] +/// List elections belonging to a given election event. +/// +/// # Errors +/// +/// Returns an error if the elections cannot be retrieved or converted. pub async fn get_election_event_elections( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/services/election_dates.rs b/packages/windmill/src/services/election_dates.rs index d261efbbdf..238269996d 100644 --- a/packages/windmill/src/services/election_dates.rs +++ b/packages/windmill/src/services/election_dates.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing voting calendar and scheduled-date materialization for elections and events. 
+ use crate::postgres::election::*; use crate::postgres::scheduled_event::*; use crate::services::election_event_status::get_election_event_status; @@ -15,6 +18,11 @@ use std::str::FromStr; use tracing::instrument; #[instrument(skip(hasura_transaction), err)] +/// Create, update, or archive a scheduled event for an election date. +/// +/// # Errors +/// +/// Returns an error if the election cannot be loaded or scheduled event operations fail. pub async fn manage_dates( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -104,6 +112,11 @@ pub async fn manage_dates( } #[instrument(err, skip_all)] +/// Compute voting period dates and attach scheduled event dates for an election. +/// +/// # Errors +/// +/// Returns an error if scheduled dates cannot be prepared. pub fn get_election_dates( election: &Election, scheduled_events: Vec, diff --git a/packages/windmill/src/services/election_event_board.rs b/packages/windmill/src/services/election_event_board.rs index 6a11f5a8c6..9ce8c6c5e2 100644 --- a/packages/windmill/src/services/election_event_board.rs +++ b/packages/windmill/src/services/election_event_board.rs @@ -2,15 +2,21 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Bulletin board name resolution and wiring for an election event's verifiable log. + use b3::client::pgsql::B3IndexRow; use sequent_core::serialization::deserialize_with_path::deserialize_value; use serde::{Deserialize, Serialize}; use serde_json::value::Value; #[derive(Deserialize, Serialize, Debug, Clone)] +/// Serializable view of a B3 board reference stored in event configuration. pub struct BoardSerializable { + /// Internal numeric board id. pub id: i64, + /// Database/board name used by B3/immudb backends. pub database_name: String, + /// Whether the board has been archived. pub is_archived: bool, } @@ -24,6 +30,7 @@ impl From for BoardSerializable { } } +/// Get the database name for a B3 board reference. 
pub fn get_election_event_board(bulletin_board_reference: Option) -> Option { bulletin_board_reference.and_then(|board_json| { let opt_board: Option = deserialize_value(board_json).ok(); diff --git a/packages/windmill/src/services/election_event_dates.rs b/packages/windmill/src/services/election_event_dates.rs index 46eb0e7ed7..891447cdb5 100644 --- a/packages/windmill/src/services/election_event_dates.rs +++ b/packages/windmill/src/services/election_event_dates.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Managing election-event-specific scheduling windows and derived date fields. + use std::str::FromStr; use crate::postgres::election_event::get_election_event_by_id; @@ -14,6 +16,11 @@ use sequent_core::types::scheduled_event::*; use tracing::{info, instrument}; #[instrument(skip(hasura_transaction), err)] +/// Create, update, or archive a scheduled event date for an election event. +/// +/// # Errors +/// +/// Returns an error if scheduled event operations fail. pub async fn manage_dates( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/services/election_event_statistics.rs b/packages/windmill/src/services/election_event_statistics.rs index 03773364dd..2b8cf7c63c 100644 --- a/packages/windmill/src/services/election_event_statistics.rs +++ b/packages/windmill/src/services/election_event_statistics.rs @@ -1,15 +1,20 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing statistics for a single election event. + use anyhow::Result; use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; use tokio_postgres::row::Row; use tracing::instrument; -/** - * Returns the count of areas per election event - */ +/// Return the number of areas configured for an election event. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. 
#[instrument(skip(transaction), err)] pub async fn get_count_areas( transaction: &Transaction<'_>, @@ -51,9 +56,11 @@ pub async fn get_count_areas( Ok(total_areas) } -/** - * Returns the count of elections in an election event - */ +/// Return the number of elections belonging to an election event. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. #[instrument(skip(transaction), err)] pub async fn get_count_elections( transaction: &Transaction<'_>, @@ -95,6 +102,11 @@ pub async fn get_count_elections( Ok(total_elections) } +/// Increment election event statistics counters for communications. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. #[instrument(skip(transaction), err)] pub async fn update_election_event_statistics( transaction: &Transaction<'_>, @@ -140,6 +152,11 @@ pub async fn update_election_event_statistics( Ok(()) } +/// Return the number of distinct voters that have cast a vote in an election event. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. #[instrument(skip(transaction), err)] pub async fn get_count_distinct_voters( transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/election_event_status.rs b/packages/windmill/src/services/election_event_status.rs index 2280f2c3a4..7dd0e67b2f 100644 --- a/packages/windmill/src/services/election_event_status.rs +++ b/packages/windmill/src/services/election_event_status.rs @@ -1,8 +1,11 @@ -use std::collections::HashMap; - // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Reading and updating the aggregate voting status for election events. 
+ +use std::collections::HashMap; + use crate::postgres::election::{get_election_by_id, get_elections, update_election_voting_status}; use crate::postgres::election_event::{get_election_event_by_id, update_election_event_status}; use anyhow::{anyhow, Context, Result}; @@ -15,15 +18,23 @@ use tracing::{event, info, instrument, Level}; use super::voting_status::update_board_on_status_change; +/// Deserialize an `ElectionEventStatus` from JSON. pub fn get_election_event_status(status_json_opt: Option) -> Option { status_json_opt.and_then(|status_json| deserialize_value(status_json).ok()) } +/// Deserialize an `ElectionStatus` from JSON. pub fn get_election_status(status_json_opt: Option) -> Option { status_json_opt.and_then(|status_json| deserialize_value(status_json).ok()) } #[instrument(err)] +/// Update the voting status for an election event and its elections. +/// +/// # Errors +/// +/// Returns an error if the election event/elections cannot be loaded, transitions are invalid, +/// or persistence/board updates fail. pub async fn update_event_voting_status( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -191,6 +202,11 @@ pub async fn update_event_voting_status( } #[instrument(err)] +/// Update the voting status for a single election within an event. +/// +/// # Errors +/// +/// Returns an error if the election/event cannot be loaded, transitions are invalid, or updates fail. pub async fn update_election_voting_status_impl( tenant_id: String, user_id: Option<&str>, diff --git a/packages/windmill/src/services/election_statistics.rs b/packages/windmill/src/services/election_statistics.rs index 0ea01a4045..f89a46af59 100644 --- a/packages/windmill/src/services/election_statistics.rs +++ b/packages/windmill/src/services/election_statistics.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Cross-election statistical queries used in dashboards and operational reports. 
+ use anyhow::Result; use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; @@ -8,6 +11,11 @@ use tokio_postgres::row::Row; use tracing::instrument; #[instrument(skip(transaction), err)] +/// Increment election-level statistics counters for communications. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. pub async fn update_election_statistics( transaction: &Transaction<'_>, tenant_id: &str, @@ -56,6 +64,11 @@ pub async fn update_election_statistics( } #[instrument(skip(transaction), err)] +/// Return the number of distinct voters that have cast a vote in an election. +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. pub async fn get_count_distinct_voters( transaction: &Transaction<'_>, tenant_id: &str, @@ -102,6 +115,11 @@ pub async fn get_count_distinct_voters( } #[instrument(skip(transaction), err)] +/// Return the number of areas associated with an election (via contests). +/// +/// # Errors +/// +/// Returns an error if SQL preparation/execution fails or ids are invalid UUIDs. pub async fn get_count_areas( transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/services/electoral_log.rs b/packages/windmill/src/services/electoral_log.rs index 118ffb7913..63fc424e0f 100644 --- a/packages/windmill/src/services/electoral_log.rs +++ b/packages/windmill/src/services/electoral_log.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Managing electoral log access: filters, ordering, cast-vote listings, and message counts. + use crate::services::celery_app::get_celery_app; use crate::services::database::PgConfig; use crate::services::insert_cast_vote::hash_voter_id; @@ -44,7 +46,9 @@ use tempfile::NamedTempFile; use tokio_stream::StreamExt; use tracing::{event, info, instrument, warn, Level}; +/// Maximum number of rows fetched from immudb in a single query. 
pub const IMMUDB_ROWS_LIMIT: usize = 2500; +/// Default maximum number of rows returned per page in list endpoints. pub const MAX_ROWS_PER_PAGE: usize = 50; /// Ballot_id input is the first half of the original hash which is stored in the electoral log. @@ -52,11 +56,15 @@ pub const BALLOT_ID_LENGTH_BYTES: usize = STRAND_HASH_LENGTH_BYTES / 2; /// Ballot_id input is in HEX, each byte is represented in 2 chars. pub const BALLOT_ID_LENGTH_CHARS: usize = BALLOT_ID_LENGTH_BYTES * 2; +/// Helper for creating and posting signed electoral-log messages. pub struct ElectoralLog { + /// Signing material used to build electoral-log messages. pub(crate) sd: SigningData, + /// immudb database name where the log is stored. pub(crate) elog_database: String, } +/// If the list contains exactly one election id, return it; otherwise return `None`. pub fn flatten_election_ids(election_ids: Option>) -> Option { election_ids.and_then(|ids| { if ids.len() == 1 { @@ -69,6 +77,11 @@ pub fn flatten_election_ids(election_ids: Option>) -> Option impl ElectoralLog { #[instrument(err, name = "ElectoralLog::new")] + /// Create a new ElectoralLog. + /// + /// # Errors + /// + /// Returns an error if the protocol manager cannot be loaded or the event id is missing. pub async fn new( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -97,6 +110,11 @@ impl ElectoralLog { } #[instrument(skip(sender_sk), err)] + /// Create a new ElectoralLog from a signing key. + /// + /// # Errors + /// + /// Returns an error if the protocol manager cannot be loaded or the election event id is missing. pub async fn new_from_sk( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -127,6 +145,10 @@ impl ElectoralLog { /// We need to pass in the log database because the vault /// will post a public key message if it needs to generate /// a signing key. + /// + /// # Errors + /// + /// Returns an error if the protocol manager cannot be loaded.
#[instrument(skip(voter_signing_key), err)] pub async fn for_voter( hasura_transaction: &Transaction<'_>, @@ -159,6 +181,10 @@ impl ElectoralLog { /// We need to pass in the log database because the vault /// will post a public key message if it needs to generates /// a signing key. + /// + /// # Errors + /// + /// Returns an error if one of the operations fails. #[instrument(err, skip(hasura_transaction))] pub async fn for_admin_user( hasura_transaction: &Transaction<'_>, @@ -197,8 +223,12 @@ impl ElectoralLog { }) } - /// Posts a voter's public key #[instrument(err)] + /// Posts a voter's public key + /// + /// # Errors + /// + /// Returns an error if the protocol manager cannot be loaded or the message cannot be converted. pub async fn post_voter_pk( hasura_transaction: &Transaction<'_>, elog_database: &str, @@ -262,6 +292,10 @@ impl ElectoralLog { /// in the context of one event and the notification will only /// be present in its log, even if the corresponding signing private key /// would be used in other events. + /// + /// # Errors + /// + /// Returns an error if the protocol manager cannot be loaded or the message cannot be converted. pub async fn post_admin_pk( hasura_transaction: &Transaction<'_>, elog_database: &str, @@ -311,6 +345,11 @@ impl ElectoralLog { } #[instrument(skip(self, pseudonym_h, vote_h))] + /// Post a cast vote. + /// + /// # Errors + /// + /// Returns an error if the message cannot be converted. pub async fn post_cast_vote( &self, tenant_id: String, @@ -363,6 +402,11 @@ impl ElectoralLog { } #[instrument(skip(self, pseudonym_h))] + /// Post a cast vote error message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built/serialized or enqueueing fails. pub async fn post_cast_vote_error( &self, tenant_id: String, @@ -415,6 +459,10 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. 
pub async fn post_election_published( &self, event_id: String, @@ -441,6 +489,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post an election open message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_election_open( &self, event_id: String, @@ -466,6 +519,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post an election pause message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_election_pause( &self, event_id: String, @@ -490,6 +548,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post an election close message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_election_close( &self, event_id: String, @@ -516,6 +579,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a keycloak event. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_keycloak_event( &self, event_id: String, @@ -540,6 +608,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a keygen message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_keygen( &self, event_id: String, @@ -555,6 +628,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a key insertion start message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_key_insertion_start( &self, event_id: String, @@ -572,6 +650,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a key insertion message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_key_insertion( &self, event_id: String, @@ -597,6 +680,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally open message. 
+ /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_tally_open( &self, event_id: String, @@ -614,6 +702,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally close message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub(crate) async fn post_tally_close( &self, event_id: String, @@ -631,6 +724,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a send template message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_send_template( &self, message: Option, @@ -652,6 +750,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally resumed with resolution message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_tally_resumed_with_resolution( &self, event_id: String, @@ -670,6 +773,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally paused pending resolution message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_tally_paused_pending_resolution( &self, event_id: String, @@ -688,6 +796,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally tie resolved message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_tally_tie_resolved( &self, event_id: String, @@ -717,6 +830,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a tally tie resolution updated message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. pub async fn post_tally_tie_resolution_updated( &self, event_id: String, @@ -746,6 +864,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Post a certificate auth event message. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built or posted. 
pub async fn post_certificate_auth_event( &self, event_id: String, @@ -767,6 +890,11 @@ impl ElectoralLog { } #[instrument(skip(self), err)] + /// Post a message to the electoral log. + /// + /// # Errors + /// + /// Returns an error if the message cannot be converted. async fn post(&self, message: &Message) -> Result<()> { let board_message: ElectoralLogMessage = message.try_into()?; let ms = vec![board_message]; @@ -788,6 +916,10 @@ impl ElectoralLog { } /// Builds a keycloak event message and returns the resulting ElectoralLogMessage. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built. pub fn build_keycloak_event_message( &self, event_id: String, @@ -814,6 +946,10 @@ impl ElectoralLog { } /// Builds a send-template message and returns the resulting ElectoralLogMessage. + /// + /// # Errors + /// + /// Returns an error if the message cannot be built. pub fn build_send_template_message( &self, message_body: Option, @@ -840,6 +976,11 @@ impl ElectoralLog { } #[instrument(skip(self))] + /// Import electoral log messages from a CSV file. + /// + /// # Errors + /// + /// Returns an error if the message cannot be imported. pub async fn import_from_csv(&self, logs_file: &NamedTempFile) -> Result<()> { let batch_size: usize = PgConfig::from_env()?.default_sql_batch_size.try_into()?; let mut rdr = csv::Reader::from_reader(logs_file); @@ -886,6 +1027,7 @@ impl ElectoralLog { #[derive(Debug, Deserialize, Hash, PartialEq, Eq, EnumString, Display, Clone)] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] +#[allow(missing_docs)] pub enum OrderField { Id, Created, @@ -903,6 +1045,8 @@ pub enum OrderField { } #[derive(Deserialize, Debug, Default, Clone)] +/// Body for the get electoral log request. 
+#[allow(missing_docs)] pub struct GetElectoralLogBody { pub tenant_id: String, pub election_event_id: String, @@ -918,6 +1062,10 @@ pub struct GetElectoralLogBody { impl GetElectoralLogBody { // Returns the SQL clauses related to the request along with the parameters + /// + /// # Errors + /// + /// Returns an error if the SQL clauses cannot be built. #[instrument(ret)] fn as_sql(&self, to_count: bool) -> Result<(String, Vec)> { let mut clauses = Vec::new(); @@ -1080,6 +1228,8 @@ impl GetElectoralLogBody { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Row for the electoral log. +#[allow(missing_docs)] pub struct ElectoralLogRow { pub id: i64, pub created: i64, @@ -1090,7 +1240,10 @@ pub struct ElectoralLogRow { pub user_id: Option, pub username: Option, } + +/// Parsed `statement.head` fields from a serialized electoral log message. #[derive(Deserialize, Serialize, Debug, Clone)] +#[allow(missing_docs)] pub struct StatementHeadDataString { pub event: String, pub kind: String, @@ -1101,34 +1254,46 @@ pub struct StatementHeadDataString { } impl ElectoralLogRow { + /// Database id of the log row. pub fn id(&self) -> i64 { self.id } + /// Row creation time. pub fn created(&self) -> i64 { self.created } + /// Statement timestamp from immudb. pub fn statement_timestamp(&self) -> i64 { self.statement_timestamp } + /// Statement kind string (e.g. cast vote vs audit). pub fn statement_kind(&self) -> &str { &self.statement_kind } + /// JSON string of the deserialized message payload. pub fn message(&self) -> &str { &self.message } + /// User id associated with the statement, if present. pub fn user_id(&self) -> Option<&str> { self.user_id.as_deref() } + /// Username associated with the statement, if present. pub fn username(&self) -> Option<&str> { self.username.as_deref() } + /// Parse `statement.head` from [`Self::message`] JSON. + /// + /// # Errors + /// + /// Returns an error if JSON is invalid or expected `statement` / `head` keys are missing. 
pub fn statement_head_data(&self) -> Result { let message: serde_json::Value = deserialize_str(&self.message).map_err(|err| { anyhow!(format!( @@ -1247,6 +1412,8 @@ impl TryFrom<&Row> for ElectoralLogRow { } #[derive(Serialize, Deserialize, Debug, Clone, Default)] +/// One cast-vote row extracted from the electoral log for API responses. +#[allow(missing_docs)] pub struct CastVoteEntry { pub statement_timestamp: i64, pub statement_kind: String, @@ -1255,13 +1422,21 @@ pub struct CastVoteEntry { pub message: Option, } +/// Paginated cast-vote messages list plus total match count. #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct CastVoteMessagesOutput { + /// Filtered entries (up to the requested page size). pub list: Vec, + /// Total rows matching the immudb filter before client-side ballot trimming. pub total: usize, } impl CastVoteEntry { + /// Build a [`CastVoteEntry`] when `entry` is a cast-vote statement. + /// + /// # Errors + /// + /// Returns an error if the binary message cannot be deserialized. pub fn from_elog_message(entry: &ElectoralLogMessage) -> Result, anyhow::Error> { let ballot_id = entry.ballot_id.clone().unwrap_or_default(); let username = entry.username.clone(); @@ -1280,6 +1455,11 @@ impl CastVoteEntry { } #[instrument(err)] +/// List electoral log messages matching the provided filters. +/// +/// # Errors +/// +/// Returns an error if the immudb session cannot be opened, SQL cannot be built, or the query fails. 
pub async fn list_electoral_log(input: GetElectoralLogBody) -> Result> { let mut client: Client = get_immudb_client().await?; let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; @@ -1354,6 +1534,7 @@ pub async fn list_electoral_log(input: GetElectoralLogBody) -> Result Result { let mut client = get_immudb_client().await?; let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; diff --git a/packages/windmill/src/services/event_list.rs b/packages/windmill/src/services/event_list.rs index facaf3ee3b..00ce901131 100644 --- a/packages/windmill/src/services/event_list.rs +++ b/packages/windmill/src/services/event_list.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing scheduled maintenance event listing and pagination. + use crate::postgres::election_event::get_election_event_by_id; use crate::{ postgres::scheduled_event::{insert_new_scheduled_event, insert_scheduled_event}, @@ -19,6 +22,8 @@ use strum_macros::EnumString; use tracing::{info, instrument}; #[derive(Serialize, Deserialize, Debug, Clone)] +/// Single scheduled event row formatted for the event list endpoint. +#[allow(clippy::missing_docs_in_private_items)] pub struct GetEventListOutput { election: String, schedule: Option, @@ -30,14 +35,19 @@ pub struct GetEventListOutput { } #[derive(Serialize, Deserialize, Debug)] +/// Paginated output for the event list endpoint. pub struct EventListOutput { + /// Page items. pub items: Vec, + /// Total number of matching items. pub total: i32, } #[derive(Debug, Deserialize, Hash, PartialEq, Eq, EnumString)] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] +/// Column or sort key accepted by the event list query builder. +#[allow(missing_docs)] pub enum OrderField { Election, EventType, @@ -47,6 +57,8 @@ pub enum OrderField { } #[derive(Debug, Deserialize)] +/// Input parameters for listing scheduled events. 
+#[allow(missing_docs)] pub struct GetEventListInput { pub tenant_id: String, pub election_event_id: String, diff --git a/packages/windmill/src/services/export/export_application.rs b/packages/windmill/src/services/export/export_application.rs index 1bb466ef83..0c87cb6346 100644 --- a/packages/windmill/src/services/export/export_application.rs +++ b/packages/windmill/src/services/export/export_application.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV export of voter applications for an election event: load rows from Postgres, serialize to CSV, +//! upload to the database and s3. use crate::postgres::application::get_applications_by_election; use crate::services::database::get_hasura_pool; use crate::services::documents::upload_and_return_document; @@ -17,6 +19,11 @@ use sequent_core::util::temp_path::{ use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; +/// Loads all [`Application`] rows for the given tenant/event, optionally filtered by `election_id`. +/// +/// # Errors +/// +/// Propagates database errors from [`get_applications_by_election`]. #[instrument(err, skip(transaction))] pub async fn read_export_data( transaction: &Transaction<'_>, @@ -36,6 +43,11 @@ pub async fn read_export_data( Ok(applications) } +/// Writes the application rows to a CSV temp file and registers it as a document. +/// +/// # Errors +/// +/// Returns an error when CSV serialization fails, the temp file cannot be written, +/// no rows are provided, or the document upload fails. #[instrument(err, skip(transaction))] pub async fn write_export_document( transaction: &Transaction<'_>, @@ -116,6 +129,11 @@ pub async fn write_export_document( } } +/// Orchestrates read + write to the database and s3. +/// +/// # Errors +/// +/// Propagates errors from [`read_export_data`] or [`write_export_document`].
#[instrument(err)] pub async fn process_export( tenant_id: &str, diff --git a/packages/windmill/src/services/export/export_ballot_publication.rs b/packages/windmill/src/services/export/export_ballot_publication.rs index 8463440e54..7108a11e60 100644 --- a/packages/windmill/src/services/export/export_ballot_publication.rs +++ b/packages/windmill/src/services/export/export_ballot_publication.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Ballot publication exports: JSON bundles of per-publication ballot EMLs and retrieval of the event config artifact from public object storage. use crate::postgres::ballot_publication::get_ballot_publication; use crate::postgres::ballot_style::export_event_ballot_styles; use crate::services::ballot_styles::ballot_style::EVENT_CONFIG_FILE_NAME; @@ -20,6 +21,12 @@ use serde_json::{json, Map, Value}; use tempfile::TempPath; use tracing::{event, info, instrument, Level}; +/// Persists `data` as a JSON file and optionally uploads to the database and s3. +/// +/// # Errors +/// +/// Returns an error when the temp file cannot be created, +/// upload fails (when `to_upload`), or the path cannot be read back. #[instrument(err, skip(transaction, data))] pub async fn write_export_document( transaction: &Transaction<'_>, @@ -54,6 +61,12 @@ pub async fn write_export_document( Ok(_temp_path) } +/// Builds one JSON object per ballot publication. +/// +/// # Errors +/// +/// Propagates style lookup failures, EML deserialization errors, +/// JSON serialization errors, or upload/temp-file errors. #[instrument(err)] pub async fn process_export_ballot_publication( hasura_transaction: &Transaction<'_>, @@ -116,6 +129,11 @@ pub async fn process_export_ballot_publication( Ok(temp_path) } +/// Loads ballot publications for the event and creates a temporary file. +/// +/// # Errors +/// +/// Propagates database read errors or processing failures. 
#[instrument(err)] pub async fn export_ballot_publications( hasura_transaction: &Transaction<'_>, @@ -143,6 +161,10 @@ pub async fn export_ballot_publications( } /// Exports election event config file which created at ballot publication generation. +/// +/// # Errors +/// +/// Returns an error when bucket configuration, key resolution, or S3 download fails. // #[instrument(err)] pub async fn export_election_event_config_file( tenant_id: &str, diff --git a/packages/windmill/src/services/export/export_bulletin_boards.rs b/packages/windmill/src/services/export/export_bulletin_boards.rs index 2eda146a7e..8d37ca59ca 100644 --- a/packages/windmill/src/services/export/export_bulletin_boards.rs +++ b/packages/windmill/src/services/export/export_bulletin_boards.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV exports of ImmuDB/bulletin-board message rows and protocol-manager secrets for an election event. use crate::postgres::election::get_elections; use crate::postgres::keys_ceremony::get_keys_ceremonies; use crate::postgres::trustee::get_all_trustees; @@ -25,21 +26,35 @@ use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; lazy_static! { + /// Validates bulletin-board CSV column names (alphanumeric, dot, underscore, hyphen). pub static ref HEADER_RE: Regex = Regex::new(r"^[a-zA-Z0-9._-]+$").unwrap(); + /// CSV header: owning election id (empty string for the event-level board). pub static ref ELECTION_ID_COL_NAME: String = String::from("election_id"); + /// CSV header: message row id. pub static ref ID_COL_NAME: String = String::from("id"); + /// CSV header: row creation timestamp. pub static ref CREATED_COL_NAME: String = "created".to_string(); + /// CSV header: sender public key. pub static ref SENDER_PK_COL_NAME: String = "sender_pk".to_string(); + /// CSV header: statement timestamp. 
pub static ref STATEMENT_TIMESTAMP_COL_NAME: String = "statement_timestamp".to_string(); + /// CSV header: statement kind discriminator. pub static ref STATEMENT_COL_NAME: String = "statement_kind".to_string(); + /// CSV header: batch index. pub static ref BATCH_COL_NAME: String = "batch".to_string(); + /// CSV header: mix round number. pub static ref MIX_NUMBER_COL_NAME: String = "mix_number".to_string(); + /// CSV header: base64-encoded payload. pub static ref MESSAGE_COL_NAME: String = "message".to_string(); + /// CSV header: row schema/version tag. pub static ref VERSION_COL_NAME: String = "version".to_string(); + /// CSV header used in trustee config exports (trustee display name). pub static ref TRUSTEE_NAME_COL_NAME: String = "trustee".to_string(); + /// CSV header for trustee-side configuration blob. pub static ref TRUSTEE_CONFIG_COL_NAME: String = "config".to_string(); } +/// Converts a single B3 bulletin-board row into a CSV record (message is standard base64, no padding). #[instrument] fn get_board_record(election_id: &str, row: B3MessageRow) -> Vec { let message_b64 = general_purpose::STANDARD_NO_PAD.encode(row.message.clone()); @@ -57,6 +72,11 @@ fn get_board_record(election_id: &str, row: B3MessageRow) -> Vec { ] } +/// Writes all boards in `boards_map` to a comma-separated CSV temp file. +/// +/// # Errors +/// +/// Returns an error when temp file creation, CSV writes, or size checks fail. #[instrument(err)] async fn create_boards_csv(boards_map: HashMap>) -> Result { let mut writer = csv::WriterBuilder::new().delimiter(b',').from_writer( @@ -105,6 +125,11 @@ async fn create_boards_csv(boards_map: HashMap>) -> Re Ok(temp_path) } +/// Fetches every bulletin board for the event and creates a temporary file. +/// +/// # Errors +/// +/// Propagates missing `ENV_SLUG`, database errors, B3 client failures, or [`create_boards_csv`] errors. 
#[instrument(err, skip(transaction))] pub async fn read_election_event_boards( transaction: &Transaction<'_>, @@ -135,6 +160,12 @@ pub async fn read_election_event_boards( create_boards_csv(boards_map).await } +/// Exports protocol-manager shared secrets for the event board and each +/// election board as two-column CSV (`election_id`, `key`). +/// +/// # Errors +/// +/// Propagates vault read failures, missing secrets, oversize output, or database/env errors. #[instrument(err, skip(transaction))] pub async fn read_protocol_manager_keys( transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/export/export_election_event.rs b/packages/windmill/src/services/export/export_election_event.rs index b67ee77071..2dc90d6028 100644 --- a/packages/windmill/src/services/export/export_election_event.rs +++ b/packages/windmill/src/services/export/export_election_event.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Full election-event export: aggregates importable Hasura schema, +//! Keycloak realm data, optional voters, media, tally CSVs, and related artifacts into a ZIP +//! (optionally encrypted). use crate::postgres::application::get_applications_by_election; use crate::postgres::area::get_event_areas; use crate::postgres::area_contest::export_area_contests; @@ -58,6 +61,11 @@ use crate::services::consolidation::aes_256_cbc_encrypt::encrypt_file_aes_256_cb use crate::services::documents::upload_and_return_document; use crate::services::password; +/// Builds the structured export data and temp paths for images referenced by document ids. +/// # Errors +/// +/// Propagates parallel query failures, Keycloak admin errors, +/// UUID parse errors, or image download failures. 
#[instrument(err, skip(transaction))] pub async fn read_export_data( transaction: &Transaction<'_>, @@ -168,6 +176,11 @@ pub async fn read_export_data( Ok((import_election_event_schema, images_files_path)) } +/// Encrypts `temp_path_string` into `encrypted_temp_file_string` using AES-256-CBC with `password`. +/// +/// # Errors +/// +/// Returns an error when the encryption helper fails. #[instrument(err)] pub async fn generate_encrypted_zip( temp_path_string: String, @@ -180,6 +193,11 @@ pub async fn generate_encrypted_zip( Ok(()) } +/// Writes `data` as JSON into a [`NamedTempFile`]. +/// +/// # Errors +/// +/// Returns an error when serialization or temp file IO fails. pub async fn write_export_document(data: ImportElectionEventSchema) -> Result { // Serialize the data into JSON string let data_str = serde_json::to_string_pretty(&data)?; @@ -192,6 +210,11 @@ pub async fn write_export_document(data: ImportElectionEventSchema) -> Result, @@ -242,6 +270,11 @@ pub async fn get_image_file_from_s3( Ok(Some(temp_file.into_temp_path())) } +/// Generic helper to download images from S3 for each contest, candidate, or election. +/// +/// # Errors +/// +/// Propagates failures from [`get_image_file_from_s3`]. #[instrument(err, skip(hasura_transaction, items, s3_bucket, get_document_id))] async fn process_images( hasura_transaction: &Transaction<'_>, @@ -270,6 +303,11 @@ where Ok(s3_files) } +/// Downloads election images from S3. +/// +/// # Errors +/// +/// Propagates [`process_images`] failures. pub async fn process_election_images( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -282,6 +320,11 @@ pub async fn process_election_images( .await } +/// Downloads contest images from S3. +/// +/// # Errors +/// +/// Propagates [`process_images`] failures. pub async fn process_contests_images( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -294,6 +337,11 @@ pub async fn process_contests_images( .await } +/// Downloads candidate images from S3. 
+/// +/// # Errors +/// +/// Propagates [`process_images`] failures. pub async fn process_candidates_images( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -306,6 +354,11 @@ pub async fn process_candidates_images( .await } +/// Fetches election, contest, and candidate images from the public bucket in parallel. +/// +/// # Errors +/// +/// Propagates bucket resolution failures or any image download failures. #[instrument(err, skip(hasura_transaction, elections, contests, candidates))] pub async fn process_event_images( hasura_transaction: &Transaction<'_>, @@ -329,6 +382,13 @@ pub async fn process_event_images( Ok(s3_files) } +/// Builds the multi-artifact ZIP described by `export_config`, optionally encrypts it, +/// uploads to the database and s3. +/// +/// # Errors +/// +/// Returns an error on missing passwords when required, +/// IO/ZIP failures, subgraph export helper failures, or upload failures. #[instrument(err)] pub async fn process_export_zip( tenant_id: &str, diff --git a/packages/windmill/src/services/export/export_schedule_events.rs b/packages/windmill/src/services/export/export_schedule_events.rs index 220353e213..dbf1833448 100644 --- a/packages/windmill/src/services/export/export_schedule_events.rs +++ b/packages/windmill/src/services/export/export_schedule_events.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV export of scheduled automation events for an election event. use crate::postgres::scheduled_event::find_scheduled_event_by_election_event_id; use crate::services::documents::upload_and_return_document; use crate::services::providers::transactions_provider::provide_hasura_transaction; @@ -13,6 +14,11 @@ use sequent_core::util::temp_path::write_into_named_temp_file; use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; +/// Loads all [`ScheduledEvent`] rows for the tenant/event pair. 
+/// +/// # Errors +/// +/// Propagates database errors from the scheduled-event query. #[instrument(err, skip(transaction))] pub async fn read_export_data( transaction: &Transaction<'_>, @@ -27,6 +33,12 @@ pub async fn read_export_data( Ok(scheduled_events) } +/// Serializes `data` to CSV, optionally uploads to the database and s3. +/// +/// # Errors +/// +/// Returns an error when JSON introspection fails, CSV writing fails, +/// temp file creation fails, or upload fails. #[instrument(err, skip(transaction))] pub async fn write_export_document( data: Vec, @@ -103,6 +115,11 @@ pub async fn write_export_document( Ok(temp_path) } +/// Orchestrates read + write to the database and s3. +/// +/// # Errors +/// +/// Propagates errors from the read/write helpers or transaction provider. #[instrument(err)] pub async fn process_export( tenant_id: &str, diff --git a/packages/windmill/src/services/export/export_tally.rs b/packages/windmill/src/services/export/export_tally.rs index ec51acb6bc..ba10a72f87 100644 --- a/packages/windmill/src/services/export/export_tally.rs +++ b/packages/windmill/src/services/export/export_tally.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV exports of tally sessions, executions, and derived results tables. use crate::{ postgres::{ results_area_contest::get_event_results_area_contest, @@ -34,8 +35,14 @@ use std::{future::Future, pin::Pin}; use tempfile::TempPath; use tracing::{info, instrument}; +/// Result of a single tally CSV export: stable file stem plus temp path. type ExportResult = Result<(String, TempPath), anyhow::Error>; +/// Serializes all [`TallySession`] rows for the event to the tally-session CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. 
#[instrument(err, skip(hasura_transaction))] pub async fn export_tally_session( hasura_transaction: &Transaction<'_>, @@ -99,6 +106,11 @@ pub async fn export_tally_session( Ok((file_name, temp_path)) } +/// Serializes [`TallySessionExecution`] rows to the tally-session execution CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_tally_session_execution( hasura_transaction: &Transaction<'_>, @@ -156,6 +168,11 @@ pub async fn export_tally_session_execution( Ok((file_name, temp_path)) } +/// Serializes [`TallySessionContest`] rows to the tally-session contest CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_tally_session_contest( hasura_transaction: &Transaction<'_>, @@ -213,6 +230,11 @@ pub async fn export_tally_session_contest( Ok((file_name, temp_path)) } +/// Serializes [`ResultsEvent`] rows to the results event CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_event( hasura_transaction: &Transaction<'_>, @@ -265,6 +287,11 @@ pub async fn export_results_event( Ok((file_name, temp_path)) } +/// Serializes [`ResultsElectionArea`] rows to the results election area CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_election_area( hasura_transaction: &Transaction<'_>, @@ -320,6 +347,11 @@ pub async fn export_results_election_area( Ok((file_name, temp_path)) } +/// Serializes [`ResultsElection`] rows to the results election CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. 
#[instrument(err, skip(hasura_transaction))] pub async fn export_results_election( hasura_transaction: &Transaction<'_>, @@ -377,6 +409,11 @@ pub async fn export_results_election( Ok((file_name, temp_path)) } +/// Serializes [`ResultsContest`] rows to the results contest CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_contest( hasura_transaction: &Transaction<'_>, @@ -449,6 +486,11 @@ pub async fn export_results_contest( Ok((file_name, temp_path)) } +/// Serializes [`ResultsContestCandidate`] rows to the results contest candidate CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_contest_candidate( hasura_transaction: &Transaction<'_>, @@ -510,6 +552,11 @@ pub async fn export_results_contest_candidate( Ok((file_name, temp_path)) } +/// Serializes [`ResultsAreaContest`] rows to the results area contest CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_area_contest( hasura_transaction: &Transaction<'_>, @@ -582,6 +629,11 @@ pub async fn export_results_area_contest( Ok((file_name, temp_path)) } +/// Serializes [`ResultsAreaContestCandidate`] rows to the results area contest candidate CSV. +/// +/// # Errors +/// +/// Propagates database errors, JSON serialization errors, or CSV/temp-file failures. #[instrument(err, skip(hasura_transaction))] pub async fn export_results_area_contest_candidate( hasura_transaction: &Transaction<'_>, @@ -646,6 +698,7 @@ pub async fn export_results_area_contest_candidate( Ok((file_name, temp_path)) } +/// Futures for every tally/results CSV export that [`read_tally_data`] runs concurrently. 
fn get_export_tasks<'a>( hasura_transaction: &'a Transaction<'a>, tenant_id: &'a str, @@ -705,6 +758,15 @@ fn get_export_tasks<'a>( ] } +/// Awaits all tally/results export tasks; fails fast if any branch errors. +/// +/// # Panics +/// +/// Panics if any joined task reports `Ok` but the result is missing after an earlier error check (should be unreachable). +/// +/// # Errors +/// +/// Returns an error wrapping the first failing export’s error string. #[instrument(err)] pub async fn read_tally_data( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/export/export_tally_results.rs b/packages/windmill/src/services/export/export_tally_results.rs index 8d6644614a..aa4d1cfcb5 100644 --- a/packages/windmill/src/services/export/export_tally_results.rs +++ b/packages/windmill/src/services/export/export_tally_results.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Converts tally SQLite result databases into XLSX spreadsheets +//! and updates tally session execution document pointers. use crate::postgres::document::get_document; use crate::postgres::tally_session_execution::get_last_tally_session_execution; use crate::postgres::tally_session_execution::update_tally_session_execution_documents; @@ -17,8 +19,20 @@ use sequent_core::types::ceremonies::TallySessionDocuments; use std::path::Path; use tracing::instrument; +/// Excel per-cell character limit enforced when copying SQLite text into worksheets. const EXCEL_STRING_LIMIT: usize = 32767; +/// Builds an XLSX from the execution’s SQLite results artifact, +/// uploads it, and stores the new document id on the execution. +/// +/// # Errors +/// +/// Returns an error when the SQLite document is missing, +/// temp download fails, conversion fails, DB update fails, or upload fails. +/// +/// # Panics +/// +/// Panics if the SQLite document lookup unexpectedly returns `None` after the prior checks. 
#[instrument(err)] pub async fn export_tally_results_to_xlsx( hasura_transaction: &Transaction<'_>, @@ -97,6 +111,7 @@ pub async fn export_tally_results_to_xlsx( Ok(()) } +/// Truncates `value_str` to the Excel’s max string length. fn truncate_string_for_excel(value_str: String) -> String { let truncated_text = if value_str.len() > EXCEL_STRING_LIMIT { value_str @@ -111,6 +126,13 @@ fn truncate_string_for_excel(value_str: String) -> String { /// Converts a SQLite database file to an XLSX file, with each table as a worksheet. /// +/// # Panics +/// +/// Panics if the worksheet row counter overflows `u32` bounds (pathological table sizes). +/// +/// # Errors +/// +/// Propagates SQLite open/query errors or XLSX writer failures. #[instrument(err)] async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { let db_conn = Connection::open(db_path)?; @@ -182,6 +204,17 @@ async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { Ok(()) } +/// Resolves the latest tally session execution for `tally_session_id` +/// and extracts its documents plus results event/execution ids. +/// +/// # Errors +/// +/// Returns an error when no execution exists, documents are absent, +/// SQLite id is missing, or JSON handling fails. +/// +/// # Panics +/// +/// Panics if `documents` is unexpectedly `None` after the prior check. #[instrument(err)] pub async fn get_tally_session_execution_results_sqlite_file( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/export/export_tasks_execution.rs b/packages/windmill/src/services/export/export_tasks_execution.rs index 3a4a205b80..77e9d62977 100644 --- a/packages/windmill/src/services/export/export_tasks_execution.rs +++ b/packages/windmill/src/services/export/export_tasks_execution.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! JSON export of Celery task execution records for an election event. 
use crate::postgres::tasks_execution::get_tasks_by_election_event_id; use crate::services::database::get_hasura_pool; use crate::services::documents::upload_and_return_document; @@ -14,6 +15,11 @@ use serde::{Deserialize, Serialize}; use serde_json::{json, Map, Value}; use tracing::{event, info, instrument, Level}; +/// Loads all [`TasksExecution`] rows for the event. +/// +/// # Errors +/// +/// Propagates database errors from [`get_tasks_by_election_event_id`]. #[instrument(err, skip(transaction))] pub async fn read_export_data( transaction: &Transaction<'_>, @@ -25,6 +31,13 @@ pub async fn read_export_data( Ok(tasks) } +/// Serializes executions to JSON, writes a temp file, +/// and uploads to the database and s3. +/// +/// # Errors +/// +/// Returns an error when serialization fails, no tasks exist, +/// temp file creation fails, or upload fails. #[instrument(err, skip(transaction))] pub async fn write_export_document( transaction: &Transaction<'_>, @@ -59,6 +72,11 @@ pub async fn write_export_document( } } +/// Orchestrates read + write to the database and s3. +/// +/// # Errors +/// +/// Propagates pool acquisition, transaction, read/write, or commit failures. #[instrument(err)] pub async fn process_export( tenant_id: &str, diff --git a/packages/windmill/src/services/export/export_template.rs b/packages/windmill/src/services/export/export_template.rs index b8356abb14..eee6f74e0f 100644 --- a/packages/windmill/src/services/export/export_template.rs +++ b/packages/windmill/src/services/export/export_template.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV export of communication templates for a tenant. 
use crate::postgres::template::get_templates_by_tenant_id; use crate::services::database::get_hasura_pool; use crate::services::documents::upload_and_return_document; @@ -14,6 +15,11 @@ use serde::{Deserialize, Serialize}; use serde_json::{json, Map, Value}; use tracing::{event, info, instrument, Level}; +/// Loads templates for `tenant_id`. +/// +/// # Errors +/// +/// Propagates database errors. #[instrument(err, skip(transaction))] pub async fn read_export_data( transaction: &Transaction<'_>, @@ -39,6 +45,16 @@ pub async fn read_export_data( Ok(transformed_templates) } +/// Writes `data` as CSV and uploads to the database and s3. +/// +/// # Panics +/// +/// Panics if any row has `created_at` or `updated_at` set to `None`. +/// +/// # Errors +/// +/// Returns an error when CSV serialization fails, no templates exist, +/// temp file creation fails, or upload fails. #[instrument(err, skip(transaction, data))] pub async fn write_export_document( transaction: &Transaction<'_>, @@ -107,6 +123,11 @@ pub async fn write_export_document( } } +/// Orchestrates read + write to the database and s3. +/// +/// # Errors +/// +/// Propagates pool, transaction, read, write, or commit failures. #[instrument(err)] pub async fn process_export(tenant_id: &str, document_id: &str) -> Result<()> { let mut hasura_db_client: DbClient = get_hasura_pool() diff --git a/packages/windmill/src/services/export/export_tenant.rs b/packages/windmill/src/services/export/export_tenant.rs index 6c5d7679bf..862058210f 100644 --- a/packages/windmill/src/services/export/export_tenant.rs +++ b/packages/windmill/src/services/export/export_tenant.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV export of core tenant metadata. 
use crate::postgres::tenant::get_tenant_by_id; use crate::types::documents::EDocuments; @@ -13,6 +14,11 @@ use sequent_core::util::temp_path::write_into_named_temp_file; use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; +/// Loads the tenant row identified by `tenant_id`. +/// +/// # Errors +/// +/// Propagates database errors from [`get_tenant_by_id`]. #[instrument(err, skip(transaction))] pub async fn read_tenant_export_data( transaction: &Transaction<'_>, @@ -23,6 +29,12 @@ pub async fn read_tenant_export_data( Ok(tenant) } +/// Serializes `data` as one header row plus one value row into a CSV temp file. +/// +/// # Errors +/// +/// Returns an error when JSON flattening fails, +/// CSV serialization fails, or temp file creation fails. #[instrument(err, skip(transaction))] pub async fn write_export_document( data: Tenant, diff --git a/packages/windmill/src/services/export/export_tenant_config.rs b/packages/windmill/src/services/export/export_tenant_config.rs index 596ed9fc93..44ea09b9eb 100644 --- a/packages/windmill/src/services/export/export_tenant_config.rs +++ b/packages/windmill/src/services/export/export_tenant_config.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! ZIP bundle export combining tenant CSV, full Keycloak realm JSON, and a roles-to-permissions CSV. use crate::services::documents::upload_and_return_document; use crate::services::export::export_tenant; @@ -18,6 +19,11 @@ use tempfile::NamedTempFile; use tracing::{event, info, instrument, Level}; use zip::write::FileOptions; +/// Serializes a Keycloak [`RealmRepresentation`] to a temporary JSON file. +/// +/// # Errors +/// +/// Returns an error when serialization or temp file IO fails. 
pub async fn write_export_keycloak_config(data: RealmRepresentation) -> Result { // Serialize the data into JSON string let data_str = serde_json::to_string(&data)?; @@ -30,6 +36,11 @@ pub async fn write_export_keycloak_config(data: RealmRepresentation) -> Result // // SPDX-License-Identifier: AGPL-3.0-only +//! ZIP export of per-trustee vault secrets. use super::export_election_event::generate_encrypted_zip; use crate::postgres::trustee::get_all_trustees; use crate::services::documents::upload_and_return_document; @@ -16,6 +17,14 @@ use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; use zip::write::FileOptions; +/// Packages every trustee secret for `tenant_id` into a ZIP, +/// encrypts it with `encryption_password`, uploads to the database and s3, +/// and removes the plaintext zip. +/// +/// # Errors +/// +/// Propagates trustee lookup failures, missing vault secrets, +/// ZIP/encryption errors, or document upload failures. #[instrument(err, skip(transaction))] pub async fn read_trustees_config_base( transaction: &Transaction<'_>, @@ -101,6 +110,12 @@ pub async fn read_trustees_config_base( Ok(()) } +/// Orchestrates read + write to the database and s3. +/// +/// # Errors +/// +/// Propagates the inner export error or +/// task-status update failures. #[instrument(err, skip(transaction))] pub async fn read_trustees_config( transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/export/export_users.rs b/packages/windmill/src/services/export/export_users.rs index 28b12f7abb..2215d9a8f0 100644 --- a/packages/windmill/src/services/export/export_users.rs +++ b/packages/windmill/src/services/export/export_users.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! CSV export of Keycloak users. 
use crate::postgres::area::get_areas_by_id; use crate::services::database::{get_keycloak_pool, PgConfig}; @@ -22,9 +23,11 @@ use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; lazy_static! { + /// Characters stripped from attribute names when building dynamic CSV column names. static ref SAFE_CHARS_RE: Regex = Regex::new(r"[^a-zA-Z0-9._-]").unwrap(); } +/// Keycloak built-in user profile attribute names excluded from the “custom attributes” header block. pub const USER_FIELDS: [&str; 8] = [ "id", "email", @@ -36,36 +39,52 @@ pub const USER_FIELDS: [&str; 8] = [ "area-id", ]; +/// Request body shape for HTTP handlers that export event-scoped users. #[derive(Deserialize, Debug, Clone, Serialize)] pub struct ExportUsersBody { + /// Owning tenant id. pub tenant_id: String, + /// Event whose realm and areas should drive the export. pub election_event_id: Option, + /// Optional election id for vote-info enrichment. pub election_id: Option, } +/// Request body for tenant users export. #[derive(Deserialize, Debug, Clone, Serialize)] pub struct ExportTenantUsersBody { + /// Tenant whose users are listed. pub tenant_id: String, } +/// Discriminator for [`export_users_file`] describing realm scope and listing mode. #[derive(Serialize, Deserialize, Debug, Clone)] pub enum ExportBody { + /// Export users from the event realm with optional election filters. Users { + /// Owning tenant id. tenant_id: String, + /// Event realm selector. election_event_id: Option, + /// Optional election id filter. election_id: Option, }, + /// Export users from the tenant realm only. TenantUsers { + /// Owning tenant id. tenant_id: String, }, } +/// Replaces any character not in `[A-Za-z0-9._-]` with `_` for safe CSV header suffixes. 
#[instrument(level = "trace")] fn sanitize_name(name: &str) -> String { // Replace all characters not matching the regex with an underscore '_' SAFE_CHARS_RE.replace_all(name, "_").to_string() } +/// Builds CSV headers: fixed user columns, custom Keycloak profile attributes, +/// and one column per election. #[instrument(skip(elections))] fn get_headers( elections: &Option>, @@ -106,6 +125,7 @@ fn get_headers( .concat() } +/// Flattens a [`User`] into a CSV row aligned with the headers. #[instrument(skip(elections, areas_by_id, user_attributes), level = "trace")] fn get_user_record( elections: &Option>, @@ -163,6 +183,15 @@ fn get_user_record( .concat() } +/// Paginates through Keycloak users and writes a CSV temp file, enforcing configured max upload size. +/// +/// # Panics +/// +/// Panics if a single page contains more than `i32::MAX` users or if the running offset overflows `i32`. +/// +/// # Errors +/// +/// Propagates pool/transaction errors, listing failures, CSV write failures, or oversize output. #[instrument(err, skip(hasura_transaction))] pub async fn export_users_file( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/export/mod.rs b/packages/windmill/src/services/export/mod.rs index b8fed5d3b1..b6739a5d70 100644 --- a/packages/windmill/src/services/export/mod.rs +++ b/packages/windmill/src/services/export/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Export pipelines that serialize elections, tenants, tallies, and related artifacts for backup or migration. + pub mod export_application; pub mod export_ballot_publication; pub mod export_bulletin_boards; diff --git a/packages/windmill/src/services/folders.rs b/packages/windmill/src/services/folders.rs index 7014144a98..5bbebffe83 100644 --- a/packages/windmill/src/services/folders.rs +++ b/packages/windmill/src/services/folders.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! 
S3 folder path helpers for organizing export folders. + use anyhow::{Context, Result}; use fs_extra::dir::{self, CopyOptions}; use std::path::PathBuf; @@ -10,6 +13,11 @@ use tracing::{info, instrument}; use walkdir::WalkDir; #[instrument(err)] +/// Copy a directory to a temporary directory. +/// +/// # Errors +/// +/// Returns an error if temporary directory creation or filesystem copying fails. pub fn copy_to_temp_dir(base_tally_path: &PathBuf) -> Result { // Create a temporary directory let temp_dir = tempdir()?; @@ -38,6 +46,11 @@ pub fn copy_to_temp_dir(base_tally_path: &PathBuf) -> Result { } #[instrument] +/// List the files in a directory. +/// +/// # Errors +/// +/// Returns an error if directory traversal fails. pub fn list_files(dir: &Path) -> Result<()> { for entry in fs::read_dir(dir)? { let entry = entry?; diff --git a/packages/windmill/src/services/generate_preview_url.rs b/packages/windmill/src/services/generate_preview_url.rs index 7623016a40..9f4f3632c4 100644 --- a/packages/windmill/src/services/generate_preview_url.rs +++ b/packages/windmill/src/services/generate_preview_url.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Generating preview URLs for documents and assets. + use crate::{ postgres::{document::get_document, preview::insert_preview}, services::{ @@ -22,6 +25,11 @@ use tracing::instrument; use uuid::Uuid; #[instrument(err)] +/// Construct the preview URL. +/// +/// # Errors +/// +/// Returns an error if required configuration is missing. pub fn construct_preview_url( tenant_id: &str, document_id: &str, @@ -39,6 +47,11 @@ pub fn construct_preview_url( } #[instrument(err)] +/// Get ballot style and area ids from the preview file. +/// +/// # Errors +/// +/// Returns an error if the preview file cannot be read or is missing required fields. 
pub async fn get_document_data(preview_file_path: &str) -> Result<(String, String)> { let file = File::open(preview_file_path) .map_err(|e| anyhow::anyhow!("Failed to open preview file: {}", e))?; @@ -70,6 +83,12 @@ pub async fn get_document_data(preview_file_path: &str) -> Result<(String, Strin } #[instrument(err)] +/// Get the uploaded document of the ballot style and generate the preview URL, +/// while uploading the preview document to S3 tenant folder. +/// +/// # Errors +/// +/// Returns an error if the input document cannot be fetched, parsed, uploaded, or preview data cannot be inserted. pub async fn generate_preview_url( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/services/google_meet.rs b/packages/windmill/src/services/google_meet.rs index fce9eefaa7..63b2d91629 100644 --- a/packages/windmill/src/services/google_meet.rs +++ b/packages/windmill/src/services/google_meet.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Google Meet conference lifecycle helpers. + use crate::postgres::tenant::get_tenant_by_id; use deadpool_postgres::Transaction; use google_calendar3::{ @@ -19,30 +21,49 @@ use strum_macros::EnumString; use tracing::{error, info, instrument}; #[derive(Deserialize, Debug, Clone)] +/// Input payload for generating a Google Meet link by creating a calendar event. pub struct GenerateGoogleMeetBody { + /// Event summary/title. pub summary: String, + /// Event description. pub description: String, + /// Event start datetime in ISO-8601 format. pub start_date_time: String, + /// Event end datetime in ISO-8601 format. pub end_date_time: String, + /// Time zone identifier for the event (e.g. "Europe/Madrid"). pub time_zone: String, + /// Attendee email list. pub attendee_emails: Vec, } #[derive(Serialize, Deserialize, Debug)] +/// Output payload containing the generated Meet link, when available. pub struct GenerateGoogleMeetResponse { + /// Generated Google Meet URL. 
pub meet_link: Option, } #[derive(Serialize, Deserialize, Debug, EnumString)] +/// Error variants returned while generating a Google Meet link. pub enum GoogleMeetError { + /// Missing or invalid service-account/client-secret configuration. ClientSecret(String), + /// JSON parsing/serialization error. Json(String), + /// OAuth2 authentication error. OAuth2(String), + /// Google Calendar API error. GoogleApi(String), + /// HTTP transport error. Http(String), + /// Date/time parsing error. DateTime(String), + /// Calendar could not be found or accessed. CalendarNotFound, + /// Meet link was not present in the created event. MeetLinkNotFound, + /// Catch-all error. Other(String), } @@ -62,9 +83,13 @@ impl std::fmt::Display for GoogleMeetError { } } +#[instrument(skip(hasura_transaction), err)] /// Implementation function for generating Google Meet links /// Creates a calendar event with Google Meet integration using service account credentials -#[instrument(skip(hasura_transaction), err)] +/// +/// # Errors +/// +/// Returns an error if tenant settings are missing, authentication fails, or the calendar event cannot be created. pub async fn generate_google_meet_link_impl( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -220,6 +245,10 @@ pub async fn generate_google_meet_link_impl( } /// Parse datetime string with timezone into EventDateTime +/// +/// # Errors +/// +/// Returns an error if the datetime string is invalid or the timezone is invalid. 
#[instrument(err)] fn parse_datetime(datetime_str: &str, timezone: &str) -> Result { // The datetime should be in ISO 8601 format (e.g., "2025-09-29T12:45:00.000Z") diff --git a/packages/windmill/src/services/import/import_bulletin_boards.rs b/packages/windmill/src/services/import/import_bulletin_boards.rs index b2e0a24eb1..dd7bedc167 100644 --- a/packages/windmill/src/services/import/import_bulletin_boards.rs +++ b/packages/windmill/src/services/import/import_bulletin_boards.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports bulletin-board message CSVs and protocol-manager keys into the configured storage backends. use crate::postgres::election::get_elections; use crate::services::export::export_bulletin_boards::*; use crate::services::protocol_manager::get_b3_pgsql_client; @@ -21,6 +22,11 @@ use std::collections::HashSet; use tempfile::NamedTempFile; use tracing::{info, instrument}; +/// Parses a CSV record into a bulletin-board row. +/// +/// # Errors +/// +/// Returns an error if required fields are missing or cannot be parsed/decoded. #[instrument] fn get_board_record(record: StringRecord) -> Result<(String, B3MessageRow)> { let fields: Vec = record.iter().map(|val| val.to_string()).collect(); @@ -78,6 +84,7 @@ fn get_board_record(record: StringRecord) -> Result<(String, B3MessageRow)> { Ok((election_id, row)) } +/// Returns the event-level board name or an election board name. #[instrument] fn get_board_name_for_event_or_election( tenant_id: &str, @@ -92,6 +99,11 @@ fn get_board_name_for_event_or_election( } } +/// Imports protocol-manager keys from a CSV and stores them under each board’s secret path. +/// +/// # Errors +/// +/// Returns an error if CSV parsing/validation fails, ids cannot be replaced, or vault writes fail. 
#[instrument(err, skip(replacement_map))] pub async fn import_protocol_manager_keys( hasura_transaction: &Transaction<'_>, @@ -199,6 +211,11 @@ pub async fn import_protocol_manager_keys( Ok(()) } +/// Imports bulletin-board rows from a CSV and inserts them into the event/election boards. +/// +/// # Errors +/// +/// Returns an error if CSV parsing/validation fails, ids cannot be replaced, or B3 insert fails. #[instrument(err)] pub async fn import_bulletin_boards( tenant_id: &str, diff --git a/packages/windmill/src/services/import/import_election_event.rs b/packages/windmill/src/services/import/import_election_event.rs index 9706834ca1..c6d0c1ac3d 100644 --- a/packages/windmill/src/services/import/import_election_event.rs +++ b/packages/windmill/src/services/import/import_election_event.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Election-event ZIP import orchestration: decode the exported schema bundle, +//! replace ids, import DB rows, Keycloak realm, and optional artifacts. use crate::postgres::application::insert_applications; use crate::postgres::election_event::{get_election_event_by_id_if_exist, update_bulletin_board}; @@ -69,8 +71,6 @@ use tracing::{event, info, instrument, Level}; use uuid::Uuid; use zip::read::ZipArchive; -type ZipDocumentEntries = (Vec<(String, Vec)>, String); - use super::import_users::import_users_file; use crate::postgres; use crate::postgres::area::insert_areas; @@ -105,29 +105,52 @@ use sequent_core::types::hasura::core::{Area, Candidate, Contest, Election, Elec use sequent_core::types::keycloak::CERTIFICATES_IDP_ALIAS; use sequent_core::types::scheduled_event::*; use sequent_core::util::temp_path::{generate_temp_file, get_file_size}; + +/// ZIP entry bytes plus the original filename (used for id replacement and routing). +type ZipDocumentEntries = (Vec<(String, Vec)>, String); +/// Schema payload embedded in election-event ZIP exports. 
#[derive(Debug, Serialize, Deserialize, Clone)] pub struct ImportElectionEventSchema { + /// Owning tenant id. pub tenant_id: Uuid, + /// Optional Keycloak realm export for the event realm. pub keycloak_event_realm: Option, + /// Election event row. pub election_event: ElectionEvent, + /// Elections belonging to the event. pub elections: Vec, + /// Contests belonging to the event. pub contests: Vec, + /// Candidates belonging to the event. pub candidates: Vec, + /// Areas belonging to the event. pub areas: Vec, + /// Area/contest join rows. pub area_contests: Vec, + /// Optional scheduled events included in the export. pub scheduled_events: Option>, + /// Reports included in the export (may be empty). pub reports: Vec, + /// Optional keys ceremonies included in the export. pub keys_ceremonies: Option>, + /// Optional applications included in the export. pub applications: Option>, #[serde(default = "default_version")] + /// Export system version. pub version: String, } -// Set the default version of an imported election event to be compatible with version 9, which is the first version to include this feature. +/// Set the default version of an imported election event to be compatible with version 9, +/// which is the first version to include this feature. fn default_version() -> String { "9.0.0".to_string() } +/// Ensures B3 boards and electoral-log backing DB exist for the event and its elections. +/// +/// # Errors +/// +/// Returns an error if board creation/lookup, immudb setup, or key generation fails. #[instrument(err)] pub async fn upsert_b3_and_elog( hasura_transaction: &Transaction<'_>, @@ -217,6 +240,11 @@ pub async fn upsert_b3_and_elog( Ok(board_value) } +/// Loads the default Keycloak event-realm JSON template configured by environment. +/// +/// # Errors +/// +/// Returns an error if the env var is missing, the file cannot be read, or JSON cannot be parsed. 
#[instrument(err)] pub fn read_default_election_event_realm() -> Result { let realm_config_path = env::var("KEYCLOAK_ELECTION_EVENT_REALM_CONFIG_PATH") @@ -227,6 +255,11 @@ pub fn read_default_election_event_realm() -> Result { .map_err(|err| anyhow!("Error parsing KEYCLOAK_ELECTION_EVENT_REALM_CONFIG_PATH into RealmRepresentation: {err}")) } +/// Removes client secrets and regenerates the certificates IDP/client secret so imported realms are safe to apply. +/// +/// # Errors +/// +/// Returns an error if required Keycloak client env vars are missing. #[instrument(skip(realm))] pub fn remove_keycloak_realm_secrets(realm: &RealmRepresentation) -> Result { let keycloak_client_id = @@ -320,6 +353,11 @@ pub fn remove_keycloak_realm_secrets(realm: &RealmRepresentation) -> Result, @@ -442,6 +485,10 @@ pub async fn insert_election_event_db( /// A tuple containing: /// * The modified ImportElectionEventSchema with replaced UUIDs /// * A HashMap mapping old UUIDs to their new replacements +/// +/// # Errors +/// +/// Returns an error if UUID replacement or schema (de)serialization fails. #[instrument(err, skip(data_str, original_data))] pub fn replace_ids( data_str: &str, @@ -475,6 +522,11 @@ pub fn replace_ids( Ok((data, replacement_map)) } +/// Loads the import ZIP document into a temp file and decrypts it when a password is present. +/// +/// # Errors +/// +/// Returns an error if the document cannot be fetched, downloaded, or decrypted. #[instrument(err, skip_all)] pub async fn get_document( hasura_transaction: &Transaction<'_>, @@ -509,6 +561,11 @@ pub async fn get_document( Ok((temp_file, document, document_type)) } +/// Decrypts the temp file when a non-empty password is provided. +/// +/// # Errors +/// +/// Returns an error if decryption or temp file IO fails. 
#[instrument(err, skip_all)] pub async fn decrypt_document( password: Option, @@ -541,6 +598,10 @@ pub async fn decrypt_document( /// Get the election event schma and also: /// - Check version compatibility /// - Replace IDs and return a mapping of old to new IDs (for preserving references in other documents like voters) +/// +/// # Errors +/// +/// Returns an error if schema parsing, version checks, or id replacement fails. #[instrument(err, skip_all)] pub async fn get_election_event_schema( data_str: &str, @@ -554,6 +615,11 @@ pub async fn get_election_event_schema( replace_ids(data_str, &original_data, event_id, tenant_id.clone()) } +/// Imports the election event schema into Hasura/Keycloak and returns the updated schema plus id replacement map. +/// +/// # Errors +/// +/// Returns an error if schema processing, realm upsert, or DB inserts fail. #[instrument(err, skip_all)] pub async fn process_election_event_file( hasura_transaction: &Transaction<'_>, @@ -744,6 +810,7 @@ pub async fn process_election_event_file( Ok((data, replacement_map)) } +/// Imports a users file from the ZIP. #[instrument(err, skip(hasura_transaction, temp_file))] async fn process_voters_file( hasura_transaction: &Transaction<'_>, @@ -773,6 +840,11 @@ async fn process_voters_file( Ok(()) } +/// Imports reports from the CSV export and creates secrets for encrypted reports. +/// +/// # Errors +/// +/// Returns an error if CSV parsing, id replacement, secret creation, or inserts fail. #[instrument(err, skip_all)] pub async fn process_reports_file( hasura_transaction: &Transaction<'_>, @@ -876,6 +948,7 @@ pub async fn process_reports_file( Ok(()) } +/// Imports activity logs from CSV into the electoral log. #[instrument(err, skip(temp_file))] async fn process_activity_logs_file( hasura_transaction: &Transaction<'_>, @@ -898,6 +971,11 @@ async fn process_activity_logs_file( Ok(()) } +/// Extracts the UUID from a `document__...` filename. 
+/// +/// # Errors +/// +/// Returns an error if the regex cannot be compiled. async fn extract_document_uuid(filename: &str) -> Result> { // Regex to match the UUID after "document_" let re = Regex::new( @@ -912,6 +990,11 @@ async fn extract_document_uuid(filename: &str) -> Result> { Ok(uuid) } +/// Extracts the name suffix from a `document__` filename. +/// +/// # Errors +/// +/// Returns an error if the regex cannot be compiled. async fn extract_document_name(filename: &str) -> Result> { let re = Regex::new( r"document_[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}_(.+)" @@ -925,10 +1008,16 @@ async fn extract_document_name(filename: &str) -> Result> { Ok(name) } +/// Regex matching UUID substrings for id replacement in filenames. static UUID_RE: Lazy = Lazy::new(|| { Regex::new(r"(?i)\b[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}\b").unwrap() }); +/// Rewrites UUIDs inside `file_name` using `replacement_map`. +/// +/// # Panics +/// +/// Panics if the capture group is unexpectedly missing. pub fn replace_ids_in_filename( file_name: &str, replacement_map: &HashMap, @@ -945,6 +1034,11 @@ pub fn replace_ids_in_filename( .into_owned() } +/// Uploads a file extracted from the ZIP, mapping its document id and rewriting UUIDs in the filename. +/// +/// # Errors +/// +/// Returns an error if UUID extraction/replacement or upload fails. #[instrument(err, skip(hasura_transaction, temp_file_path, replacement_map))] pub async fn process_s3_file( hasura_transaction: &Transaction<'_>, @@ -1000,6 +1094,10 @@ pub async fn process_s3_file( } // return zip entries, and the original string of the json schema +/// +/// # Errors +/// +/// Returns an error if the file cannot be read as zip/json or the schema entry is missing. 
#[instrument(err, skip(temp_file_path))] pub async fn get_zip_entries( temp_file_path: NamedTempFile, @@ -1071,6 +1169,11 @@ pub async fn get_zip_entries( Ok((zip_entries, election_event_schema)) } +/// Orchestrates import for a single ZIP document. +/// +/// # Errors +/// +/// Returns an error if document retrieval, zip parsing, or any sub-import fails. #[instrument(err, skip_all)] pub async fn process_document( hasura_transaction: &Transaction<'_>, @@ -1416,6 +1519,11 @@ pub async fn process_document( Ok(()) } +/// Applies imported scheduled-event dates by creating START/END voting period scheduled events. +/// +/// # Errors +/// +/// Returns an error if date generation or insertion fails. #[instrument(err, skip_all)] pub async fn manage_dates( data: &ImportElectionEventSchema, @@ -1489,6 +1597,11 @@ pub async fn manage_dates( Ok(()) } +/// Inserts a manage-date scheduled event for the given `event_processor`. +/// +/// # Errors +/// +/// Returns an error if serializing the payload or inserting the scheduled event fails. #[instrument(err, skip_all)] pub async fn maybe_create_scheduled_event( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/import/import_publications.rs b/packages/windmill/src/services/import/import_publications.rs index a5f6529d22..d050e72518 100644 --- a/packages/windmill/src/services/import/import_publications.rs +++ b/packages/windmill/src/services/import/import_publications.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports ballot publications and the election-event config file into the public bucket. 
use anyhow::{anyhow, Context, Result}; use csv::StringRecord; @@ -22,12 +23,20 @@ use uuid::Uuid; use crate::services::ballot_styles::ballot_style::{ElectionEventConfig, EVENT_CONFIG_FILE_NAME}; use crate::services::documents::upload_and_return_public_event_document; +/// JSON shape holding a ballot publication id and its serialized ballot styles. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ballot_design { + /// Ballot publication id referenced by the styles in this entry. ballot_publication_id: String, + /// Ballot styles belonging to the publication. ballot_styles: Vec, } +/// Imports ballot publications from a JSON file into the database. +/// +/// # Errors +/// +/// Returns an error if the input cannot be read or deserialized. #[instrument(err, skip(replacement_map))] pub async fn import_ballot_publications( hasura_transaction: &Transaction<'_>, @@ -47,6 +56,11 @@ pub async fn import_ballot_publications( /// Imports the election event config file, /// This file contains the election event presentation and is created during publication. +/// +/// # Errors +/// +/// Returns an error if the input cannot be read/deserialized, +/// the temp file cannot be written, or upload fails. #[instrument(err, skip(replacement_map))] pub async fn import_election_event_config_file( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/import/import_scheduled_events.rs b/packages/windmill/src/services/import/import_scheduled_events.rs index a7d32cd887..d6506defa8 100644 --- a/packages/windmill/src/services/import/import_scheduled_events.rs +++ b/packages/windmill/src/services/import/import_scheduled_events.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports scheduled events from a CSV file into the database. 
use crate::postgres::scheduled_event::insert_new_scheduled_event; use anyhow::{anyhow, Context, Result}; @@ -21,9 +22,15 @@ use tracing::{info, instrument}; use uuid::Uuid; lazy_static! { + /// Validates scheduled-event CSV column names (alphanumeric, dot, underscore, hyphen). pub static ref HEADER_RE: Regex = Regex::new(r"^[a-zA-Z0-9._-]+$").unwrap(); } +/// Imports scheduled events from a CSV export into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing/validation fails or if record insertion fails. #[instrument(err, skip(replacement_map))] pub async fn import_scheduled_events( hasura_transaction: &Transaction<'_>, @@ -67,6 +74,11 @@ pub async fn import_scheduled_events( Ok(()) } +/// Converts one CSV record into a [`ScheduledEvent`] row and inserts it. +/// +/// # Errors +/// +/// Returns an error if deserialization fails or if inserting the row fails. #[instrument(err, skip_all)] pub async fn process_record( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/import/import_tally.rs b/packages/windmill/src/services/import/import_tally.rs index f1f2ac1973..1d387cd565 100644 --- a/packages/windmill/src/services/import/import_tally.rs +++ b/packages/windmill/src/services/import/import_tally.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports tally/results from a CSV file into the database. use crate::{ postgres::{ results_area_contest::insert_many_results_area_contests, @@ -37,6 +38,11 @@ use tempfile::NamedTempFile; use tracing::{info, instrument}; use uuid::Uuid; +/// Replaces an array of UUIDs using `replacement_map`. +/// +/// # Errors +/// +/// Returns an error if JSON parsing fails or any id is missing from the map. #[instrument(err, skip_all)] async fn process_uuids( ids: Option<&str>, @@ -63,6 +69,11 @@ async fn process_uuids( } } +/// Reads an id from `record[index]` and replaces it using `replacement_map`. 
+/// +/// # Errors +/// +/// Returns an error if the column is missing, JSON is invalid, or the id is not in the map. #[instrument(err, skip_all)] pub async fn get_replaced_id( record: &StringRecord, @@ -81,6 +92,11 @@ pub async fn get_replaced_id( Ok(new_id) } +/// Parses an optional `i64` from `record[index]`. +/// +/// # Errors +/// +/// Returns an error if the field is present but not an integer. #[instrument(err, skip_all)] pub async fn get_opt_i64_item(record: &StringRecord, index: usize) -> Result> { let item = record @@ -93,6 +109,11 @@ pub async fn get_opt_i64_item(record: &StringRecord, index: usize) -> Result Result> { let item = record @@ -104,6 +125,11 @@ pub async fn get_opt_json_value_item(record: &StringRecord, index: usize) -> Res Ok(item) } +/// Parses an optional `NotNan` from `record[index]`. +/// +/// # Errors +/// +/// Returns an error if the field is present but not a number or is NaN. #[instrument(err, skip_all)] pub async fn get_opt_f64_item(record: &StringRecord, index: usize) -> Result>> { let item = record @@ -120,6 +146,11 @@ pub async fn get_opt_f64_item(record: &StringRecord, index: usize) -> Result Result>> { let item = record.get(index).and_then(|s| { @@ -150,6 +186,11 @@ pub async fn get_opt_date(record: &StringRecord, index: usize) -> Result, @@ -204,6 +245,11 @@ async fn process_event_results_file( Ok(()) } +/// Imports the `results_election` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, id replacement fails, or inserts fail. #[instrument(err, skip_all)] async fn process_results_election_file( hasura_transaction: &Transaction<'_>, @@ -270,6 +316,11 @@ async fn process_results_election_file( Ok(()) } +/// Imports the `tally_session` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails or inserts fail. 
#[instrument(err, skip_all)] async fn process_tally_session_file( hasura_transaction: &Transaction<'_>, @@ -301,6 +352,11 @@ async fn process_tally_session_file( Ok(()) } +/// Converts one `tally_session` CSV record into a [`TallySession`]. +/// +/// # Errors +/// +/// Returns an error if parsing fails or ids cannot be replaced. #[instrument(err, skip_all)] pub async fn process_tally_session_record( tenant_id: &str, @@ -372,6 +428,11 @@ pub async fn process_tally_session_record( Ok(tally_session) } +/// Imports the `tally_session_contest` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails or inserts fail. #[instrument(err, skip_all)] async fn process_tally_session_contest_file( hasura_transaction: &Transaction<'_>, @@ -443,6 +504,11 @@ async fn process_tally_session_contest_file( Ok(()) } +/// Imports the `tally_session_execution` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_tally_session_execution_file( hasura_transaction: &Transaction<'_>, @@ -524,6 +590,11 @@ async fn process_tally_session_execution_file( Ok(()) } +/// Imports the `results_election_area` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_results_election_area_file( hasura_transaction: &Transaction<'_>, @@ -579,6 +650,11 @@ async fn process_results_election_area_file( Ok(()) } +/// Imports the `results_contest` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_results_contest_file( hasura_transaction: &Transaction<'_>, @@ -612,6 +688,11 @@ async fn process_results_contest_file( Ok(()) } +/// Imports the `results_contest_candidate` CSV into the database. 
+/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_results_contest_candidate_file( hasura_transaction: &Transaction<'_>, @@ -676,6 +757,11 @@ async fn process_results_contest_candidate_file( Ok(()) } +/// Converts one `results_contest` CSV record into a [`ResultsContest`]. +/// +/// # Errors +/// +/// Returns an error if parsing fails or ids cannot be replaced. #[instrument(err, skip_all)] pub async fn process_results_contest_record( tenant_id: &str, @@ -765,6 +851,11 @@ pub async fn process_results_contest_record( Ok(results_contest) } +/// Imports the `results_area_contest` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_results_area_contest_file( hasura_transaction: &Transaction<'_>, @@ -852,6 +943,11 @@ async fn process_results_area_contest_file( Ok(()) } +/// Imports the `results_area_contest_candidate` CSV into the database. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails, ids cannot be replaced, or inserts fail. #[instrument(err, skip_all)] async fn process_results_area_contest_candidate_file( hasura_transaction: &Transaction<'_>, @@ -926,6 +1022,11 @@ async fn process_results_area_contest_candidate_file( Ok(()) } +/// Imports a full tally bundle by dispatching each expected CSV to its corresponding importer. +/// +/// # Errors +/// +/// Returns an error if any required file is missing or any sub-import fails. 
#[instrument(err, skip_all)] pub async fn process_tally_file( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/import/import_tenant.rs b/packages/windmill/src/services/import/import_tenant.rs index e9c3bb8858..d9f0143816 100644 --- a/packages/windmill/src/services/import/import_tenant.rs +++ b/packages/windmill/src/services/import/import_tenant.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports a tenant from a CSV file into the database. use crate::postgres::tenant::update_tenant; use anyhow::{anyhow, Context, Result}; @@ -18,9 +19,15 @@ use tracing::{info, instrument}; use uuid::Uuid; lazy_static! { + /// Validates tenant CSV column names (alphanumeric, dot, underscore, hyphen). pub static ref HEADER_RE: Regex = Regex::new(r"^[a-zA-Z0-9._-]+$").unwrap(); } +/// Reads a tenant CSV file and upserts the single tenant row. +/// +/// # Errors +/// +/// Returns an error if CSV parsing/validation fails or if the tenant update fails. #[instrument(err, skip_all)] pub async fn upsert_tenant( hasura_transaction: &Transaction<'_>, @@ -57,6 +64,11 @@ pub async fn upsert_tenant( Ok(()) } +/// Converts one CSV row into a [`Tenant`] and updates it under `old_tenant_id`. +/// +/// # Errors +/// +/// Returns an error if required fields are missing, parsing fails, or the DB update fails. #[instrument(err, skip_all)] pub async fn process_record( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/import/import_tenant_config.rs b/packages/windmill/src/services/import/import_tenant_config.rs index 5de891bfc1..f8e283ab1c 100644 --- a/packages/windmill/src/services/import/import_tenant_config.rs +++ b/packages/windmill/src/services/import/import_tenant_config.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! 
Imports the tenant-config ZIP bundle (tenant CSV, roles/permissions CSV, and Keycloak realm JSON). use crate::postgres; use crate::services::database::get_hasura_pool; @@ -23,6 +24,12 @@ use tempfile::NamedTempFile; use tracing::{info, instrument}; use zip::read::ZipArchive; +/// Imports the tenant-config ZIP document and applies the selected parts from `import_options`. +/// +/// # Errors +/// +/// Returns an error if the document cannot be fetched/verified, +/// ZIP reading fails, or any selected import step fails. pub async fn import_tenant_config_zip( import_options: ImportOptions, tenant_id: &str, @@ -169,6 +176,11 @@ pub async fn import_tenant_config_zip( Ok(()) } +/// Reads all non-hidden, non-directory ZIP entries into `(name, bytes)` pairs. +/// +/// # Errors +/// +/// Returns an error if the zip cannot be opened, parsed, or read. #[instrument(err, skip(temp_file_path))] pub async fn get_zip_entries(temp_file_path: NamedTempFile) -> Result)>> { let zip_file = File::open(&temp_file_path).map_err(|e| anyhow!("File open error: {}", e))?; @@ -200,6 +212,11 @@ pub async fn get_zip_entries(temp_file_path: NamedTempFile) -> Result) -> Result { let mut temp_file = NamedTempFile::new().context("Failed to create temporary file")?; io::copy(cursor, &mut temp_file).context("Failed to copy contents to temporary file")?; diff --git a/packages/windmill/src/services/import/import_users.rs b/packages/windmill/src/services/import/import_users.rs index 58970ea3d7..13bda7b1f9 100644 --- a/packages/windmill/src/services/import/import_users.rs +++ b/packages/windmill/src/services/import/import_users.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports users from a CSV file into the database. use crate::postgres::area::get_areas_by_name; use crate::postgres::keycloak_realm; @@ -29,18 +30,31 @@ use tracing::{debug, info, instrument, warn}; use uuid::Uuid; lazy_static! 
{ + /// Validates users CSV column names (alphanumeric, dot, underscore, hyphen). pub static ref HEADER_RE: Regex = Regex::new(r"^[a-zA-Z0-9._-]+$").unwrap(); + /// PBKDF2 iteration count used for generated password hashes. static ref PBKDF2_ITERATIONS: NonZeroU32 = NonZeroU32::new(27_500).unwrap(); + /// Reserved column name: number of PBKDF2 iterations. static ref NUMBER_OF_ITERATIONS_COL_NAME: String = String::from("num_of_iterations"); + /// Reserved column name: base64 salt. static ref SALT_COL_NAME: String = String::from("password_salt"); + /// Reserved column name: base64 PBKDF2 output. static ref HASHED_PASSWORD_COL_NAME: String = String::from("hashed_password"); + /// Reserved column name: plaintext password input. static ref PASSWORD_COL_NAME: String = String::from("password"); + /// Reserved column name: username. static ref USERNAME_COL_NAME: String = String::from("username"); + /// Reserved column name: email. static ref EMAIL_COL_NAME: String = String::from("email"); + /// Reserved column name: email verification flag. static ref EMAIL_VERIFIED_COL_NAME: String = String::from("email_verified"); + /// Reserved column name: group name. static ref GROUP_COL_NAME: String = String::from("group_name"); + /// Reserved column name: area name. static ref AREA_NAME_COL_NAME: String = String::from("area_name"); + /// Prefix for dynamic election vote-info columns. static ref ELECTION_COL_PREFIX: String = String::from("election__"); + /// Set of reserved column names consumed during import. static ref RESERVED_COL_NAMES: Vec = vec![ HASHED_PASSWORD_COL_NAME.clone(), SALT_COL_NAME.clone(), @@ -51,14 +65,23 @@ lazy_static! { ]; } +/// PBKDF2 algorithm used for password hashing. static PBKDF2_ALGORITHM: pbkdf2::Algorithm = pbkdf2::PBKDF2_HMAC_SHA256; +/// Output length for PBKDF2 SHA-256 digests. const CREDENTIAL_LEN: usize = digest::SHA256_OUTPUT_LEN; +/// Fixed-size PBKDF2 digest buffer. 
pub type Credential = [u8; CREDENTIAL_LEN]; +/// Sanitizes a CSV/header key into a SQL-safe identifier by replacing separators with `_`. fn sanitize_db_key(key: &str) -> String { key.replace(".", "_").replace("-", "_") } +/// Hashes `password` with `salt` using the configured PBKDF2 settings. +/// +/// # Errors +/// +/// Returns an error only if allocation/encoding fails. fn hash_password(password: &String, salt: &[u8]) -> Result { let mut output: Credential = [0u8; CREDENTIAL_LEN]; pbkdf2::derive( @@ -105,6 +128,11 @@ fn hash_password(password: &String, salt: &[u8]) -> Result { */ type CopyFromQueryParts = (String, String, String, Vec, Vec, Vec); +/// Builds the DDL and COPY statements for a temporary table from the CSV headers. +/// +/// # Errors +/// +/// Returns an error if the header set cannot be converted into a consistent schema. #[instrument(ret)] fn get_copy_from_query(headers: &StringRecord) -> anyhow::Result { let random_number: u64 = rand::random(); @@ -188,13 +216,17 @@ fn get_copy_from_query(headers: &StringRecord) -> anyhow::Result, diff --git a/packages/windmill/src/services/import/mod.rs b/packages/windmill/src/services/import/mod.rs index 7027ce34cb..69300a922c 100644 --- a/packages/windmill/src/services/import/mod.rs +++ b/packages/windmill/src/services/import/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Import helpers for zip bundles and CSV/JSON artifacts. + pub mod import_bulletin_boards; pub mod import_election_event; pub mod import_publications; diff --git a/packages/windmill/src/services/insert_cast_vote.rs b/packages/windmill/src/services/insert_cast_vote.rs index 1c140346e6..b82c42976d 100644 --- a/packages/windmill/src/services/insert_cast_vote.rs +++ b/packages/windmill/src/services/insert_cast_vote.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Inserting cast votes and managing electoral log side effects. 
+ use crate::postgres; use crate::postgres::area::get_area_by_id; use crate::postgres::election::get_election_by_id; @@ -67,8 +70,10 @@ use sequent_core::encrypt::hash_multi_ballot; use sequent_core::services::uuid_validation::parse_uuid_v4; use serde_json::Serializer; #[derive(Serialize, Deserialize, Debug, Clone)] +/// Input payload for casting a vote. +#[allow(missing_docs)] pub struct InsertCastVoteInput { - // Here is the class used for voting + /// Here is the class used for voting pub ballot_id: String, pub election_id: Uuid, pub content: String, @@ -116,14 +121,20 @@ impl InsertCastVoteInput { } } +/// Successful cast-vote row as stored after insertion. pub type InsertCastVoteOutput = CastVote; +/// Outcome of a cast-vote insert attempt for the task layer. pub enum InsertCastVoteResult { + /// Vote was persisted and committed. Success(InsertCastVoteOutput), + /// Permanent failure; the caller should not retry the same payload. SkipRetryFailure(CastVoteError), } #[derive(Debug)] +/// Identifiers required during cast-vote processing. +#[allow(clippy::missing_docs_in_private_items)] struct CastVoteIds<'a> { election_event_id: &'a str, tenant_id: &'a str, @@ -132,6 +143,8 @@ struct CastVoteIds<'a> { } #[derive(Serialize, Deserialize, Debug, Display)] +/// Error codes returned by cast-vote processing. +#[allow(missing_docs)] pub enum CastVoteError { #[serde(rename = "voting_channel_not_enabled")] VotingChannelNotEnabled(String), @@ -192,6 +205,7 @@ pub enum CastVoteError { } impl CastVoteError { + /// Downcast an [`anyhow::Error`] to [`CastVoteError`] or wrap as [`UnknownError`](CastVoteError::UnknownError). pub fn new(error: anyhow::Error) -> Self { match error.downcast::() { Ok(e) => e, @@ -200,6 +214,11 @@ impl CastVoteError { } } +/// Insert a vote into the database. +/// +/// # Errors +/// +/// Returns a [`CastVoteError`] describing why vote insertion failed. 
#[instrument(skip(input), err)] pub async fn try_insert_cast_vote( input: InsertCastVoteInput, @@ -473,12 +492,18 @@ pub async fn try_insert_cast_vote( } } +/// Pseudonym hash, vote hash, and optional verified voter signature tuple. type DeserializedCastVoteHashes = ( PseudonymHash, CastVoteHash, Option<(StrandSignaturePk, StrandSignature)>, ); +/// Deserialize a single-contest ballot, verify PoK and voter signature, and compute hashes. +/// +/// # Errors +/// +/// Returns [`CastVoteError`] when deserialization, hash mismatch, PoK, or signature checks fail. #[instrument(skip(input), err)] pub fn deserialize_and_check_ballot( input: &InsertCastVoteInput, @@ -540,6 +565,11 @@ pub fn deserialize_and_check_ballot( Ok((pseudonym_h, vote_h, signature_opt)) } +/// Deserialize a multi-contest ballot, verify PoK and voter signature, and compute hashes. +/// +/// # Errors +/// +/// Returns [`CastVoteError`] when deserialization, hash mismatch, PoK, or signature checks fail. #[instrument(skip(input), err)] pub fn deserialize_and_check_multi_ballot( input: &InsertCastVoteInput, @@ -599,6 +629,11 @@ pub fn deserialize_and_check_multi_ballot( Ok((pseudonym_h, vote_h, voter_signature_opt)) } +/// Insert a cast vote row inside `hasura_transaction` and commit if checks pass. +/// +/// # Errors +/// +/// Returns [`CastVoteError`] on validation failure, insert error, or commit failure. #[instrument( skip( input, @@ -701,11 +736,21 @@ pub async fn insert_cast_vote_and_commit<'a>( Ok(cast_vote) } +/// Hash a voter id using strand serialization and SHA256-to-array. +/// +/// # Errors +/// +/// Returns an error if the voter id cannot be serialized. pub(crate) fn hash_voter_id(voter_id: &str) -> Result { let bytes = voter_id.to_string().strand_serialize()?; hash_to_array(&bytes) } +/// Load signing material and an [`ElectoralLog`] client for the event's bulletin board. +/// +/// # Errors +/// +/// Returns an error if the board name, protocol manager, or electoral log construction fails. 
#[instrument(skip_all, err)] async fn get_electoral_log( hasura_transaction: &Transaction<'_>, @@ -736,6 +781,15 @@ async fn get_electoral_log( Ok((electoral_log?, sk.clone())) } +/// Validate election dates, voting channel, and voting status before accepting a vote. +/// +/// # Errors +/// +/// Returns [`CastVoteError::CheckStatusFailed`] or related variants when the vote must be rejected. +/// +/// # Panics +/// +/// Panics if internal date arithmetic overflows (`close date plus grace period`, or similar edge cases). #[instrument(skip_all, err)] async fn check_status( tenant_id: &str, @@ -954,6 +1008,11 @@ async fn check_status( Ok(()) } +/// Enforce max revotes and single-area voting rules against existing cast votes. +/// +/// # Errors +/// +/// Returns [`CastVoteError`] when revote limits or cross-area rules are violated. #[instrument(skip_all, err)] async fn check_previous_votes( voter_id_string: &str, @@ -1007,6 +1066,11 @@ async fn check_previous_votes( Ok(()) } +/// Verify the proof of knowledge for one ballot contest ciphertext. +/// +/// # Errors +/// +/// Returns an error when the ZKP library reports failure or verification returns false. #[instrument(skip_all, err)] fn check_popk(ballot_contest: &HashableBallotContest) -> Result<()> { let zkp = Zkp::new(&RistrettoCtx); @@ -1027,6 +1091,11 @@ fn check_popk(ballot_contest: &HashableBallotContest) -> Result<() Ok(()) } +/// Verify the proof of knowledge for a multi-contest ballot ciphertext bundle. +/// +/// # Errors +/// +/// Returns an error when the ZKP library reports failure or verification returns false. 
#[instrument(skip_all, err)] fn check_popk_multi(ballot_contest: &HashableMultiBallotContests) -> Result<()> { let zkp = Zkp::new(&RistrettoCtx); diff --git a/packages/windmill/src/services/join.rs b/packages/windmill/src/services/join.rs index b1babae33b..63563fc973 100644 --- a/packages/windmill/src/services/join.rs +++ b/packages/windmill/src/services/join.rs @@ -2,12 +2,25 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Async join utilities for merging ballots and voters CSV files. + use anyhow::{anyhow, Result}; use csv::ReaderBuilder; use std::{cmp::Ordering, fs::File}; use tracing::{info, instrument}; #[instrument(skip_all, err)] +/// Merge-join two headerless CSV streams sorted by voter id. +/// +/// Returns the joined output lines plus counters: ballots without voter, eligible voters, and casted ballots. +/// +/// # Errors +/// +/// Returns an error if CSV parsing fails or record comparison encounters inconsistent data. +/// +/// # Panics +/// +/// Panics if internal counters overflow while processing the input streams. pub fn merge_join_csv( ballots_file: &File, voters_file: &File, diff --git a/packages/windmill/src/services/jwks.rs b/packages/windmill/src/services/jwks.rs index 2043c97897..ce23418a4c 100644 --- a/packages/windmill/src/services/jwks.rs +++ b/packages/windmill/src/services/jwks.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing JWKS maintenance: S3 cache, Keycloak realm certs, and realm-scoped key removal. + use anyhow::{anyhow, Context, Result}; use sequent_core::services::s3; use sequent_core::util::temp_path::generate_temp_file; @@ -11,26 +14,43 @@ use tempfile::NamedTempFile; use tracing::{event, instrument, Level}; #[derive(Serialize, Deserialize, Debug, Clone)] +/// Single JWK entry as used by Keycloak/OpenID Connect. pub struct JWKKey { + /// Signing algorithm (e.g. "RS256"). pub alg: String, + /// Key type (e.g. "RSA"). 
pub kty: String, + /// Intended use (typically "sig"). pub r#use: String, + /// Modulus (base64url). pub n: String, + /// Exponent (base64url). pub e: String, + /// Key id. pub kid: String, + /// X.509 certificate SHA-1 thumbprint. pub x5t: String, + /// X.509 certificate chain (base64 DER). pub x5c: Vec, } #[derive(Serialize, Deserialize, Debug, Clone)] +/// JWKS payload as returned by Keycloak/OpenID endpoints. pub struct JwksOutput { + /// Key list. pub keys: Vec, } +/// S3 object path where the aggregated JWKS is stored. pub fn get_jwks_secret_path() -> String { env::var("AWS_S3_JWKS_CERTS_PATH").unwrap_or("certs.json".to_string()) } +/// Read the cache-control policy used when uploading the JWKS object. +/// +/// # Errors +/// +/// Returns an error if `AWS_S3_JWKS_CACHE_POLICY` is not set. pub fn get_cache_policy() -> Result { let cache_policy = env::var("AWS_S3_JWKS_CACHE_POLICY") .map_err(|err| anyhow!("AWS_S3_JWKS_CACHE_POLICY Must be set: {}", { err }))?; @@ -38,6 +58,11 @@ pub fn get_cache_policy() -> Result { } #[instrument(err)] +/// Get the JWKS from the S3 bucket. +/// +/// # Errors +/// +/// Returns an error if the JWKS object cannot be fetched or parsed. pub async fn get_jwks() -> Result> { let minio_private_uri = env::var("AWS_S3_PRIVATE_URI").map_err(|err| anyhow!("AWS_S3_PRIVATE_URI must be set"))?; @@ -64,6 +89,11 @@ pub async fn get_jwks() -> Result> { } #[instrument(err)] +/// Download a realm's JWKS from Keycloak. +/// +/// # Errors +/// +/// Returns an error if Keycloak cannot be reached or the JWKS response cannot be parsed. 
pub async fn download_realm_jwks_from_keycloak(realm: &str) -> Result> { let keycloak_url = env::var("KEYCLOAK_URL").map_err(|err| anyhow!("KEYCLOAK_URL must be set"))?; @@ -86,6 +116,11 @@ pub async fn download_realm_jwks_from_keycloak(realm: &str) -> Result Result<()> { let realm_jwks = download_realm_jwks_from_keycloak(realm) .await @@ -139,6 +174,11 @@ pub async fn upsert_realm_jwks(realm: &str) -> Result<()> { Ok(()) } +/// Remove all JWK entries for `realm` from the aggregated JWKS object in S3. +/// +/// # Errors +/// +/// Returns an error if Keycloak, S3, or temp file I/O fails. #[instrument(err)] pub async fn remove_realm_jwks(realm: &str) -> Result<()> { let realm_jwks = download_realm_jwks_from_keycloak(realm) diff --git a/packages/windmill/src/services/keycloak.rs b/packages/windmill/src/services/keycloak.rs index 7ebf20537d..5b18e95082 100644 --- a/packages/windmill/src/services/keycloak.rs +++ b/packages/windmill/src/services/keycloak.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Keycloak realm maintenance utilities. + use crate::types::error::Result; use anyhow::{anyhow, Context}; use keycloak::types::{GroupRepresentation, RealmRepresentation, RoleRepresentation}; @@ -14,6 +16,7 @@ use tempfile::NamedTempFile; use tracing::{event, info, instrument, Level}; use uuid::Uuid; +/// Map realm data to a tuple of container id, existing groups, and existing roles. pub fn map_realm_data( realm: &RealmRepresentation, ) -> ( @@ -32,6 +35,11 @@ pub fn map_realm_data( (container_id, existing_groups, existing_roles) } +/// Delete Keycloak realm roles and groups that are absent from the import lists. +/// +/// # Errors +/// +/// Returns an error if Keycloak admin calls fail or required ids are missing. #[instrument(err)] pub async fn delete_realm_groups_and_roles( existing_groups: &Vec, @@ -96,6 +104,7 @@ pub async fn delete_realm_groups_and_roles( Ok(()) } +/// Find a group by name in a list of groups. 
pub fn find_group_by_name( groups: &[GroupRepresentation], group_name: &str, @@ -106,6 +115,11 @@ pub fn find_group_by_name( .cloned() } +/// Apply roles and permissions from a CSV temp file to the given realm. +/// +/// # Errors +/// +/// Returns an error if clients cannot be created, CSV parsing fails, or Keycloak updates fail. #[instrument(err, skip_all)] pub async fn read_roles_config_file( temp_file: NamedTempFile, diff --git a/packages/windmill/src/services/keycloak_events.rs b/packages/windmill/src/services/keycloak_events.rs index 02a7547c4d..cdab8efbaa 100644 --- a/packages/windmill/src/services/keycloak_events.rs +++ b/packages/windmill/src/services/keycloak_events.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing Keycloak events. + use anyhow::{anyhow, Context, Result}; use deadpool_postgres::Transaction; use sequent_core::types::keycloak::AREA_ID_ATTR_NAME; @@ -9,10 +12,14 @@ use tokio_postgres::row::Row; use tokio_postgres::types::ToSql; use tracing::instrument; +/// Login event type. pub const LOGIN_EVENT_TYPE: &str = "LOGIN"; +/// Login error event type. pub const LOGIN_ERR_EVENT_TYPE: &str = "LOGIN_ERROR"; #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +/// Keycloak event. +#[allow(missing_docs)] pub struct Event { pub id: String, pub event_time: i64, @@ -46,6 +53,11 @@ impl TryFrom for Event { } } +/// List Keycloak admin events of a given type (and optional action) for a realm. +/// +/// # Errors +/// +/// Returns an error if statement preparation, query execution, or row mapping fails. #[instrument(skip(keycloak_transaction), err)] pub async fn list_keycloak_events_by_type( keycloak_transaction: &Transaction<'_>, @@ -101,6 +113,15 @@ pub async fn list_keycloak_events_by_type( Ok(events) } +/// Count Keycloak events by type, optional error, dedupe, and area filters. 
+/// +/// # Errors +/// +/// Returns an error if statement preparation or the count query fails. +/// +/// # Panics +/// +/// Panics if internal SQL parameter numbering overflows `i32` (pathological filter combinations). #[instrument(skip(keycloak_transaction), err)] pub async fn count_keycloak_events_by_type( keycloak_transaction: &Transaction<'_>, @@ -179,6 +200,11 @@ pub async fn count_keycloak_events_by_type( Ok(count) } +/// Count password-reset events for users in a given voting area. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the count query fails. #[instrument(skip(keycloak_transaction), err)] pub async fn count_keycloak_password_reset_event_by_area( keycloak_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/limit_access_by_countries.rs b/packages/windmill/src/services/limit_access_by_countries.rs index 5c790770fd..b55d846edc 100644 --- a/packages/windmill/src/services/limit_access_by_countries.rs +++ b/packages/windmill/src/services/limit_access_by_countries.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Cloudflare country-blocking rules for the voting portal and enrollment endpoints. + use super::cloudflare::{ create_ruleset, create_ruleset_rule, delete_ruleset_rule, get_cloudflare_vars, get_ruleset_by_phase, update_ruleset_rule, CreateCustomRuleRequest, Rule, Ruleset, @@ -10,6 +13,11 @@ use anyhow::{anyhow, Context, Result}; use rocket::{form::validate::Contains, http::Status}; use tracing::{info, instrument}; +/// Read voting portal and public Keycloak base URLs from the environment. +/// +/// # Errors +/// +/// Returns an error if the VOTING_PORTAL_URL or KEYCLOAK_PUBLIC_URL env vars are not set. #[instrument] fn get_voting_portal_urls_prefix() -> Result<(String, String)> { //TODO: change default values? 
@@ -20,6 +28,11 @@ fn get_voting_portal_urls_prefix() -> Result<(String, String)> { Ok((voting_portal_url, voting_portal_keycloak_url)) } +/// Build a Cloudflare WAF custom rule blocking countries for voting or enrollment hosts. +/// +/// # Errors +/// +/// Returns an error if the VOTING_PORTAL_URL or KEYCLOAK_PUBLIC_URL env vars are not set. #[instrument] fn create_limit_ip_by_countries_rule_format( tenant_id: String, @@ -70,6 +83,11 @@ fn create_limit_ip_by_countries_rule_format( }) } +/// Create, update, or delete a tenant's country-limit rule inside an existing ruleset. +/// +/// # Errors +/// +/// Returns an error if the Cloudflare rule update or creation fails. #[instrument] async fn update_or_create_limit_ip_by_countries_rule( api_key: &str, @@ -121,6 +139,11 @@ async fn update_or_create_limit_ip_by_countries_rule( Ok(rule) } +/// Create a new Cloudflare ruleset phase entry with the country-limit rule. +/// +/// # Errors +/// +/// Returns an error if the Cloudflare ruleset creation fails. #[instrument] async fn create_limit_ip_by_countries_ruleset( api_key: &str, @@ -143,6 +166,11 @@ async fn create_limit_ip_by_countries_ruleset( Ok(rule) } +/// Sync Cloudflare WAF rules that restrict voting and enrollment by country lists. +/// +/// # Errors +/// +/// Returns an error if Cloudflare credentials, ruleset discovery, or rule API calls fail. #[instrument] pub async fn handle_limit_ip_access_by_countries( tenant_id: String, diff --git a/packages/windmill/src/services/mod.rs b/packages/windmill/src/services/mod.rs index b984321100..85314d10f9 100644 --- a/packages/windmill/src/services/mod.rs +++ b/packages/windmill/src/services/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Windmill worker services: shared database access, Keycloak, exports, crypto, and integration helpers for Celery tasks. 
+ pub mod application; pub mod ballot_styles; pub mod cast_votes; diff --git a/packages/windmill/src/services/password.rs b/packages/windmill/src/services/password.rs index cad10645d7..f39fbd952d 100644 --- a/packages/windmill/src/services/password.rs +++ b/packages/windmill/src/services/password.rs @@ -1,10 +1,14 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Password generation helpers. + use rand::{thread_rng, Rng}; use tracing::{info, instrument}; #[instrument] +/// Generate a random string with a specified charset. pub fn generate_random_string_with_charset(bytes_length: usize, charset: &str) -> String { // Initialize the random number generator let mut rng = thread_rng(); diff --git a/packages/windmill/src/services/pg_lock.rs b/packages/windmill/src/services/pg_lock.rs index 69e1684e4b..cd82d76c35 100644 --- a/packages/windmill/src/services/pg_lock.rs +++ b/packages/windmill/src/services/pg_lock.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Postgres-backed cooperative locks. + use super::database::get_hasura_pool; use crate::postgres::lock; use anyhow::{anyhow, Context, Result}; @@ -12,6 +15,8 @@ use tokio_postgres::row::Row; use tracing::instrument; #[derive(Debug)] +/// Postgres lock. +#[allow(missing_docs)] pub struct PgLock { pub key: String, pub value: String, @@ -19,6 +24,15 @@ pub struct PgLock { } impl PgLock { + /// Extend this lock's expiry in Postgres by two minutes from "now". + /// + /// # Errors + /// + /// Returns an error if the pool, transaction, upsert, or commit fails. + /// + /// # Panics + /// + /// Panics if the new expiry timestamp overflows when adding the fixed TTL. #[instrument(skip(self), err)] pub async fn update_expiry(&self) -> Result<()> { let mut hasura_db_client: DbClient = get_hasura_pool() @@ -47,6 +61,11 @@ impl PgLock { Ok(()) } + /// Insert or update a lock row and commit the transaction. 
+ /// + /// # Errors + /// + /// Returns an error if the pool, transaction, upsert, or commit fails. #[instrument(err)] pub async fn acquire( key: String, @@ -77,6 +96,11 @@ impl PgLock { Ok(lock) } + /// Delete this lock row from Postgres and commit. + /// + /// # Errors + /// + /// Returns an error if the pool, transaction, delete, or commit fails. #[instrument(err)] pub async fn release(self) -> Result<()> { let mut hasura_db_client: DbClient = get_hasura_pool() diff --git a/packages/windmill/src/services/plugins_manager/mod.rs b/packages/windmill/src/services/plugins_manager/mod.rs index 02547e6141..e2aa95b217 100644 --- a/packages/windmill/src/services/plugins_manager/mod.rs +++ b/packages/windmill/src/services/plugins_manager/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! WASM plugin runtime: plugin loading, hook dispatch, and plugin-managed transactions. + pub mod plugin; pub mod plugin_db_manager; pub mod plugin_manager; diff --git a/packages/windmill/src/services/plugins_manager/plugin.rs b/packages/windmill/src/services/plugins_manager/plugin.rs index 097c165a6c..02ea88261e 100644 --- a/packages/windmill/src/services/plugins_manager/plugin.rs +++ b/packages/windmill/src/services/plugins_manager/plugin.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech // // SPDX-License-Identifier: AGPL-3.0-only +//! Low-level WASM plugin types and execution wrapper for hook calls. use crate::services::plugins_manager::plugin_db_manager::{ PluginDbManager, PluginTransactionsManager, TxnHost, }; @@ -29,11 +30,17 @@ use wasmtime_wasi::{WasiCtx, WasiCtxView, WasiView}; /// Represents a value that can be passed to or returned from a plugin hook. #[derive(Debug, Clone)] pub enum HookValue { + /// Signed 32-bit integer. S32(i32), + /// Unsigned 32-bit integer. U32(u32), + /// UTF-8 string. String(String), + /// Boolean. Bool(bool), + /// Component-model result (Ok/Err) optionally containing boxed values. 
Result(core::result::Result>, Option>>), + /// Optional boxed value. Option(Option>), } @@ -56,6 +63,7 @@ impl From<&str> for HookValue { } impl HookValue { + /// Converts this wrapper into a Wasmtime component [`Val`]. pub fn to_val(&self) -> Val { match self { HookValue::S32(v) => Val::S32(*v), @@ -73,6 +81,11 @@ impl HookValue { } } + /// Converts a Wasmtime component [`Val`] into a [`HookValue`]. + /// + /// # Errors + /// + /// Returns an error for unsupported value shapes. pub fn from_val(val: Val) -> Result { Ok(match val { Val::S32(v) => HookValue::S32(v), @@ -96,6 +109,7 @@ impl HookValue { }) } + /// Returns the contained integer when this is a numeric value. pub fn as_i32(&self) -> Option { match self { HookValue::S32(v) => Some(*v), @@ -104,6 +118,7 @@ impl HookValue { } } + /// Returns the contained string slice when this is a string value. pub fn as_str(&self) -> Option<&str> { match self { HookValue::String(v) => Some(v), @@ -111,6 +126,11 @@ impl HookValue { } } + /// Parses a successful `Result(Some(String))` hook payload as JSON. + /// + /// # Errors + /// + /// Returns an error if the hook value is not a JSON string. pub fn as_results_json(&self) -> Result { match self { HookValue::Result(Ok(Some(boxed_value))) => match &**boxed_value { @@ -131,10 +151,15 @@ impl HookValue { } } +/// Wasmtime store state shared by all plugin instances. pub struct PluginStore { + /// WASI context for plugin execution. pub wasi: WasiCtx, + /// Resource table used by component-model bindings. pub resource_table: ResourceTable, + /// Transaction host used by plugins to run DB operations. pub transactions_manager: PluginTransactionsManager, + /// Authorization host used by plugins. pub plugin_auth: PluginAuth, } @@ -148,16 +173,25 @@ impl WasiView for PluginStore { } #[derive(Clone)] +/// Loaded plugin component plus its manifest and shared instance state. pub struct Plugin { + /// Plugin name from the manifest. 
pub name: String, + /// Wasmtime component compiled from plugin bytes. pub component: Component, + /// Shared store and instance used to call exports. pub instance: Arc, Instance)>>, + /// Plugin manifest as exposed by the common interface. pub manifest: Manifest, } impl Plugin { /// Initializes a plugin from WASM bytes, setting up the necessary environment and returning a Plugin instance. /// Read Manifest from the plugin's common interface. + /// + /// # Errors + /// + /// Returns an error if compilation, linking, instantiation, or manifest retrieval fails. pub async fn init_plugin_from_wasm_bytes( engine: &Engine, wasm_bytes: Vec, @@ -228,6 +262,10 @@ impl Plugin { // args: Vec - The arguments to pass to the hook. // expected_result: Vec - The expected result values from the hook (call_async will fill these). // Returns a Result containing a vector of HookValue results from the hook call. + /// + /// # Errors + /// + /// Returns an error if the export is missing, the call traps, or result decoding fails. pub async fn call_hook( &self, hook: &str, @@ -265,9 +303,11 @@ impl Plugin { } } +/// Authorization host implementation exposed to plugins. pub struct PluginAuth; impl PluginAuth { + /// Creates a new authorization host. pub fn new() -> Self { PluginAuth } @@ -279,6 +319,7 @@ impl Default for PluginAuth { } } +/// Linker host type used for wiring authorization bindings into the component store. struct AuthHost; impl HasData for AuthHost { diff --git a/packages/windmill/src/services/plugins_manager/plugin_db_manager.rs b/packages/windmill/src/services/plugins_manager/plugin_db_manager.rs index 580ba577c0..6e83314b34 100644 --- a/packages/windmill/src/services/plugins_manager/plugin_db_manager.rs +++ b/packages/windmill/src/services/plugins_manager/plugin_db_manager.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech // // SPDX-License-Identifier: AGPL-3.0-only +//! Plugin-managed database transactions for Hasura and Keycloak connections. 
use crate::services::database::{get_hasura_pool, get_keycloak_pool}; use deadpool_postgres::{GenericClient, Object, Transaction}; use serde_json::{Value, Map}; @@ -16,6 +17,7 @@ use uuid::Uuid; use wasmtime::component::HasData; #[ouroboros::self_referencing] +/// Holds an optional Postgres client and an optional transaction borrowing from it. pub struct PluginDbManager { client: Option, @@ -25,6 +27,7 @@ pub struct PluginDbManager { txn: Option>, } +/// Host marker used by the transactions-manager component bindings. pub struct TxnHost; impl HasData for TxnHost { @@ -32,18 +35,27 @@ impl HasData for TxnHost { } impl PluginDbManager { + /// Creates an empty manager (no client, no transaction). + /// + /// # Panics + /// + /// Panics if the self-referential wrapper cannot be constructed. pub fn init() -> Self { PluginDbManager::try_new(None, |_client_ref| Ok(None) as Result<_, String>) .expect("Failed to create TransactionComponent") } } +/// Wrapper exposing Hasura and Keycloak transaction managers to the plugin host interface. pub struct PluginTransactionsManager { + /// Hasura transaction manager (Postgres). hasura_manager: Arc>, + /// Keycloak transaction manager (Postgres). keycloak_manager: Arc>, } impl PluginTransactionsManager { + /// Creates a new transactions manager from pre-initialized per-database managers. pub fn new( hasura_manager: Arc>, keycloak_manager: Arc>, @@ -55,10 +67,16 @@ impl PluginTransactionsManager { } } +/// Parses any valid UUID string. pub fn parse_any_valid_uuid(s: &str) -> Option { Uuid::parse_str(s).ok() } +/// Converts Postgres row results into a JSON array string. +/// +/// # Errors +/// +/// Returns an error if serialization fails. 
fn parsed_transactions_query_results( results: Vec, ) -> Result> { @@ -102,7 +120,7 @@ fn parsed_transactions_query_results( Ok(json_string) } -//Implementing the Host trait for PluginTransactionsManager to handle database transactions +/// Implementing the Host trait for PluginTransactionsManager to handle database transactions impl Host for PluginTransactionsManager { async fn create_hasura_transaction(&mut self) -> Result<(), String> { let mut manager = self.hasura_manager.lock().await; diff --git a/packages/windmill/src/services/plugins_manager/plugin_manager.rs b/packages/windmill/src/services/plugins_manager/plugin_manager.rs index def2a794b6..c07f39dbc3 100644 --- a/packages/windmill/src/services/plugins_manager/plugin_manager.rs +++ b/packages/windmill/src/services/plugins_manager/plugin_manager.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech // // SPDX-License-Identifier: AGPL-3.0-only +//! High-level plugin registry that loads plugins and dispatches hooks to registered plugins. use crate::services::plugins_manager::plugin::{HookValue, Plugin}; use anyhow::{anyhow, Context, Result}; use dashmap::DashMap; @@ -14,16 +15,26 @@ use serde_json::Value; use std::sync::Arc; use wasmtime::{Config, Engine}; +/// In-memory plugin registry plus dispatch tables (hooks, routes, tasks). pub struct PluginManager { + /// Loaded plugins keyed by plugin name. pub plugins: DashMap>, + /// Hook name → plugin names that registered it. pub hooks: DashMap>, // (hook, list of plugin names) + /// Route path → (handler, plugin_name). pub routes: DashMap, // (path, (handler, plugin_name)) - Routes remain 1:1 - pub tasks: DashMap>, // (task, list of plugin names) + /// Task handler → plugin names that registered it. + pub tasks: DashMap>, // (task, list of plugin names) + /// Shared Wasmtime engine used to compile components. 
pub engine: Engine, } impl PluginManager { /// Creates a new PluginManager instance with an async-enabled Wasmtime engine and empty plugin registries. + /// + /// # Errors + /// + /// Returns an error if the Wasmtime engine cannot be created. pub fn new() -> Result { let mut config = Config::new(); config.async_support(true); @@ -39,6 +50,10 @@ } /// Loads all plugin WASM files from the S3 bucket, initializes them, and registers their hooks, routes, and tasks. + /// + /// # Errors + /// + /// Returns an error if S3 listing/download fails or a plugin fails to initialize. pub async fn load_plugins(&self) -> Result<()> { let bucket: String = get_public_bucket().context("failed to get public S3 bucket")?; let wasms_files: Vec<(String, Vec)> = @@ -93,6 +108,10 @@ /// Calls a hook by name on all plugins that registered for it, passing arguments and expected result values. /// Returns a vector of results from each plugin. + /// + /// # Errors + /// + /// Returns an error if no plugin registered the hook, a plugin is missing, a task panics, or the call fails. pub async fn call_hook( &self, hook: &str, @@ -152,6 +171,10 @@ } /// Calls a registered route handler by path, passing a JSON string as input, and returns the JSON value. + /// + /// # Errors + /// + /// Returns an error if the route is not found, the plugin is missing, the call fails, or the result is not JSON. pub async fn call_route(&self, path: &str, input_json: String) -> Result { if let Some(route_entry) = self.routes.get(path) { let (handler, plugin_name) = route_entry.value(); @@ -185,7 +208,11 @@ } } - /// Executes a registered task by name, passing a JSON string as input, on all plugins that registered for the task. + /// Executes a registered task by name, passing a JSON string as input, on all plugins that registered for the task. 
+ /// + /// # Errors + /// + /// Returns an error if the task is not registered, a plugin is missing, or the call fails. pub async fn execute_task(&self, task: &str, input_json: String) -> Result<()> { let plugin_names = self .tasks @@ -208,6 +235,7 @@ impl PluginManager { Ok(()) } + /// Returns the task handler for `path` when the route maps to a handler registered as a task. pub fn get_route_task_handler(&self, path: &str) -> Option { if let Some(route_entry) = self.routes.get(path) { let (handler, _plugin_name) = route_entry.value(); @@ -219,9 +247,18 @@ impl PluginManager { } } +/// Global plugin manager singleton. static PLUGIN_MANAGER: OnceCell = OnceCell::new(); /// Returns a reference to the global PluginManager singleton, initializing it if necessary. +/// +/// # Panics +/// +/// Panics if initialization succeeds but the singleton cannot be retrieved. +/// +/// # Errors +/// +/// Returns an error if initialization fails. pub async fn get_plugin_manager() -> Result<&'static PluginManager> { let plugin_manager = match PLUGIN_MANAGER.get() { Some(manager) => manager, @@ -238,6 +275,10 @@ pub async fn get_plugin_manager() -> Result<&'static PluginManager> { } /// Initializes the global PluginManager singleton and loads all plugins from S3. +/// +/// # Errors +/// +/// Returns an error if initialization or plugin loading fails. pub async fn init_plugin_manager() -> Result<()> { if PLUGIN_MANAGER.get().is_some() { return Ok(()); diff --git a/packages/windmill/src/services/plugins_manager/plugins_hooks.rs b/packages/windmill/src/services/plugins_manager/plugins_hooks.rs index 819791a6db..675b894f41 100644 --- a/packages/windmill/src/services/plugins_manager/plugins_hooks.rs +++ b/packages/windmill/src/services/plugins_manager/plugins_hooks.rs @@ -1,23 +1,28 @@ // SPDX-FileCopyrightText: 2025 Sequent Legal // // SPDX-License-Identifier: AGPL-3.0-only +//! Convenience trait that exposes strongly-typed plugin hooks on [`PluginManager`]. 
use crate::services::plugins_manager::plugin::HookValue; use crate::services::plugins_manager::plugin_manager::PluginManager; use anyhow::{anyhow, Result}; use async_trait::async_trait; use serde_json::Value; -// This module defines the hooks implementation for the plugin system. -// Each plugin hook is a method that can be called by the plugin manager to interact with plugins. +/// Hooks interface for the plugin system. +/// Each plugin hook is a method that can be called by the plugin manager to interact with plugins. #[async_trait] pub trait PluginHooks { - //Add plugins hooks here + /// Calls the `create-transmission-package` hook and returns its string result. async fn create_transmission_package(&self, input: Value) -> Result; } #[async_trait] impl PluginHooks for PluginManager { - //Implement the PluginHooks trait for PluginManager + /// Calls the corresponding plugin hook and unwraps the first plugin result as a string. + /// + /// # Errors + /// + /// Returns an error if the hook call fails or returns an unexpected shape. async fn create_transmission_package(&self, input: Value) -> Result { let res: Vec> = self .call_hook( diff --git a/packages/windmill/src/services/private_keys.rs b/packages/windmill/src/services/private_keys.rs index fb8c25ac73..7a51043c1b 100644 --- a/packages/windmill/src/services/private_keys.rs +++ b/packages/windmill/src/services/private_keys.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Trustee encrypted private key retrieval for bulletin-board and ceremony workflows. + use anyhow::Result; use base64::engine::general_purpose; use base64::Engine; @@ -14,6 +16,11 @@ use tracing::instrument; use super::protocol_manager; use super::public_keys::deserialize_public_key; +/// Fetch a trustee's encrypted private key bytes for a board, returned as standard Base64 (no pad). +/// +/// # Errors +/// +/// Returns an error if the public key cannot be parsed or immudb/vault retrieval fails. 
#[instrument(err)] pub async fn get_trustee_encrypted_private_key( board_name: &str, diff --git a/packages/windmill/src/services/probe.rs b/packages/windmill/src/services/probe.rs index a954f36197..83d29eaccc 100644 --- a/packages/windmill/src/services/probe.rs +++ b/packages/windmill/src/services/probe.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Readiness and liveness checks. + use crate::services::celery_app::{get_celery_app, get_celery_connection, get_queues, Queue}; use crate::services::database::{get_hasura_pool, get_keycloak_pool}; use crate::services::jwks::get_jwks_secret_path; @@ -19,12 +22,15 @@ use tracing::{error, info, instrument, warn}; use super::celery_app::get_is_app_active; #[derive(Display, Debug, Eq, PartialEq, Clone)] +/// Application name. +#[allow(missing_docs)] pub enum AppName { BEAT, HARVEST, WINDMILL, } +/// Broker reconnect timeout for Celery health checks (seconds). const BROKER_CONNECTION_TIMEOUT: u32 = 2; lazy_static! { @@ -35,6 +41,7 @@ lazy_static! { }; } +/// Probe Celery broker connectivity and consumer health for configured queues. #[instrument(ret)] async fn check_celery(_app_name: &AppName) -> Option { let celery_app = get_celery_app().await; @@ -87,6 +94,7 @@ async fn check_celery(_app_name: &AppName) -> Option { } } +/// Probe Hasura Postgres pool by acquiring a connection within [`DB_TIMEOUTS`]. #[instrument(ret)] async fn check_hasura_db(app_name: &AppName) -> Option { if AppName::BEAT == *app_name { @@ -107,6 +115,7 @@ async fn check_hasura_db(app_name: &AppName) -> Option { } } +/// Probe Keycloak Postgres pool by acquiring a connection within [`DB_TIMEOUTS`]. #[instrument(ret)] async fn check_keycloak_db(app_name: &AppName) -> Option { if AppName::BEAT == *app_name { @@ -128,6 +137,7 @@ async fn check_keycloak_db(app_name: &AppName) -> Option { } } +/// Probe AWS Secrets Manager / vault master secret accessibility. 
#[instrument(ret)] async fn check_aws_secrets(app_name: &AppName) -> Option { if AppName::BEAT == *app_name { @@ -143,6 +153,7 @@ async fn check_aws_secrets(app_name: &AppName) -> Option { } } +/// Probe public S3 by reading the configured JWKS object path. #[instrument(ret)] async fn check_s3(app_name: &AppName) -> Option { if AppName::BEAT == *app_name { @@ -166,6 +177,7 @@ async fn check_s3(app_name: &AppName) -> Option { } } +/// Probe SMS transport (e.g. AWS SNS attributes) when the app sends outbound SMS. #[instrument(ret)] async fn check_sms_sender(app_name: &AppName) -> Option { if AppName::BEAT == *app_name || AppName::HARVEST == *app_name { @@ -201,6 +213,7 @@ async fn check_sms_sender(app_name: &AppName) -> Option { } } +/// Run all configured readiness checks; returns true when every probe passes or is skipped. #[instrument(ret)] async fn readiness_test(app_name: &AppName) -> bool { // Use futures::join! to await multiple futures concurrently @@ -230,6 +243,7 @@ async fn readiness_test(app_name: &AppName) -> bool { data.iter().all(|&x| x.is_none() || x == Some(true)) } +/// Setup the probe for the application. pub async fn setup_probe(app_name: AppName) { let app = app_name.to_string(); let addr_s = std::env::var(format!("{}_PROBE_ADDR", app)).unwrap_or("0.0.0.0:3030".to_string()); diff --git a/packages/windmill/src/services/protocol_manager.rs b/packages/windmill/src/services/protocol_manager.rs index 7f48d914ce..a46f5cb591 100644 --- a/packages/windmill/src/services/protocol_manager.rs +++ b/packages/windmill/src/services/protocol_manager.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Managing protocol manager key generation, serialization, and bulletin-board signing. 
+ use b3::client::pgsql::{PgsqlB3Client, PgsqlConnectionParams}; use b3::messages::artifact::Shares; use b3::messages::artifact::{Ballots, Channel, Configuration, DkgPublicKey, TrusteeShareData}; @@ -30,10 +32,16 @@ use electoral_log::BoardClient; use immudb_rs::{sql_value::Value, Client, NamedParam, SqlValue}; use strand::signature::{StrandSignaturePk, StrandSignatureSk}; +/// Get the protocol manager secret path for a board. pub fn get_protocol_manager_secret_path(board_name: &str) -> String { format!("boards/{board_name}/protocol-manager") } +/// Generate a protocol manager signing key and persist its TOML config to the vault. +/// +/// # Errors +/// +/// Returns an error if key generation, serialization, or vault write fails. #[instrument(skip(hasura_transaction), err)] pub async fn create_protocol_manager_keys( hasura_transaction: &Transaction<'_>, @@ -57,6 +65,11 @@ pub async fn create_protocol_manager_keys( Ok(()) } +/// Create a new in-memory [`ProtocolManager`] with a fresh signing key. +/// +/// # Errors +/// +/// Returns an error if the signing key cannot be generated. #[instrument] pub fn gen_protocol_manager() -> Result> { let pmkey: StrandSignatureSk = StrandSignatureSk::gen().map_err(|err| anyhow!("{:?}", err))?; @@ -68,6 +81,11 @@ pub fn gen_protocol_manager() -> Result> { Ok(pm) } +/// Serialize a [`ProtocolManager`] to TOML for storage in the vault. +/// +/// # Errors +/// +/// Returns an error if the config cannot be encoded to TOML. 
#[instrument] pub fn serialize_protocol_manager(pm: &ProtocolManager) -> Result { let pmc = ProtocolManagerConfig::from(pm); @@ -75,6 +93,11 @@ pub fn serialize_protocol_manager(pm: &ProtocolManager) -> Result(contents: String) -> Result> { let pmc: ProtocolManagerConfig = toml::from_str(&contents).map_err(|err| anyhow!("{:?}", err))?; @@ -83,6 +106,11 @@ pub fn deserialize_protocol_manager(contents: String) -> Result( b3_client: &mut PgsqlB3Client, configuration: Configuration, @@ -97,6 +125,11 @@ async fn init( } #[instrument(skip(pm), err)] +/// Add a configuration to a B3 board. +/// +/// # Errors +/// +/// Returns an error if the B3 client cannot be created or configuration insertion fails. pub async fn add_config_to_board( threshold: usize, board_name: &str, @@ -118,6 +151,11 @@ pub async fn add_config_to_board( } #[instrument(err)] +/// Get the public key for a B3 board. +/// +/// # Errors +/// +/// Returns an error if board messages cannot be retrieved, configuration is missing, or parsing fails. pub async fn get_board_public_key(board_name: &str) -> Result { let mut board = get_b3_pgsql_client().await?; @@ -165,6 +203,11 @@ pub async fn get_board_public_key(board_name: &str) -> Result { Ok(dkgpk.pk) } +/// Check whether a configuration message exists on the board. +/// +/// # Errors +/// +/// Returns an error if board messages cannot be retrieved or decoded. pub async fn check_configuration_exists(board_name: &str) -> Result { let board = get_b3_pgsql_client().await?; @@ -178,6 +221,11 @@ pub async fn check_configuration_exists(board_name: &str) -> Result { } #[instrument(err)] +/// Get the public key messages for a B3 board. +/// +/// # Errors +/// +/// Returns an error if board messages cannot be retrieved or decoded. 
pub async fn get_board_public_key_messages(board_name: &str) -> Result> { let board = get_b3_pgsql_client().await?; @@ -203,6 +251,11 @@ pub async fn get_board_public_key_messages(board_name: &str) -> Result( board_name: &str, trustee_pub_key: &StrandSignaturePk, @@ -259,6 +312,11 @@ pub async fn get_trustee_encrypted_private_key( } #[instrument(skip_all, err)] +/// Get the configuration for a B3 board. +/// +/// # Errors +/// +/// Returns an error if the configuration statement is missing or cannot be decoded. pub fn get_configuration(messages: &[Message]) -> Result> { let configuration_msg = messages .iter() @@ -276,6 +334,11 @@ pub fn get_configuration(messages: &[Message]) -> Result(messages: &[Message]) -> Result { let public_key_message = messages .iter() @@ -294,6 +357,11 @@ pub fn get_public_key_hash(messages: &[Message]) -> Result( configuration: &Configuration, trustee_pks: Vec, @@ -321,6 +389,11 @@ pub fn generate_trustee_set( } #[instrument(skip_all, err)] +/// Convert B3 messages to a vector of Messages. +/// +/// # Errors +/// +/// Returns an error if any board message cannot be decoded. pub fn convert_b3(b3: &[B3MessageRow]) -> Result> { let messages: Vec = b3 .iter() @@ -330,6 +403,11 @@ pub fn convert_b3(b3: &[B3MessageRow]) -> Result> { } #[instrument(err)] +/// Get the protocol manager for a B3 board. +/// +/// # Errors +/// +/// Returns an error if the protocol manager secret cannot be read or deserialized. pub async fn get_protocol_manager( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -349,6 +427,11 @@ pub async fn get_protocol_manager( } #[instrument(skip(b3_client), err)] +/// Get the messages for a B3 board. +/// +/// # Errors +/// +/// Returns an error if the board messages cannot be retrieved or decoded. pub async fn get_b3( board_name: &str, b3_client: &mut PgsqlB3Client, @@ -369,6 +452,11 @@ pub async fn get_b3( ), err )] +/// Add ballots to a B3 board. 
+/// +/// # Errors +/// +/// Returns an error if the ballots cannot be added to the board. pub async fn add_ballots_to_board( pm: &ProtocolManager, b3_client: &mut PgsqlB3Client, @@ -413,6 +501,11 @@ pub async fn add_ballots_to_board( } #[instrument(err)] +/// Get a board client for Immudb. +/// +/// # Errors +/// +/// Returns an error if the board client cannot be created. pub async fn get_board_client() -> Result { let username = env::var("IMMUDB_USER").context("IMMUDB_USER must be set")?; let password = env::var("IMMUDB_PASSWORD").context("IMMUDB_PASSWORD must be set")?; @@ -424,6 +517,11 @@ pub async fn get_board_client() -> Result { } #[instrument(err)] +/// Get a B3 client for PostgreSQL. +/// +/// # Errors +/// +/// Returns an error if the B3 client cannot be created. pub async fn get_b3_pgsql_client() -> Result { let username = env::var("B3_PG_USER").context("B3_PG_USER must be set")?; let password = env::var("B3_PG_PASSWORD").context("B3_PG_PASSWORD must be set")?; @@ -441,6 +539,11 @@ pub async fn get_b3_pgsql_client() -> Result { } #[instrument(err)] +/// Get a client for Immudb. +/// +/// # Errors +/// +/// Returns an error if the client cannot be created. pub async fn get_immudb_client() -> Result { let username = env::var("IMMUDB_USER").context("IMMUDB_USER must be set")?; let password = env::var("IMMUDB_PASSWORD").context("IMMUDB_PASSWORD must be set")?; @@ -452,6 +555,7 @@ pub async fn get_immudb_client() -> Result { Ok(client) } +/// Create a named parameter for an Immudb query. pub fn create_named_param(name: String, value: Value) -> NamedParam { NamedParam { name, @@ -459,6 +563,7 @@ pub fn create_named_param(name: String, value: Value) -> NamedParam { } } +/// Get the board name for an election event. 
pub fn get_event_board(tenant_id: &str, election_event_id: &str, slug: &str) -> String { let tenant: String = tenant_id .to_string() @@ -472,6 +577,7 @@ pub fn get_event_board(tenant_id: &str, election_event_id: &str, slug: &str) -> .collect() } +/// Get the board name for an election. pub fn get_election_board(tenant_id: &str, election_id: &str, slug: &str) -> String { let tenant: String = tenant_id .to_string() @@ -485,6 +591,11 @@ pub fn get_election_board(tenant_id: &str, election_id: &str, slug: &str) -> Str .collect() } +/// Convert B3 messages to a vector of Messages. +/// +/// # Errors +/// +/// Returns an error if any board message cannot be decoded. pub fn convert_board_messages(board_messages: &[B3MessageRow]) -> Result> { let messages: Vec = board_messages .iter() @@ -493,6 +604,11 @@ pub fn convert_board_messages(board_messages: &[B3MessageRow]) -> Result( board_name: &str, b3_client: &PgsqlB3Client, diff --git a/packages/windmill/src/services/providers/email_sender.rs b/packages/windmill/src/services/providers/email_sender.rs index 0be675cd84..b4fffaf04a 100644 --- a/packages/windmill/src/services/providers/email_sender.rs +++ b/packages/windmill/src/services/providers/email_sender.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Email delivery via AWS SES, SMTP, or a console logger. use crate::types::error::Result; use anyhow::anyhow; @@ -20,30 +21,49 @@ use lettre::Transport; use sequent_core::util::aws::get_from_env_aws_config; use serde::Deserialize; +/// File attachment metadata and raw bytes. pub struct Attachment { + /// Attachment filename. pub filename: String, + /// MIME type for the attachment content. pub mimetype: String, + /// Attachment payload bytes. pub content: Vec, } #[derive(Debug, Deserialize)] +/// SMTP transport configuration. struct SmtpConfig { + /// SMTP URL as accepted by `lettre`. server_url: String, + /// Timeout in seconds. 
timeout: Option, // in seconds } +/// Transport backend selected for email delivery. pub enum EmailTransport { + /// AWS SESv2 client used to send raw MIME messages. AwsSes(AwsSesClient), + /// SMTP transport configured from `EMAIL_TRANSPORT_CONFIG`. Smtp(SmtpTransport), + /// Log-only transport (no delivery). Console, } +/// Email sender configured from environment variables. pub struct EmailSender { + /// Selected transport backend. transport: EmailTransport, + /// From address used when building messages. email_from: String, } impl EmailSender { + /// Builds an [`EmailSender`] using environment configuration. + /// + /// # Errors + /// + /// Returns an error if env/config parsing fails. #[instrument(err)] pub async fn new() -> Result { let email_from = std::env::var("EMAIL_FROM") @@ -95,6 +115,11 @@ impl EmailSender { }) } + /// Sends a MIME email with optional HTML and attachments. + /// + /// # Errors + /// + /// Returns an error if message building or delivery fails. #[instrument(skip(self, plaintext_body, html_body, attachments), err)] pub async fn send( &self, diff --git a/packages/windmill/src/services/providers/mod.rs b/packages/windmill/src/services/providers/mod.rs index 6ae8a47058..136faed536 100644 --- a/packages/windmill/src/services/providers/mod.rs +++ b/packages/windmill/src/services/providers/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Provider wrappers for external delivery (email/SMS) and transactional helpers. + pub mod email_sender; pub mod sms_sender; pub mod transactions_provider; diff --git a/packages/windmill/src/services/providers/sms_sender.rs b/packages/windmill/src/services/providers/sms_sender.rs index 1cb868aae3..3dc9654150 100644 --- a/packages/windmill/src/services/providers/sms_sender.rs +++ b/packages/windmill/src/services/providers/sms_sender.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! 
SMS delivery via AWS SNS or a console logger. use crate::types::error::Result; use anyhow::anyhow; @@ -10,18 +11,29 @@ use sequent_core::util::aws::get_from_env_aws_config; use std::collections::HashMap; use tracing::{event, instrument, Level}; +/// Optional set of AWS SNS message attributes applied to each published message. type MessageAttributes = Option>; +/// Transport backend selected for SMS delivery. pub enum SmsTransport { + /// AWS SNS client plus optional per-message attributes. AwsSns((AwsSnsClient, MessageAttributes)), + /// Log-only transport (no delivery). Console, } +/// SMS sender configured from environment variables. pub struct SmsSender { + /// Active SMS transport. pub transport: SmsTransport, } impl SmsSender { + /// Builds an [`SmsSender`] from environment configuration. + /// + /// # Errors + /// + /// Returns an error if env/config parsing fails. #[instrument(err)] pub async fn new() -> Result { let sms_transport_name = std::env::var("SMS_TRANSPORT_NAME") @@ -66,6 +78,11 @@ impl SmsSender { }) } + /// Sends a single SMS message. + /// + /// # Errors + /// + /// Returns an error if delivery fails. #[instrument(skip(self, message), err)] pub async fn send(&self, receiver: String, message: String) -> Result<()> { match self.transport { diff --git a/packages/windmill/src/services/providers/transactions_provider.rs b/packages/windmill/src/services/providers/transactions_provider.rs index 93999b3631..4acde93da7 100644 --- a/packages/windmill/src/services/providers/transactions_provider.rs +++ b/packages/windmill/src/services/providers/transactions_provider.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Transaction helpers for Hasura Postgres, SQLite, and ImmuDB. 
use crate::services::database::get_hasura_pool; use crate::services::protocol_manager::get_immudb_client; use anyhow::{anyhow, Context, Result}; @@ -15,6 +16,11 @@ use std::pin::Pin; use tokio::task; use tracing::instrument; +/// Runs `handler` within a Postgres transaction on `db_client`, committing on success and rolling back on error. +/// +/// # Errors +/// +/// Returns an error if the transaction cannot be started/committed/rolled back, or if `handler` fails. #[instrument(skip(handler), err)] pub async fn provide_transaction(handler: F, mut db_client: DbClient) -> Result<()> where @@ -43,6 +49,11 @@ where Ok(()) } +/// Runs `handler` within a SQLite transaction opened from `database_path`. +/// +/// # Errors +/// +/// Returns an error if the DB cannot be opened, the transaction fails, or `handler` fails. #[instrument(skip(handler), err)] pub async fn provide_sqlite_transaction(handler: F, database_path: &Path) -> Result<()> where @@ -73,6 +84,11 @@ where .await? } +/// Convenience wrapper over [`provide_transaction`] using the shared Hasura pool. +/// +/// # Errors +/// +/// Returns an error if acquiring the pool client or running the transaction fails. #[instrument(skip(handler), err)] pub async fn provide_hasura_transaction(handler: F) -> Result<()> where @@ -87,6 +103,11 @@ where provide_transaction(handler, hasura_db_client).await } +/// Runs `handler` within an ImmuDB transaction (`new_tx`) scoped to `immudb_db`. +/// +/// # Errors +/// +/// Returns an error if session/tx lifecycle operations fail or if `handler` fails. #[instrument(skip(handler), err)] pub async fn provide_transaction_immudb( handler: F, @@ -139,6 +160,11 @@ where Ok(()) } +/// Convenience wrapper over [`provide_transaction_immudb`] using the shared ImmuDB client. +/// +/// # Errors +/// +/// Returns an error if acquiring the client or running the transaction fails. 
#[instrument(skip(handler), err)] pub async fn provide_immudb_transaction(handler: F, immudb_db: &str) -> Result<()> where diff --git a/packages/windmill/src/services/public_keys.rs b/packages/windmill/src/services/public_keys.rs index 7158967e73..18342bb8e3 100644 --- a/packages/windmill/src/services/public_keys.rs +++ b/packages/windmill/src/services/public_keys.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Public key parsing and validation for trustees, boards, and verification. + use anyhow::{anyhow, Result}; use base64::engine::general_purpose; use base64::Engine; @@ -15,11 +17,22 @@ use tracing::{info, instrument}; use super::protocol_manager; #[instrument(err)] +/// Parse a Base64-encoded DER public key into a `StrandSignaturePk`. +/// +/// # Errors +/// +/// Returns an error if the key cannot be decoded or parsed. pub fn deserialize_public_key(public_key_string: String) -> Result { StrandSignaturePk::from_der_b64_string(&public_key_string).map_err(|err| anyhow!("{:?}", err)) } #[instrument(skip(trustee_pks, threshold), err)] +/// Create an election-event board configuration with the provided trustee public keys. +/// +/// # Errors +/// +/// Returns an error if the protocol manager cannot be loaded, trustee keys cannot be parsed, +/// or board configuration insertion fails. pub async fn create_keys( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -54,6 +67,11 @@ pub async fn create_keys( } #[instrument(err)] +/// Read and return the board's public key, encoded as Base64 (no padding). +/// +/// # Errors +/// +/// Returns an error if the key cannot be retrieved or serialized. 
pub async fn get_public_key(board_name: String) -> Result { let pk = protocol_manager::get_board_public_key::(board_name.as_str()).await?; let pk_bytes = pk.strand_serialize()?; diff --git a/packages/windmill/src/services/reports/activity_log.rs b/packages/windmill/src/services/reports/activity_log.rs index 38724fc8e2..d947dcf8d2 100644 --- a/packages/windmill/src/services/reports/activity_log.rs +++ b/packages/windmill/src/services/reports/activity_log.rs @@ -2,6 +2,13 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Activity log report template renderer and export helpers. +//! +//! This report can be generated as: +//! - **PDF**, rendered through the shared template pipeline (batched when large). +//! - **CSV**, exported directly from the electoral-log board by streaming entries +//! in batches into a temporary file. + use super::template_renderer::*; use crate::postgres::reports::{Report, ReportType}; use crate::services::documents::upload_and_return_document; @@ -27,12 +34,16 @@ use tempfile::NamedTempFile; use tracing::{debug, info, instrument, warn}; #[derive(Serialize, Deserialize, Debug, Clone, EnumString, PartialEq, Copy)] +/// Output format for activity-log reports. +#[allow(missing_docs)] pub enum ReportFormat { CSV, PDF, } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Row rendered into the PDF user template for activity logs. +#[allow(clippy::missing_docs_in_private_items)] pub struct ActivityLogRow { id: i64, created: String, @@ -45,35 +56,53 @@ pub struct ActivityLogRow { user_id: String, } -/// Struct for User Data -/// act_log is for PDF -/// electoral_log is for CSV #[derive(Serialize, Deserialize, Debug, Clone)] +/// User-side data for activity log report generation. pub struct UserData { + /// Converted activity-log rows for PDF. pub act_log: Vec, + /// Raw electoral-log rows for CSV. 
pub electoral_log: Vec, } -/// Struct for System Data #[derive(Serialize, Deserialize, Debug, Clone)] +/// System-side variables used by the activity-log system template. pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, } /// Implementation of TemplateRenderer for Activity Logs #[derive(Debug)] +/// Renderer for the activity logs report templates. +#[allow(clippy::missing_docs_in_private_items)] pub struct ActivityLogsTemplate { ids: ReportOrigins, report_format: ReportFormat, } impl ActivityLogsTemplate { + /// Creates a renderer bound to a specific tenant/event. pub fn new(ids: ReportOrigins, report_format: ReportFormat) -> Self { ActivityLogsTemplate { ids, report_format } } // Export data using the electoral-log board client, streaming in batches #[instrument(err, skip(self))] + /// Streams electoral-log entries into a temporary CSV file. + /// + /// This uses the board client directly so the export does not require + /// materializing all entries in memory. + /// + /// # Errors + /// + /// Returns an error if the board cannot be accessed, the file cannot be + /// created, or any row cannot be serialized. + /// + /// # Panics + /// + /// Panics if the computed batch size overflows `usize` while estimating the + /// memory footprint of a fetched batch. pub async fn generate_export_csv_data(&self, name: &str) -> Result { let limit = IMMUDB_ROWS_LIMIT as i64; let mut last_id: i64 = 0; @@ -258,6 +287,11 @@ impl TemplateRenderer for ActivityLogsTemplate { fn prefix(&self) -> String { format!("activity_logs_{}", rand::random::()) } + /// Returns the total number of electoral-log messages for the event board. + /// + /// # Errors + /// + /// Returns an error if the board cannot be accessed or counted. 
async fn count_items(&self, _hasura_transaction: &Transaction<'_>) -> Result> { let mut client = get_board_client().await?; let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; @@ -274,6 +308,15 @@ impl TemplateRenderer for ActivityLogsTemplate { } #[instrument(err, skip_all)] + /// Fetches one batch of activity-log entries for rendering. + /// + /// The caller uses offset-based pagination so batches can be rendered in + /// parallel. Depending on `report_format`, the fetched messages are converted + /// either into `ActivityLogRow` values (PDF) or `ElectoralLogRow` values (CSV). + /// + /// # Errors + /// + /// Returns an error if the board cannot be queried or any entry cannot be converted. async fn prepare_user_data_batch( &self, _hasura_transaction: &Transaction<'_>, @@ -316,6 +359,11 @@ impl TemplateRenderer for ActivityLogsTemplate { } #[instrument(err, skip_all)] + /// Disabled for this report type; activity logs are fetched in batches. + /// + /// # Errors + /// + /// Always returns an error directing callers to use batching. async fn prepare_user_data( &self, _hasura_transaction: &Transaction<'_>, @@ -327,6 +375,11 @@ impl TemplateRenderer for ActivityLogsTemplate { } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved. async fn prepare_system_data( &self, rendered_user_template: String, @@ -341,6 +394,11 @@ impl TemplateRenderer for ActivityLogsTemplate { } #[instrument(err, skip_all)] + /// Executes report generation, overriding behavior for CSV exports. + /// + /// # Errors + /// + /// Returns an error if generation, upload, or optional email notification fails. 
async fn execute_report( &self, document_id: &str, @@ -449,8 +507,15 @@ impl TemplateRenderer for ActivityLogsTemplate { } } -// Export data #[instrument(err, skip(act_log))] +/// Writes electoral-log rows into a temporary CSV file. +/// +/// This helper is used by legacy CSV generation paths that already have rows in +/// memory. +/// +/// # Errors +/// +/// Returns an error if the temp file cannot be created or writing/serialization fails. pub async fn generate_export_data( act_log: &[ElectoralLogRow], name: &str, diff --git a/packages/windmill/src/services/reports/ballot_images.rs b/packages/windmill/src/services/reports/ballot_images.rs index 7874974bc4..81bcb02733 100644 --- a/packages/windmill/src/services/reports/ballot_images.rs +++ b/packages/windmill/src/services/reports/ballot_images.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Ballot-images report template renderer. +//! +//! This report renders a PDF containing ballot images and associated computed +//! data produced by the `velvet` pipeline. + use super::template_renderer::*; use crate::postgres::reports::ReportType; use crate::services::temp_path::*; @@ -15,29 +21,41 @@ use tracing::instrument; use velvet::pipes::ballot_images::ComputedTemplateData; #[derive(Serialize, Deserialize, Debug, Clone)] +/// System-side variables used by the ballot-images system template. pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, + /// URL or path to the QR-code JS library used by the PDF rendering backend. pub file_qrcode_lib: String, + /// Report title to display in the report. pub title: String, } #[derive(Debug, Serialize, Deserialize, Clone)] +/// Additional user-facing strings used by the user template. pub struct UserExtraData { + /// Report title to render in the user template. 
pub title: String, } #[derive(Debug, Serialize, Deserialize, Clone)] +/// User-side data for ballot-images rendering. pub struct UserData { + /// Computed ballot image data produced by the `velvet` pipeline. pub data: ComputedTemplateData, + /// Small extra strings passed alongside computed data. pub extra_data: UserExtraData, } #[derive(Debug)] +/// Renderer for the ballot-images report templates. +#[allow(clippy::missing_docs_in_private_items)] pub struct BallotImagesTemplate { ids: ReportOrigins, } impl BallotImagesTemplate { + /// Creates a renderer bound to a specific tenant/event (and optionally election/template). pub fn new(ids: ReportOrigins) -> Self { BallotImagesTemplate { ids } } @@ -86,6 +104,11 @@ impl TemplateRenderer for BallotImagesTemplate { } #[instrument(err, skip_all)] + /// Prepares the user-side data for this report type. + /// + /// # Errors + /// + /// Currently unimplemented for this report type. async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, @@ -94,6 +117,12 @@ impl TemplateRenderer for BallotImagesTemplate { Err(anyhow::anyhow!("Unimplemented")) } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved when + /// rendering is performed in-place. async fn prepare_system_data( &self, rendered_user_template: String, diff --git a/packages/windmill/src/services/reports/ballot_receipt.rs b/packages/windmill/src/services/reports/ballot_receipt.rs index 668330a77c..f256504a65 100644 --- a/packages/windmill/src/services/reports/ballot_receipt.rs +++ b/packages/windmill/src/services/reports/ballot_receipt.rs @@ -1,6 +1,13 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Ballot receipt report template renderer. +//! +//! This report is generated from the voting portal after casting a vote. It +//! 
verifies the ballot exists in the cast-vote records and renders a PDF receipt +//! containing the ballot tracker URL and a timestamp. + use super::template_renderer::*; use crate::postgres::reports::ReportType; use crate::postgres::{self}; @@ -20,42 +27,65 @@ use serde::{Deserialize, Serialize}; use tracing::instrument; use uuid::Uuid; -/// Wrapper struct for data specific to the ballot and the voter -/// which won't be needed in the preview mode. +/// Ballot- and voter-specific inputs required to generate a real receipt. +/// +/// This data is not required for preview mode, where the renderer reads example +/// JSON data from public assets instead. #[derive(Serialize, Deserialize, Debug)] pub struct BallotData { + /// Area where the ballot was cast. pub area_id: String, + /// Voter identifier (Keycloak user id). pub voter_id: String, + /// Ballot identifier used by cast-vote records. pub ballot_id: String, + /// Public URL used to track the ballot. pub ballot_tracker_url: String, + /// Optional time zone for rendering timestamps. pub time_zone: Option, + /// Optional date format for rendering timestamps. pub date_format: Option, } #[derive(Serialize, Deserialize, Debug, Clone)] +/// User-side data rendered into the ballot-receipt user template. pub struct UserData { + /// Ballot identifier to display on the receipt. pub ballot_id: String, + /// Public ballot tracker URL. pub ballot_tracker_url: String, + /// HTML template snippet for rendering a QR code. pub qrcode: String, + /// HTML template snippet for rendering the logo. pub logo: String, + /// Timestamp string included in the rendered receipt. pub timestamp: String, } #[derive(Serialize, Deserialize, Debug, Clone)] +/// System-side variables used by the ballot-receipt system template. pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, + /// PDF title. pub title: String, + /// URL or path to the logo image. 
pub file_logo: String, + /// URL or path to the QR-code JS library. pub file_qrcode_lib: String, } #[derive(Debug)] +/// Renderer for the ballot-receipt report templates. +#[allow(clippy::missing_docs_in_private_items)] pub struct BallotTemplate { ids: ReportOrigins, + /// Only present in real mode; preview mode uses public-assets JSON. pub ballot_data: Option, } impl BallotTemplate { + /// Creates a renderer for the given tenant/event and ballot inputs. pub fn new(ids: ReportOrigins, ballot_data: Option) -> Self { BallotTemplate { ids, ballot_data } } @@ -99,6 +129,12 @@ impl TemplateRenderer for BallotTemplate { } #[instrument(err, skip_all)] + /// Validates the ballot exists in cast votes and builds receipt template data. + /// + /// # Errors + /// + /// Returns an error if required ids are missing, ids are not valid UUIDv4, + /// cast votes cannot be fetched, or the given ballot cannot be found for the voter. async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, @@ -159,6 +195,11 @@ impl TemplateRenderer for BallotTemplate { } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved. async fn prepare_system_data( &self, rendered_user_template: String, diff --git a/packages/windmill/src/services/reports/electoral_results.rs b/packages/windmill/src/services/reports/electoral_results.rs index b706014b88..ef20e81d34 100644 --- a/packages/windmill/src/services/reports/electoral_results.rs +++ b/packages/windmill/src/services/reports/electoral_results.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Electoral-results report template renderer. +//! +//! This report integrates with the shared template-rendering pipeline and +//! provides system-side variables required by the PDF rendering backend. 
+ use super::template_renderer::*; use crate::postgres::reports::ReportType; use crate::services::temp_path::*; @@ -16,17 +22,23 @@ use tracing::{info, instrument}; use velvet::pipes::generate_reports::TemplateData; #[derive(Debug, Serialize, Deserialize, Clone)] +/// System-side variables used by the electoral-results system template. pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, + /// URL or path to the QR-code JS library used by the PDF rendering backend. pub file_qrcode_lib: String, } #[derive(Debug)] +/// Renderer for the electoral-results report templates. +#[allow(clippy::missing_docs_in_private_items)] pub struct ElectoralResults { ids: ReportOrigins, } impl ElectoralResults { + /// Creates a renderer bound to a specific tenant/event (and optionally election/template). pub fn new(ids: ReportOrigins) -> Self { ElectoralResults { ids } } @@ -75,6 +87,11 @@ impl TemplateRenderer for ElectoralResults { } #[instrument(err, skip_all)] + /// Prepares the user-side data for this report type. + /// + /// # Errors + /// + /// Currently unimplemented for this report type. async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, @@ -84,6 +101,12 @@ impl TemplateRenderer for ElectoralResults { } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved when + /// rendering is performed in-place. 
async fn prepare_system_data( &self, rendered_user_template: String, diff --git a/packages/windmill/src/services/reports/initialization.rs b/packages/windmill/src/services/reports/initialization.rs index 1d24a926e5..ba47cbe424 100644 --- a/packages/windmill/src/services/reports/initialization.rs +++ b/packages/windmill/src/services/reports/initialization.rs @@ -1,6 +1,13 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Initialization report template renderer. +//! +//! This report integrates with the shared template-rendering pipeline and +//! provides system-side variables (such as the QR-code JS library location) +//! required by the PDF rendering backend. + use super::template_renderer::*; use crate::postgres::reports::ReportType; use crate::services::temp_path::*; @@ -15,17 +22,23 @@ use tracing::{info, instrument}; use velvet::pipes::generate_reports::TemplateData; #[derive(Debug, Serialize, Deserialize, Clone)] +/// System-side variables used by the initialization report system template. pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, + /// URL or path to the QR-code JS library used by the PDF rendering backend. pub file_qrcode_lib: String, } #[derive(Debug)] +/// Renderer for the initialization report templates. +#[allow(clippy::missing_docs_in_private_items)] pub struct InitializationTemplate { ids: ReportOrigins, } impl InitializationTemplate { + /// Creates a renderer bound to a specific tenant/event (and optionally election/template). pub fn new(ids: ReportOrigins) -> Self { InitializationTemplate { ids } } @@ -74,6 +87,11 @@ impl TemplateRenderer for InitializationTemplate { } #[instrument(err, skip_all)] + /// Prepares the user-side data for this report type. + /// + /// # Errors + /// + /// Currently unimplemented for this report type. 
async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, @@ -83,6 +101,12 @@ impl TemplateRenderer for InitializationTemplate { } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved when + /// rendering is performed in-place. async fn prepare_system_data( &self, rendered_user_template: String, diff --git a/packages/windmill/src/services/reports/manual_verification.rs b/packages/windmill/src/services/reports/manual_verification.rs index cdf1ae82de..f08c331cdd 100644 --- a/packages/windmill/src/services/reports/manual_verification.rs +++ b/packages/windmill/src/services/reports/manual_verification.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Manual verification report template renderer. +//! +//! This report produces a PDF containing a per-voter link (and QR code) that +//! takes the voter through a Keycloak manual verification flow. + use super::template_renderer::*; use crate::postgres::reports::{Report, ReportType}; use crate::services::temp_path::*; @@ -14,35 +20,44 @@ use serde::{Deserialize, Serialize}; use std::env; use tracing::{info, instrument}; -/// Struct returned by the API call for manual verification URL +/// Response returned by Keycloak's manual-verification endpoint. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ManualVerificationOutput { + /// Generated link that redirects to the voting portal login flow. pub link: String, } -/// Struct for User Data +/// User-side data rendered into the manual-verification user template. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct UserData { + /// Per-voter manual verification link. pub manual_verification_url: String, + /// HTML template snippet for rendering a QR code. pub qrcode: String, + /// HTML template snippet for rendering the logo. 
pub logo: String, } -/// Struct for System Data +/// System-side variables used by the manual-verification system template. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct SystemData { + /// User template already rendered with `UserData`. pub rendered_user_template: String, + /// URL or path to the logo image. pub file_logo: String, + /// URL or path to the QR-code JS library used by the PDF rendering backend. pub file_qrcode_lib: String, } -/// Implementation of TemplateRenderer for Manual Verification +/// Renderer for the manual-verification report templates. #[derive(Debug)] +#[allow(missing_docs_in_private_items)] pub struct ManualVerificationTemplate { ids: ReportOrigins, } impl ManualVerificationTemplate { + /// Creates a renderer bound to a specific tenant/event and voter id. pub fn new(ids: ReportOrigins) -> Self { ManualVerificationTemplate { ids } } @@ -88,6 +103,11 @@ impl TemplateRenderer for ManualVerificationTemplate { } #[instrument(err, skip_all)] + /// Builds the per-voter manual verification URL and template placeholders. + /// + /// # Errors + /// + /// Returns an error if the URL cannot be generated via Keycloak. async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, @@ -109,6 +129,11 @@ impl TemplateRenderer for ManualVerificationTemplate { } #[instrument(err, skip_all)] + /// Prepares system-side variables used by the system template. + /// + /// # Errors + /// + /// Returns an error if public-assets configuration cannot be resolved. async fn prepare_system_data( &self, rendered_user_template: String, @@ -143,7 +168,15 @@ impl TemplateRenderer for ManualVerificationTemplate { } } -/// Function to get the manual verification URL +/// Requests a per-voter manual verification link from Keycloak. +/// +/// The returned link is intended to redirect back to the voting portal login +/// page for the provided tenant and election event. 
+/// +/// # Errors +/// +/// Returns an error if required environment variables are missing, the HTTP +/// request fails, or Keycloak returns a non-OK response. #[instrument(err)] async fn get_manual_verification_url( tenant_id: &str, diff --git a/packages/windmill/src/services/reports/mod.rs b/packages/windmill/src/services/reports/mod.rs index 81520cbb95..2e31b468a0 100644 --- a/packages/windmill/src/services/reports/mod.rs +++ b/packages/windmill/src/services/reports/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Report template renderers and report-generation helpers. + pub mod activity_log; pub mod ballot_images; pub mod ballot_receipt; diff --git a/packages/windmill/src/services/reports/report_variables.rs b/packages/windmill/src/services/reports/report_variables.rs index 32ea749d62..8858a69a2a 100644 --- a/packages/windmill/src/services/reports/report_variables.rs +++ b/packages/windmill/src/services/reports/report_variables.rs @@ -1,7 +1,9 @@ -use crate::postgres::area::{get_area_by_id, get_areas_by_election_id}; // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Shared report variables and data extraction helpers. + +use crate::postgres::area::{get_area_by_id, get_areas_by_election_id}; use crate::postgres::election::get_election_by_id; use crate::postgres::results_area_contest::get_results_area_contest; use crate::postgres::results_election::{ @@ -36,11 +38,16 @@ use tracing::instrument; pub use crate::services::users::{VALIDATE_ID_ATTR_NAME, VALIDATE_ID_REGISTERED_VOTER}; pub use sequent_core::util::date_time::get_date_and_time; +/// Default chairperson inspector label used when no SBEI data is configured. pub const DEFULT_CHAIRPERSON: &str = "Chairperson"; +/// Default poll clerk inspector label used when no SBEI data is configured. pub const DEFULT_POLL_CLERK: &str = "Poll Clerk"; +/// Default third-member inspector label used when no SBEI data is configured. 
pub const DEFULT_THIRD_MEMBER: &str = "Third Member"; #[derive(Serialize, Deserialize, Debug, Clone)] +/// Metadata printed on reports to support traceability. +#[allow(missing_docs)] pub struct ExecutionAnnotations { pub date_printed: String, pub report_hash: String, @@ -51,22 +58,37 @@ pub struct ExecutionAnnotations { pub results_hash: Option, } +/// Returns the application build hash for report annotations. pub fn get_app_hash() -> String { env::var(ENV_VAR_APP_HASH).unwrap_or("-".to_string()) } +/// Returns the application version for report annotations. pub fn get_app_version() -> String { env::var(ENV_VAR_APP_VERSION).unwrap_or("-".to_string()) } #[derive(Debug)] +/// Aggregated turnout information for an election or area. pub struct ElectionVotesData { + /// Registered voters count used for turnout computation. pub registered_voters: Option, + /// Total ballots cast. pub total_ballots: Option, + /// Turnout percentage in the range [0, 100]. pub voters_turnout: Option, } #[instrument(err, skip_all)] +/// Computes turnout-related data for an election. +/// +/// This combines registered voter counts derived from area annotations with the +/// most recent election results totals (when available). +/// +/// # Errors +/// +/// Returns an error if required areas/results cannot be queried or annotations +/// cannot be parsed. pub async fn generate_election_votes_data( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -124,6 +146,12 @@ pub async fn generate_election_votes_data( } #[instrument(err, skip_all)] +/// Computes turnout-related data for a specific area (and optional contest). +/// +/// # Errors +/// +/// Returns an error if required area/results cannot be queried or annotations +/// cannot be parsed. 
pub async fn generate_election_area_votes_data( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -176,6 +204,11 @@ pub async fn generate_election_area_votes_data( } #[instrument(err, skip_all)] +/// Calculates turnout percentage for `total_ballots / registered_voters`. +/// +/// # Errors +/// +/// Returns an error if the calculation cannot be performed. pub fn calc_voters_turnout(total_ballots: i64, registered_voters: i64) -> Result> { if registered_voters == 0 { return Ok(Some(0.0)); @@ -186,6 +219,11 @@ pub fn calc_voters_turnout(total_ballots: i64, registered_voters: i64) -> Result } #[instrument(err, skip_all)] +/// Counts registered voters in a Keycloak realm for a specific area id. +/// +/// # Errors +/// +/// Returns an error if Keycloak cannot be queried. pub async fn get_total_number_of_registered_voters_for_area_id( keycloak_transaction: &Transaction<'_>, realm: &str, @@ -217,6 +255,11 @@ pub async fn get_total_number_of_registered_voters_for_area_id( } #[instrument(err, skip_all)] +/// Counts all enabled registered voters in a Keycloak realm. +/// +/// # Errors +/// +/// Returns an error if Keycloak cannot be queried. pub async fn get_total_number_of_registered_voters( keycloak_transaction: &Transaction<'_>, realm: &str, @@ -228,6 +271,8 @@ pub async fn get_total_number_of_registered_voters( } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Election annotations extracted into a stable shape for templates. +#[allow(missing_docs)] pub struct ElectionData { pub geographical_region: String, pub voting_center: String, @@ -237,6 +282,11 @@ pub struct ElectionData { } #[instrument(err, skip_all)] +/// Extracts election annotations used by multiple reports. +/// +/// # Errors +/// +/// Returns an error if the election annotations are missing or cannot be parsed. 
pub async fn extract_election_data(election: &Election) -> Result { let annotations: crate::services::consolidation::eml_generator::MiruElectionAnnotations = election.get_annotations_or_empty_values()?; @@ -250,11 +300,18 @@ pub async fn extract_election_data(election: &Election) -> Result }) } +/// Election-event annotations required by some MIRU-oriented report templates. +#[allow(missing_docs)] pub struct ElectionEventAnnotation { pub sbei_users: Vec, } #[instrument(err, skip_all)] +/// Extracts election-event annotations used by report templates. +/// +/// # Errors +/// +/// Returns an error if the election event annotations are missing or cannot be parsed. pub async fn extract_election_event_annotations( election_event: &ElectionEvent, ) -> Result { @@ -267,17 +324,29 @@ pub async fn extract_election_event_annotations( } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Inspector entry rendered in area-level reports. +#[allow(missing_docs)] pub struct InspectorData { pub role: String, pub name: String, } +/// Area annotations extracted into a stable shape for templates. +#[allow(missing_docs)] pub struct AreaData { pub inspectors: Vec, pub registered_voters: i64, } #[instrument(err, skip_all)] +/// Extracts inspector and voter-count data for an area. +/// +/// If SBEI ids are not configured (or no SBEI users are available at event +/// level), this falls back to default inspector labels. +/// +/// # Errors +/// +/// Returns an error if area annotations cannot be parsed. pub async fn extract_area_data( area: &Area, election_event_sbei_users: Vec, @@ -331,6 +400,12 @@ pub async fn extract_area_data( } #[instrument(err, skip(hasura_transaction))] +/// Retrieves the `results_hash` annotation for the latest electoral-results tally. +/// +/// # Errors +/// +/// Returns an error if no tally session/execution exists yet or if querying +/// results data fails. 
pub async fn get_results_hash( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -406,6 +481,11 @@ pub async fn get_results_hash( } #[instrument(err, skip_all)] +/// Computes a deterministic hash used to identify a report instance. +/// +/// # Errors +/// +/// Returns an error if hashing fails. pub async fn get_report_hash(report_type: &str) -> Result { let date_and_time = get_date_and_time(); let report_date_time = format!("{}{}", report_type, date_and_time); @@ -415,6 +495,8 @@ pub async fn get_report_hash(report_type: &str) -> Result { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Per-election user data used by some report templates. +#[allow(missing_docs)] pub struct UserDataElection { pub election_dates: StringifiedPeriodDates, pub election_name: String, @@ -422,12 +504,19 @@ pub struct UserDataElection { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Aggregated election data grouped by region/post for template rendering. +#[allow(missing_docs)] pub struct UserDataElections { pub regions: Vec<(String, Vec)>, pub elections: Vec, } #[instrument(err, skip_all)] +/// Builds per-election template data and a region/post index. +/// +/// # Errors +/// +/// Returns an error if annotations or election dates cannot be computed. pub async fn process_elections( elections: Vec, scheduled_events: Vec, diff --git a/packages/windmill/src/services/reports/template_renderer.rs b/packages/windmill/src/services/reports/template_renderer.rs index 4af457629c..96c88afd58 100644 --- a/packages/windmill/src/services/reports/template_renderer.rs +++ b/packages/windmill/src/services/reports/template_renderer.rs @@ -2,6 +2,14 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Shared report template rendering pipeline. +//! +//! Concrete report types implement [`TemplateRenderer`] and provide user/system +//! data. The pipeline resolves templates (custom in Postgres or defaults from +//! 
public assets), renders them with `handlebars`-style substitutions, produces +//! PDFs, optionally encrypts the output, uploads the resulting document, and +//! may notify recipients via email. + use super::utils::get_public_asset_template; use crate::postgres::reports::{get_template_alias_for_report, Report, ReportType}; use crate::postgres::{election_event, template}; @@ -43,6 +51,7 @@ use tempfile::{NamedTempFile, TempPath}; use tokio::runtime::Runtime; use tracing::{debug, info, instrument, warn}; +/// Tokio runtime used to execute async rendering work from Rayon threads. static GLOBAL_RT: Lazy = Lazy::new(|| { tokio::runtime::Builder::new_multi_thread() .enable_all() @@ -51,12 +60,17 @@ static GLOBAL_RT: Lazy = Lazy::new(|| { }); #[allow(non_camel_case_types)] #[derive(Display, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, EnumString)] +/// Selects whether the report is rendered using preview data or live data. pub enum GenerateReportMode { + /// Render using the preview JSON found in public assets for the report type. PREVIEW, + /// Render using live data from the database. REAL, } #[derive(Debug)] +/// Identifiers and context describing where a report generation comes from. +#[allow(missing_docs)] pub struct ReportOrigins { pub tenant_id: String, pub election_event_id: String, @@ -74,11 +88,14 @@ pub struct ReportOrigins { // } // } -/// To signify how the report generation was triggered #[derive(Debug, Clone, Copy)] +/// Signify how the report generation was triggered pub enum ReportOriginatedFrom { + /// Triggered by the voting portal. VotingPortal, + /// Triggered by an export action. ExportFunction, + /// Triggered from the admin reports tab. ReportsTab, } @@ -88,32 +105,57 @@ pub enum ReportOriginatedFrom { )] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] +/// Encryption policy applied to the generated report artifact. 
+#[allow(missing_docs)] pub enum EReportEncryption { Unencrypted, ConfiguredPassword, } +/// Default maximum number of items rendered per report when batching is supported. pub const DEFAULT_ITEMS_PER_REPORT_LIMIT: usize = 1000; -/// Trait that defines the behavior for rendering templates + +/// Trait implemented by each report type to provide template data and metadata. +#[allow(missing_docs)] // `async_trait` expands to items that trigger `missing_docs` on this trait. #[async_trait] pub trait TemplateRenderer: Debug { + /// Report-specific data rendered into the user template. type UserData: Serialize + ToMap + Send + for<'de> Deserialize<'de>; + /// Report-specific data rendered into the system template. type SystemData: Serialize + ToMap + for<'de> Deserialize<'de>; + /// Base name used to locate default templates and preview data in public assets. fn base_name(&self) -> String; + /// Report type used to select templates and behavior. fn get_report_type(&self) -> ReportType; + /// Filename prefix for generated artifacts. fn prefix(&self) -> String; + /// Tenant identifier used by data fetchers and template resolution. fn get_tenant_id(&self) -> String; + /// Election event identifier used by data fetchers and template resolution. fn get_election_event_id(&self) -> String; + /// Source that initiated report generation. fn get_report_origin(&self) -> ReportOriginatedFrom; /// Can be None when a report is generated with no template assigned to it, /// or from other place than the reports TAB. fn get_initial_template_alias(&self) -> Option; + /// Returns the number of items available for this report type, if applicable. + /// + /// This is used to decide whether batched processing should be used. + /// + /// # Errors + /// + /// Returns an error if counting requires external IO and it fails. async fn count_items(&self, hasura_transaction: &Transaction<'_>) -> Result> { Ok(None) } + /// Prepares a single batch of user data for reports that support batching. 
+ /// + /// # Errors + /// + /// Returns an error if the data cannot be fetched or assembled. async fn prepare_user_data_batch( &self, hasura_transaction: &Transaction<'_>, @@ -126,11 +168,21 @@ pub trait TemplateRenderer: Debug { )) } + /// Prepares user-side data for a report render. + /// + /// # Errors + /// + /// Returns an error if report-specific data cannot be fetched or validated. async fn prepare_user_data( &self, hasura_transaction: &Transaction<'_>, keycloak_transaction: &Transaction<'_>, ) -> Result; + /// Prepares system-side variables after the user template has been rendered. + /// + /// # Errors + /// + /// Returns an error if report-specific system variables cannot be assembled. async fn prepare_system_data(&self, rendered_user_template: String) -> Result; @@ -159,16 +211,23 @@ pub trait TemplateRenderer: Debug { is_scheduled_task || self.get_voter_id().is_some() } - // Default implementation, can be overridden in specific reports that have - // voterId + /// Returns the voter id associated with this report, when applicable. + /// + /// Default implementation, can be overridden in specific reports that have + /// `voter_id`. #[instrument(skip(self))] fn get_voter_id(&self) -> Option { None } #[instrument(err, skip(self))] + /// Loads preview user data from the report's public-assets JSON file. + /// + /// # Errors + /// + /// Returns an error if the preview data cannot be fetched or deserialized + /// into `UserData`. async fn prepare_preview_data(&self) -> Result { - println!("!!!!!prepare_preview_data"); let json_data = self .get_preview_data_file() .await @@ -180,6 +239,12 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip(self, hasura_transaction))] + /// Loads the custom template record (if any) for this report type. + /// + /// # Errors + /// + /// Returns an error if the template alias cannot be resolved, the template + /// record cannot be read, or the stored JSON cannot be deserialized. 
async fn get_custom_user_template_data( &self, hasura_transaction: &Transaction<'_>, @@ -279,18 +344,33 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip(self))] + /// Fetches the default user template from public assets. + /// + /// # Errors + /// + /// Returns an error if the template cannot be fetched. async fn get_default_user_template(&self) -> Result { let base_name = self.base_name(); get_public_asset_template(format!("{base_name}_user.hbs").as_str()).await } #[instrument(err, skip(self))] + /// Fetches the system template from public assets. + /// + /// # Errors + /// + /// Returns an error if the template cannot be fetched. async fn get_system_template(&self) -> Result { let base_name = self.base_name(); get_public_asset_template(format!("{base_name}_system.hbs").as_str()).await } #[instrument(err, skip(self))] + /// Fetches the preview JSON data file from public assets. + /// + /// # Errors + /// + /// Returns an error if the file cannot be fetched. async fn get_preview_data_file(&self) -> Result { let base_name = self.base_name(); info!("base_name: {}", &base_name); @@ -298,13 +378,22 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip(self))] + /// Fetches the default extra-config JSON file from public assets. + /// + /// # Errors + /// + /// Returns an error if the file cannot be fetched. async fn get_default_extra_config_file(&self) -> Result { let base_name = self.base_name(); get_public_asset_template(format!("{base_name}_extra_config.json").as_str()).await } - /// Read the default extra config for this template's type like PDF options and communication templates. #[instrument(err, skip(self))] + /// Read the default extra config for this template's type like PDF options and communication templates. + /// + /// # Errors + /// + /// Returns an error if the extra config cannot be fetched or deserialized. 
async fn get_default_extra_config(&self) -> Result { let json_data = self .get_default_extra_config_file() @@ -316,6 +405,11 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip_all)] + /// Renders the report without batching support. + /// + /// # Errors + /// + /// Returns an error if user/system data preparation or template rendering fails. async fn generate_report_inner( &self, generate_mode: GenerateReportMode, @@ -362,6 +456,12 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip_all)] + /// Generates a report using the provided user template and system template. + /// + /// # Errors + /// + /// Returns an error if the user data cannot be prepared, the user template cannot be rendered, + /// the system data cannot be prepared, or the system template cannot be rendered. async fn generate_report( &self, generate_mode: GenerateReportMode, @@ -463,10 +563,16 @@ pub trait TemplateRenderer: Debug { Ok((user_tpl_document, ext_cfg)) } - // Inner implementation for `execute_report()` so that implementors of the - // trait can reimplement the function while calling the parent default - // implementation too when needed #[instrument(err, skip_all)] + /// Executes the full report pipeline (render → PDF/ZIP → optional encryption → upload → email). + /// + /// This is a shared implementation used by most report types. Report types + /// may override `execute_report()` to customize behavior while still calling + /// this helper when appropriate. + /// + /// # Errors + /// + /// Returns an error if rendering, file IO, encryption, upload, or notifications fail. #[allow(clippy::too_many_arguments)] async fn execute_report_inner( &self, @@ -818,6 +924,11 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip_all)] + /// Runs report generation using the default pipeline. + /// + /// # Errors + /// + /// Returns an error if the underlying pipeline fails. 
async fn execute_report( &self, document_id: &str, @@ -847,6 +958,12 @@ pub trait TemplateRenderer: Debug { } #[instrument(err, skip(self))] + /// Resolves the list of email recipients for report notifications. + /// + /// # Errors + /// + /// Returns an error if a voter id is required but missing, Keycloak cannot be + /// queried, or the voter has no email configured. async fn get_email_recipients( &self, recipients: Vec, diff --git a/packages/windmill/src/services/reports/utils.rs b/packages/windmill/src/services/reports/utils.rs index 940e951ee4..9f3c1cecd0 100644 --- a/packages/windmill/src/services/reports/utils.rs +++ b/packages/windmill/src/services/reports/utils.rs @@ -2,18 +2,29 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Helpers for fetching report templates from public assets storage. + use anyhow::{anyhow, Context, Result}; use sequent_core::services::s3::get_minio_url; use std::env; use tracing::instrument; -/// Function to get the public assets path environment variable +/// Returns the `PUBLIC_ASSETS_PATH` used to locate report templates in object storage. +/// +/// # Errors +/// +/// Returns an error if `PUBLIC_ASSETS_PATH` is not set. #[instrument(err, skip_all)] pub fn get_public_assets_path_env_var() -> Result { env::var("PUBLIC_ASSETS_PATH").map_err(|_| anyhow!("PUBLIC_ASSETS_PATH env var missing")) } -/// Helper function to get public asset templates +/// Fetches a template file from the public assets bucket. +/// +/// # Errors +/// +/// Returns an error if required environment/config is missing, the request fails, +/// or the object does not exist. 
#[instrument(err, skip_all)] pub async fn get_public_asset_template(filename: &str) -> Result { let public_asset_path = get_public_assets_path_env_var()?; diff --git a/packages/windmill/src/services/reports_vault.rs b/packages/windmill/src/services/reports_vault.rs index 9b76c46c96..0ca30fcf87 100644 --- a/packages/windmill/src/services/reports_vault.rs +++ b/packages/windmill/src/services/reports_vault.rs @@ -1,11 +1,15 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Vault-oriented secrets loading for report generation. + use crate::services::vault::{read_secret, save_secret}; use anyhow::anyhow; use deadpool_postgres::Transaction; use tracing::instrument; +/// Build the vault secret key used to store a report password for a tenant/event. pub fn get_report_secret_key( tenant_id: &str, election_event_id: &str, @@ -20,6 +24,11 @@ } #[instrument(err)] +/// Get the password for a report from the vault. +/// +/// # Errors +/// +/// Returns an error if the vault lookup fails. pub async fn get_password( hasura_transaction: &Transaction<'_>, tenant_id: String, @@ -41,6 +50,11 @@ } #[instrument(err)] +/// Get the key pair for a report, saving the secret to the vault when needed. +/// +/// # Errors +/// +/// Returns an error if saving the secret to the vault fails. pub async fn get_report_key_pair( hasura_transaction: &Transaction<'_>, tenant_id: String, diff --git a/packages/windmill/src/services/serialize_tasks_logs.rs b/packages/windmill/src/services/serialize_tasks_logs.rs index 1092e0a1d5..f54719452b 100644 --- a/packages/windmill/src/services/serialize_tasks_logs.rs +++ b/packages/windmill/src/services/serialize_tasks_logs.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Structuring and serializing task execution logs. 
+ use crate::services::ceremonies::serialize_logs::sort_logs; use sequent_core::types::ceremonies::Log; use sequent_core::{serialization::deserialize_with_path, services::date::ISO8601}; @@ -8,6 +11,7 @@ use serde_json::value::Value; use tracing::{event, instrument, Level}; #[instrument] +/// Create general start log. pub fn general_start_log() -> Vec { vec![Log { created_date: ISO8601::to_string(&ISO8601::now()), @@ -16,6 +20,7 @@ pub fn general_start_log() -> Vec { } #[instrument(skip(current_logs))] +/// Append general log to current logs. pub fn append_general_log(current_logs: &Option, message: &str) -> Vec { let value = current_logs.clone().unwrap_or(Value::Array(vec![])); let mut logs: Vec = diff --git a/packages/windmill/src/services/sql_utils.rs b/packages/windmill/src/services/sql_utils.rs index 858f9460d6..e0dab7ab0b 100644 --- a/packages/windmill/src/services/sql_utils.rs +++ b/packages/windmill/src/services/sql_utils.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! SQL utilities for escaping SQL literals and identifiers. + use anyhow::{anyhow, Result}; use deadpool_postgres::Pool; @@ -26,6 +28,10 @@ pub fn escape_sql_literal(value: &str) -> String { /// Verifies that PostgreSQL has standard_conforming_strings = on, which is /// required for escape_sql_literal to be a complete defense against SQL /// injection. Returns an error if the setting is off. +/// +/// # Errors +/// +/// Returns an error if a connection cannot be obtained, the setting cannot be read, or it is not `on`. 
pub async fn assert_standard_conforming_strings(pool: &Pool) -> Result<()> { let client = pool.get().await.map_err(|e| { anyhow!("Failed to get connection for standard_conforming_strings check: {e}") diff --git a/packages/windmill/src/services/tally_sheets/mod.rs b/packages/windmill/src/services/tally_sheets/mod.rs index a303f6f84b..0aad180da6 100644 --- a/packages/windmill/src/services/tally_sheets/mod.rs +++ b/packages/windmill/src/services/tally_sheets/mod.rs @@ -2,5 +2,10 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Tally sheet helpers. +//! +//! This module groups utilities used to validate and group tally sheets fetched +//! from the data store for counting and reporting. + pub mod tally; pub mod validation; diff --git a/packages/windmill/src/services/tally_sheets/tally.rs b/packages/windmill/src/services/tally_sheets/tally.rs index 9e67a440d0..fa3c9a9af2 100644 --- a/packages/windmill/src/services/tally_sheets/tally.rs +++ b/packages/windmill/src/services/tally_sheets/tally.rs @@ -1,11 +1,14 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Grouping utilities for tally sheets. + use sequent_core::types::hasura::core::TallySheet; use std::collections::HashMap; use tracing::instrument; -// Returns a Map<(area_id,contest_id), Vec> +/// Builds a lookup map of tally sheets grouped by `(area_id, contest_id)`. #[instrument(skip_all)] pub fn create_tally_sheets_map( tally_sheets: &Vec, diff --git a/packages/windmill/src/services/tally_sheets/validation.rs b/packages/windmill/src/services/tally_sheets/validation.rs index fe047a6193..62904e5ec8 100644 --- a/packages/windmill/src/services/tally_sheets/validation.rs +++ b/packages/windmill/src/services/tally_sheets/validation.rs @@ -1,6 +1,8 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Tally sheet validation helpers. 
use crate::types::error::Result; use anyhow::anyhow; use sequent_core::ballot::{Candidate, Contest}; @@ -9,6 +11,20 @@ use std::collections::HashMap; use tracing::instrument; #[instrument(skip_all, err)] +/// Validates that a stored tally sheet is internally consistent for a contest. +/// +/// This performs basic accounting checks (votes vs. census, invalid/valid totals) +/// and ensures each candidate result refers to a candidate that exists in the +/// contest definition. +/// +/// # Errors +/// +/// Returns an error if the tally sheet is missing content, has inconsistent vote +/// totals, or contains candidate results that don't match the contest. +/// +/// # Panics +/// +/// Panics if vote totals overflow `u64` while being summed for validation. pub fn validate_tally_sheet(tally_sheet: &TallySheet, contest: &Contest) -> Result<()> { let Some(results) = tally_sheet.content.clone() else { return Err(anyhow!("Invalid tally sheet {:?}, content missing", tally_sheet).into()); diff --git a/packages/windmill/src/services/tasks_execution.rs b/packages/windmill/src/services/tasks_execution.rs index fb452e44c7..11d8ccbdae 100644 --- a/packages/windmill/src/services/tasks_execution.rs +++ b/packages/windmill/src/services/tasks_execution.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Task execution bookkeeping: status transitions and persistence. + use crate::postgres::tasks_execution::{insert_tasks_execution, update_task_execution_status}; use crate::services::serialize_tasks_logs::*; use crate::types::tasks::ETasksExecution; @@ -12,11 +14,18 @@ use serde::{Deserialize, Serialize}; use tracing::instrument; #[derive(Serialize, Deserialize, Debug, Clone)] +/// Per-task annotations stored alongside the task execution record. pub struct TaskAnnotations { + /// Document id associated with the task, when applicable. document_id: Option, } #[instrument(skip_all, err)] +/// Post a new task execution record. 
+/// +/// # Errors +/// +/// Returns an error if logs cannot be serialized or the task record cannot be created. pub async fn post( tenant_id: &str, election_event_id: Option<&str>, @@ -44,6 +53,11 @@ pub async fn post( // TODO filter also by tenant-id and document-id #[instrument(skip_all, err)] +/// Update a task execution record. +/// +/// # Errors +/// +/// Returns an error if annotations cannot be serialized or the task record cannot be updated. pub async fn update( tenant_id: &str, task_id: &str, @@ -60,6 +74,11 @@ pub async fn update( // TODO filter also by tenant-id and document-id #[instrument(skip_all, err)] +/// Update a task execution record to indicate success. +/// +/// # Errors +/// +/// Returns an error if logs/annotations cannot be serialized or the task record cannot be updated. pub async fn update_complete( task: &TasksExecution, document_id: Option, @@ -78,6 +97,11 @@ pub async fn update_complete( // TODO filter also by tenant-id and document-id #[instrument(skip_all, err)] +/// Update a task execution record to indicate failure. +/// +/// # Errors +/// +/// Returns an error if logs/annotations cannot be serialized or the task record cannot be updated. pub async fn update_fail(task: &TasksExecution, err_message: &str) -> Result<(), anyhow::Error> { let task_id = &task.id; let new_status = TasksExecutionStatus::FAILED; diff --git a/packages/windmill/src/services/tasks_semaphore.rs b/packages/windmill/src/services/tasks_semaphore.rs index 3090b49488..40e018332e 100644 --- a/packages/windmill/src/services/tasks_semaphore.rs +++ b/packages/windmill/src/services/tasks_semaphore.rs @@ -1,16 +1,24 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing semaphore for tasks. 
+ use anyhow::{anyhow, Context, Result}; use once_cell::sync::OnceCell; use std::sync::Arc; use tokio::sync::{Semaphore, SemaphorePermit}; use tracing::instrument; -// Static OnceCell to hold the semaphore +/// Static OnceCell to hold the semaphore pub static SEMAPHORE: OnceCell> = OnceCell::new(); #[instrument(err)] +/// Initialize the semaphore. +/// +/// # Errors +/// +/// Returns an error if the semaphore has already been initialized. pub fn init_semaphore(count: usize) -> Result<()> { // Create the semaphore and wrap it in Arc let semaphore = Arc::new(Semaphore::new(count)); @@ -24,6 +32,11 @@ pub fn init_semaphore(count: usize) -> Result<()> { } #[instrument(err)] +/// Acquire a semaphore permit. +/// +/// # Errors +/// +/// Returns an error if the semaphore is not initialized or a permit cannot be acquired. pub async fn acquire_semaphore() -> Result> { // Get the semaphore and acquire a permit SEMAPHORE diff --git a/packages/windmill/src/services/temp_path.rs b/packages/windmill/src/services/temp_path.rs index 851dcc1c91..7f57bf6f00 100644 --- a/packages/windmill/src/services/temp_path.rs +++ b/packages/windmill/src/services/temp_path.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Temporary directories and scratch paths for file processing. + use crate::types::error::Result; use anyhow::Context; use std::env; @@ -11,15 +14,26 @@ use tempfile::Builder; use tempfile::{NamedTempFile, TempPath}; use tracing::{event, instrument, Level}; +/// QR code template. pub const QR_CODE_TEMPLATE: &str = "
"; +/// Logo template. pub const LOGO_TEMPLATE: &str = "
"; +/// Public assets logo image. pub const PUBLIC_ASSETS_LOGO_IMG: &str = "sequent-logo.svg"; +/// Public assets QR code library. pub const PUBLIC_ASSETS_QRCODE_LIB: &str = "qrcode.min.js"; +/// Public assets velvet ballot images template. pub const PUBLIC_ASSETS_VELVET_BALLOT_IMAGES_TEMPLATE: &str = "ballot_images_user.hbs"; +/// Public assets velvet ballot images template system. pub const PUBLIC_ASSETS_VELVET_BALLOT_IMAGES_TEMPLATE_SYSTEM: &str = "ballot_images_system.hbs"; +/// Public assets velvet MC ballot images template. pub const PUBLIC_ASSETS_VELVET_MC_BALLOT_IMAGES_TEMPLATE: &str = "mc_ballot_images_user.hbs"; +/// Velvet ballot images template title. pub const VELVET_BALLOT_IMAGES_TEMPLATE_TITLE: &str = "Ballot Images"; +/// Public assets I18N defaults. pub const PUBLIC_ASSETS_I18N_DEFAULTS: &str = "i18n_defaults.json"; +/// Public assets initialization report system template. pub const PUBLIC_ASSETS_INITIALIZATION_TEMPLATE_SYSTEM: &str = "initialization_report_system.hbs"; +/// Public assets electoral results template system. pub const PUBLIC_ASSETS_ELECTORAL_RESULTS_TEMPLATE_SYSTEM: &str = "electoral_results_system.hbs"; diff --git a/packages/windmill/src/services/to_result.rs b/packages/windmill/src/services/to_result.rs index 8d67cc49da..46ca93cb0c 100644 --- a/packages/windmill/src/services/to_result.rs +++ b/packages/windmill/src/services/to_result.rs @@ -2,11 +2,19 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Error conversion helpers mapping external failures into `anyhow::Result`. + use anyhow::{anyhow, Result}; use graphql_client::Response; use tracing::{event, instrument, Level}; +/// Trait to turn GraphQL `Response` values into `Result`s. pub trait ToResult { + /// Convert a GraphQL response into a `Result`, returning an error when the response contains errors. + /// + /// # Errors + /// + /// Returns `Err` if the response contains GraphQL errors. 
fn ok(self) -> Result; } diff --git a/packages/windmill/src/services/transmission.rs b/packages/windmill/src/services/transmission.rs index a904ce30f8..02e3a71d11 100644 --- a/packages/windmill/src/services/transmission.rs +++ b/packages/windmill/src/services/transmission.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Managing signed transmission bundles and dispatch to external custody or reporting systems. + use crate::{ postgres::tally_session::{get_tally_session_by_id, get_tally_sessions_by_election_event_id}, types::miru_plugin::{MiruServerDocumentStatus, MiruTallySessionData}, @@ -16,6 +19,11 @@ use super::consolidation::{ }; #[instrument(err, skip_all)] +/// Get the transmission data from the tally session by area. +/// +/// # Errors +/// +/// Returns an error if tally sessions cannot be fetched or area annotations cannot be parsed. pub async fn get_transmission_data_from_tally_session_by_area( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -73,6 +81,8 @@ pub async fn get_transmission_data_from_tally_session_by_area( } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Server data. +#[allow(missing_docs)] pub struct ServerData { pub server_code: String, pub transmitted: String, @@ -83,6 +93,8 @@ pub struct ServerData { } #[derive(Serialize, Deserialize, Debug, Clone)] +/// Transmission data. +#[allow(missing_docs)] pub struct TransmissionData { pub servers: Vec, pub total_transmitted: i64, @@ -91,6 +103,15 @@ pub struct TransmissionData { } #[instrument(err, skip_all)] +/// Get the transmission servers data. +/// +/// # Errors +/// +/// Returns an error if area annotations cannot be parsed. +/// +/// # Panics +/// +/// Panics if transmitted counters overflow while building the per-server summary. 
pub async fn get_transmission_servers_data( tally_session_data: &MiruTallySessionData, area: &Area, diff --git a/packages/windmill/src/services/users.rs b/packages/windmill/src/services/users.rs index 104be635f6..b3686accc0 100644 --- a/packages/windmill/src/services/users.rs +++ b/packages/windmill/src/services/users.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Managing Keycloak users, filtering, enrichment, and CSV export. + use crate::postgres::area::get_areas; use crate::postgres::election_event::get_election_event_by_id; use crate::services::cast_votes::get_users_with_vote_info; @@ -37,15 +39,28 @@ use tracing::error; use tracing::{debug, info, instrument}; use uuid::Uuid; +/// Validate ID attribute name. pub const VALIDATE_ID_ATTR_NAME: &str = "sequent.read-only.id-card-number-validated"; +/// Delegate to attribute name. pub const DELEGATE_TO_ATTR_NAME: &str = "delegate-vote-to"; +/// Validate ID registered voter. pub const VALIDATE_ID_REGISTERED_VOTER: &str = "VERIFIED"; +/// Advance a 1-based SQL `$n` placeholder index, panicking on overflow. +/// +/// # Panics +/// +/// Panics if `n + delta` exceeds `i32::MAX`. #[inline] fn bump_sql_param_index(n: i32, delta: i32) -> i32 { n.checked_add(delta).expect("SQL parameter index overflow") } +/// Number of bound parameters implied by the next free placeholder index. +/// +/// # Panics +/// +/// Panics if `next_param_number` is less than 1. #[inline] fn sql_params_bound_count(next_param_number: i32) -> i32 { next_param_number @@ -53,6 +68,11 @@ fn sql_params_bound_count(next_param_number: i32) -> i32 { .expect("SQL parameter count underflow") } +/// Resolve area ids and SQL fragments for optional election / area filters. +/// +/// # Errors +/// +/// Returns an error if UUID parsing fails, or the SQL query fails. 
#[instrument(skip(hasura_transaction), err)] async fn get_area_ids( hasura_transaction: &Transaction<'_>, @@ -140,6 +160,15 @@ async fn get_area_ids( Ok((Some(area_ids), area_ids_join_clause, area_ids_where_clause)) } +/// Export enabled users for an area and authorized-election attribute. +/// +/// # Errors +/// +/// Returns an error if UUID parsing fails, `COPY` fails, or I/O while streaming results fails. +/// +/// # Panics +/// +/// Panics if the output temp file cannot be created or opened for writing. #[instrument(skip(keycloak_transaction), err)] pub async fn list_keycloak_enabled_users_by_area_id_and_authorized_elections( keycloak_transaction: &Transaction<'_>, @@ -228,8 +257,9 @@ pub async fn list_keycloak_enabled_users_by_area_id_and_authorized_elections( Ok(()) } -/// SQL boolean operator used to chain filter clauses in WHERE conditions. #[derive(Debug, Clone, Copy, EnumString, Display)] +/// SQL boolean operator used to chain filter clauses in WHERE conditions. +#[allow(missing_docs)] pub enum SqlBooleanOperator { #[strum(serialize = " AND")] And, @@ -238,6 +268,7 @@ pub enum SqlBooleanOperator { } #[derive(Debug, Clone, PartialEq, Eq, EnumString, Display)] +/// Filter option for sql queries. pub enum FilterOption { /// Those elements that contain the string are returned. IsLike(String), @@ -401,6 +432,8 @@ impl<'de> Deserialize<'de> for FilterOption { } #[derive(Debug, PartialEq, Eq, Clone, Default)] +/// List users filter. +#[allow(missing_docs)] pub struct ListUsersFilter { pub tenant_id: String, pub election_event_id: Option, @@ -424,6 +457,7 @@ pub struct ListUsersFilter { } impl ListUsersFilter { + /// Create a new list users filter. pub fn new(tenant_id: &str, realm: &str) -> Self { Self { tenant_id: tenant_id.to_string(), @@ -433,6 +467,7 @@ impl ListUsersFilter { } } +/// Append an `AND u. = true/false` clause when `value` is set. 
fn get_query_bool_condition(field: &str, value: Option) -> String { match value { Some(true) => format!(r"AND u.{} = true", field), @@ -489,6 +524,11 @@ fn get_sort_clause_and_field_param( } } +/// Count Keycloak users that match the filter. +/// +/// # Errors +/// +/// Returns an error if SQL preparation, query execution, or type conversion fails. #[instrument(skip(hasura_transaction, keycloak_transaction), err)] pub async fn count_keycloak_users( hasura_transaction: &Transaction<'_>, @@ -637,6 +677,11 @@ pub async fn count_keycloak_users( Ok(count) } +/// List Keycloak users with filters and pagination +/// +/// # Errors +/// +/// Returns an error if SQL preparation, query execution, area lookup, or config/env fails. #[instrument(skip(hasura_transaction, keycloak_transaction), err)] pub async fn list_users( hasura_transaction: &Transaction<'_>, @@ -928,6 +973,11 @@ pub async fn list_users( } } +/// Return users ids that match the filter. +/// +/// # Errors +/// +/// Returns an error if SQL preparation or execution fails. #[instrument(skip(hasura_transaction, keycloak_transaction, filter), err)] pub async fn list_users_ids( hasura_transaction: &Transaction<'_>, @@ -1113,6 +1163,11 @@ pub async fn list_users_ids( Ok(user_ids) } +/// List Keycloak users with vote info for the given election event. +/// +/// # Errors +/// +/// Returns an error if filters are incomplete, listing fails, or vote-info enrichment fails. #[instrument(skip(hasura_transaction, keycloak_transaction, filter), err)] pub async fn list_users_with_vote_info( hasura_transaction: &Transaction<'_>, @@ -1145,6 +1200,11 @@ pub async fn list_users_with_vote_info( Ok((users, users_count)) } +/// Count enabled users in a Keycloak realm. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the count query fails. 
#[instrument(skip(keycloak_transaction), err)] pub async fn count_keycloak_enabled_users( keycloak_transaction: &Transaction<'_>, @@ -1178,6 +1238,10 @@ pub async fn count_keycloak_enabled_users( } /// Use only for verifying application!, does not work as it seems for other situations, then use list_users instead. +/// +/// # Errors +/// +/// Returns an error if pool config, SQL preparation, or query execution fails. #[instrument(skip(hasura_transaction, keycloak_transaction), err)] pub async fn lookup_users( hasura_transaction: &Transaction<'_>, @@ -1366,21 +1430,33 @@ pub async fn lookup_users( } #[derive(Debug, Clone, PartialEq, Eq, EnumString, Display)] +/// Comparison modes for attribute filters in SQL queries. pub enum AttributesFilterBy { - IsLike, // Those elements that contain the string are returned - IsEqual, // Those elements that match precisely the string are returned - NotExist, // Those elements that Not exist with givin value - PartialLike, // Those elements that Not exist with givin value + /// Those elements that contain the string are returned + IsLike, + /// Those elements that match precisely the string are returned + IsEqual, + /// Those elements where the attribute does not exist with the given value + NotExist, + /// Those elements that partially match the given value are returned + PartialLike, } #[derive(Debug, Clone)] +/// Attribute filter option for SQL queries. pub struct AttributesFilterOption { + /// The value to filter by. pub value: String, + /// The comparison mode to use. pub filter_by: AttributesFilterBy, } impl AttributesFilterOption { /// Return the sql condition to filter at the given column, to be used in the WHERE clause + /// + /// # Panics + /// + /// Panics if `index` is zero (would underflow the name placeholder index). pub fn get_sql_filter_clause(&self, index: usize) -> String { let filter_option = self; let name_param = index @@ -1415,6 +1491,15 @@ impl AttributesFilterOption { } } +/// Count enabled realm users matching optional attribute filters. 
+/// +/// # Errors +/// +/// Returns an error if statement preparation or the count query fails. +/// +/// # Panics +/// +/// Panics if attribute filter placeholder arithmetic overflows `usize`. #[instrument(skip(keycloak_transaction), err)] pub async fn count_keycloak_enabled_users_by_attrs( keycloak_transaction: &Transaction<'_>, @@ -1476,12 +1561,20 @@ pub async fn count_keycloak_enabled_users_by_attrs( // use std::error::Error; // use reqwest::Client; +/// Minimal Keycloak group row used when resolving parent paths. #[derive(Debug, Deserialize)] struct Group { + /// Group id from Keycloak. id: String, + /// Group display name. name: String, } +/// Whether the user has the id-card validation attribute set to the registered-voter value. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the lookup query fails. #[instrument(skip(keycloak_transaction), err)] pub async fn check_is_user_verified( keycloak_transaction: &Transaction<'_>, @@ -1521,6 +1614,10 @@ pub async fn check_is_user_verified( /// Returns a vector with user ids. /// It is up to the caller to handle when there are mutiple users with the same username or the vector is empty - not found. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the query fails. #[instrument(err, skip(keycloak_transaction))] pub async fn get_users_by_username( keycloak_transaction: &Transaction<'_>, @@ -1566,6 +1663,10 @@ pub async fn get_users_by_username( } /// Returns the username of the user id or None if it does not exist. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the query fails. #[instrument(err, skip(keycloak_transaction))] pub async fn get_username_by_id( keycloak_transaction: &Transaction<'_>, @@ -1612,6 +1713,11 @@ pub async fn get_username_by_id( } } +/// Read the user's voting area id attribute from Keycloak, if any. +/// +/// # Errors +/// +/// Returns an error if statement preparation or the query fails. 
#[instrument(err, skip(keycloak_transaction))] pub async fn get_user_area_id( keycloak_transaction: &Transaction<'_>, @@ -1660,6 +1766,11 @@ pub async fn get_user_area_id( } } +/// Count distinct voters in `cast_vote` for the tenant and optional election filters. +/// +/// # Errors +/// +/// Returns an error if UUID parsing, statement preparation, or the count query fails. #[instrument(skip(hasura_transaction), err)] pub async fn count_have_voted( hasura_transaction: &Transaction<'_>, @@ -1715,6 +1826,15 @@ pub async fn count_have_voted( Ok(count) } +/// Page users annotated with whether they cast a vote, using batched Keycloak queries. +/// +/// # Errors +/// +/// Returns an error if limits are missing, config cannot be read, or any underlying list/count fails. +/// +/// # Panics +/// +/// Panics if internal counters or offsets overflow (`i32` / `usize` arithmetic). #[instrument(skip(hasura_transaction, keycloak_transaction), err)] pub async fn list_users_has_voted( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/vault/aws_secret_manager.rs b/packages/windmill/src/services/vault/aws_secret_manager.rs index 46b00cca52..040f21f0a7 100644 --- a/packages/windmill/src/services/vault/aws_secret_manager.rs +++ b/packages/windmill/src/services/vault/aws_secret_manager.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! AWS Secrets Manager backend. + use super::{Vault, VaultManagerType}; use anyhow::{anyhow, Context, Result}; use async_trait::async_trait; @@ -11,9 +13,15 @@ use std::env; use tracing::{info, instrument}; #[derive(Debug)] +/// AWS Secrets Manager secret backend. pub struct AwsSecretManager; impl AwsSecretManager { + /// Applies `AWS_SM_KEY_PREFIX` to a logical key before sending it to AWS. + /// + /// # Errors + /// + /// Returns an error if `AWS_SM_KEY_PREFIX` is not set. 
fn get_prefixed_key(&self, key: String) -> Result { let key_prefix = env::var("AWS_SM_KEY_PREFIX").context("AWS_SM_KEY_PREFIX must be set")?; Ok(key_prefix + key.as_str()) @@ -24,6 +32,11 @@ impl AwsSecretManager { impl Vault for AwsSecretManager { // TODO: add back skip(value) #[instrument(err)] + /// Creates a new secret in AWS Secrets Manager. + /// + /// # Errors + /// + /// Returns an error if AWS configuration cannot be loaded or the AWS API call fails. async fn save_secret(&self, key: String, value: String) -> Result<()> { let shared_config = get_from_env_aws_config() .await @@ -42,6 +55,11 @@ impl Vault for AwsSecretManager { } #[instrument(err)] + /// Reads a secret value from AWS Secrets Manager. + /// + /// # Errors + /// + /// Returns an error if AWS configuration cannot be loaded or the key prefix is missing. async fn read_secret(&self, key: String) -> Result> { let shared_config = get_from_env_aws_config() .await @@ -60,6 +78,7 @@ impl Vault for AwsSecretManager { } #[instrument] + /// Identifies this backend as AWS Secrets Manager. fn vault_type(&self) -> VaultManagerType { VaultManagerType::AwsSecretManager } diff --git a/packages/windmill/src/services/vault/env_var_master_secret.rs b/packages/windmill/src/services/vault/env_var_master_secret.rs index 93703a8c26..749c71d91f 100644 --- a/packages/windmill/src/services/vault/env_var_master_secret.rs +++ b/packages/windmill/src/services/vault/env_var_master_secret.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Environment-variable based secret backend. + use super::{Vault, VaultManagerType}; use anyhow::{Context, Result}; use async_trait::async_trait; @@ -9,11 +11,17 @@ use std::env; use tracing::{error, info, instrument}; #[derive(Debug)] +/// Secret backend that reads the master secret from `MASTER_SECRET`. 
pub struct EnvVarMasterSecret; #[async_trait] impl Vault for EnvVarMasterSecret { #[instrument(err)] + /// Rejects storing secrets and prints the generated value for manual setup. + /// + /// # Errors + /// + /// Always returns an error to force operators to set `MASTER_SECRET` explicitly. async fn save_secret(&self, _key: String, value: String) -> Result<()> { // If initialize_master_secret failed to read, it creates the master secret value // and tries to save it calling to this function. @@ -25,6 +33,11 @@ impl Vault for EnvVarMasterSecret { } #[instrument(err)] + /// Reads `MASTER_SECRET` from the process environment. + /// + /// # Errors + /// + /// Returns an error only if reading the environment variable fails unexpectedly. async fn read_secret(&self, _key: String) -> Result> { match env::var("MASTER_SECRET") { Ok(master_secret) => Ok(Some(master_secret)), @@ -36,6 +49,7 @@ impl Vault for EnvVarMasterSecret { } #[instrument] + /// Identifies this backend as environment-variable based. fn vault_type(&self) -> VaultManagerType { VaultManagerType::EnvVarMasterSecret } diff --git a/packages/windmill/src/services/vault/hashicorp_vault.rs b/packages/windmill/src/services/vault/hashicorp_vault.rs index e2f9cc660d..e14de2f713 100644 --- a/packages/windmill/src/services/vault/hashicorp_vault.rs +++ b/packages/windmill/src/services/vault/hashicorp_vault.rs @@ -2,6 +2,11 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! HashiCorp Vault backend. +//! +//! This implementation reads/writes raw string secrets to the Vault HTTP API +//! using `VAULT_SERVER_URL` and `VAULT_TOKEN`. + use super::{Vault, VaultManagerType}; use anyhow::{Context, Result}; use async_trait::async_trait; @@ -13,26 +18,41 @@ use std::env; use tracing::{info, instrument}; #[derive(Serialize, Deserialize)] +/// Secret payload as stored under the configured Vault path. 
+#[allow(missing_docs_in_private_items)] struct VaultSecret { data: Option, value: Option, } #[derive(Serialize, Deserialize)] +/// Response wrapper returned by the Vault HTTP API on secret reads. struct VaultRead { + /// Authentication metadata returned by Vault. auth: Option, + /// Secret data envelope. data: VaultSecret, + /// Lease duration in seconds. lease_duration: i64, + /// Lease identifier. lease_id: String, + /// Whether the lease can be renewed. renewable: bool, } #[derive(Debug)] +/// HashiCorp Vault secret backend. pub struct HashiCorpVault; #[async_trait] impl Vault for HashiCorpVault { #[instrument(skip(value), err)] + /// Stores a secret value at `secrets/` via the Vault HTTP API. + /// + /// # Errors + /// + /// Returns an error if required environment variables are missing, if the + /// request fails, or if Vault returns a non-success status. async fn save_secret(&self, key: String, value: String) -> Result<()> { let server_url = env::var("VAULT_SERVER_URL").context("VAULT_SERVER_URL must be set")?; let token = env::var("VAULT_TOKEN").context("VAULT_TOKEN must be set")?; @@ -53,6 +73,12 @@ impl Vault for HashiCorpVault { } #[instrument(err)] + /// Reads a secret value from `secrets/` via the Vault HTTP API. + /// + /// # Errors + /// + /// Returns an error if required environment variables are missing, if the + /// request fails, or if the response cannot be parsed. async fn read_secret(&self, key: String) -> Result> { let server_url = env::var("VAULT_SERVER_URL").context("VAULT_SERVER_URL must be set")?; let token = env::var("VAULT_TOKEN").context("VAULT_TOKEN must be set")?; @@ -74,6 +100,7 @@ impl Vault for HashiCorpVault { } #[instrument] + /// Identifies this backend as HashiCorp Vault. 
fn vault_type(&self) -> VaultManagerType { VaultManagerType::HashiCorpVault } diff --git a/packages/windmill/src/services/vault/mod.rs b/packages/windmill/src/services/vault/mod.rs index 5b9629499f..1a2256dd9e 100644 --- a/packages/windmill/src/services/vault/mod.rs +++ b/packages/windmill/src/services/vault/mod.rs @@ -2,6 +2,9 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Secret backends (AWS Secrets Manager, HashiCorp Vault, environment) +//! for tasks that need credentials or key material. + mod aws_secret_manager; mod env_var_master_secret; mod hashicorp_vault; diff --git a/packages/windmill/src/services/vault/vault.rs b/packages/windmill/src/services/vault/vault.rs index 3799a528d4..b0a72ff45d 100644 --- a/packages/windmill/src/services/vault/vault.rs +++ b/packages/windmill/src/services/vault/vault.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Secret storage and application-level secret helpers. + use crate::postgres::secret::{get_secret_by_key, insert_secret}; use crate::services::electoral_log::ElectoralLog; use crate::services::vault::{ @@ -20,20 +22,31 @@ use tokio; use tokio::sync::OnceCell; use tracing::{info, instrument}; +/// Key name used by secret backends to store the master secret. const MASTER_SECRET_KEY_NAME: &str = "master_secret"; +/// Canonical lowercase name accepted for AWS Secrets Manager backend selection. const LOWER_AWS_SECRETS_MANAGER: &str = "awssecretsmanager"; #[derive(EnumString, Display, Debug)] +/// Supported secret backends used to store the master secret. +#[allow(missing_docs)] pub enum VaultManagerType { HashiCorpVault, AwsSecretManager, EnvVarMasterSecret, } +/// Process-wide cache of the master secret once initialized. static MASTER_SECRET: OnceCell = OnceCell::const_new(); #[instrument] +/// Verifies the configured secret backend can access the master secret. 
+/// +/// # Errors +/// +/// Returns an error if the configured backend cannot be constructed or cannot +/// read the master secret entry. pub async fn check_master_secret() -> Result<()> { let vault = get_vault()?; @@ -45,6 +58,19 @@ pub async fn check_master_secret() -> Result<()> { } #[instrument] +/// Loads the master secret from the configured backend, creating it if missing. +/// +/// If the backend has no existing master secret entry, a new symmetric key is +/// generated and persisted in that backend. +/// +/// # Errors +/// +/// Returns an error if the backend cannot be constructed, the backend cannot be +/// accessed, or saving a newly generated secret fails. +/// +/// # Panics +/// +/// Panics if an existing master secret value is not valid hex. async fn initialize_master_secret() -> Result { let vault = get_vault().with_context(|| "Failed to initialize vault")?; @@ -66,6 +92,12 @@ async fn initialize_master_secret() -> Result { } } #[instrument] +/// Returns the process-wide master secret used to encrypt/decrypt stored secrets. +/// +/// # Errors +/// +/// Returns an error if the master secret cannot be loaded or created via the +/// configured backend. pub async fn get_master_secret() -> Result { if let Some(secret) = MASTER_SECRET.get() { return Ok(*secret); @@ -74,13 +106,34 @@ pub async fn get_master_secret() -> Result { } #[async_trait] +/// Backend used for storing raw secret values outside Postgres. +/// +/// In practice this is used for the master secret, so the Postgres-stored +/// secrets can be encrypted at rest with a key that is not stored in Postgres. pub trait Vault: Send { + /// Persists a secret value in the backend under a given key. + /// + /// # Errors + /// + /// Returns an error if the backend cannot store the value. async fn save_secret(&self, key: String, value: String) -> Result<()>; + /// Retrieves a secret value from the backend. 
+ /// + /// # Errors + /// + /// Returns an error if the backend cannot be accessed or the value cannot + /// be read. async fn read_secret(&self, key: String) -> Result>; + /// Returns the backend kind for logging/diagnostics. fn vault_type(&self) -> VaultManagerType; } #[instrument(err)] +/// Constructs the secret backend selected by `SECRETS_BACKEND`. +/// +/// # Errors +/// +/// Returns an error if `SECRETS_BACKEND` is set to an unknown value. pub fn get_vault() -> Result> { let mut vault_name = std::env::var("SECRETS_BACKEND") .unwrap_or(VaultManagerType::EnvVarMasterSecret.to_string()); @@ -101,6 +154,15 @@ pub fn get_vault() -> Result> { } #[instrument(skip(hasura_transaction, value), err)] +/// Encrypts and stores a secret value in Postgres for later retrieval. +/// +/// The plaintext is encrypted using the current master secret and the resulting +/// ciphertext is stored via the `secret` table helpers. +/// +/// # Errors +/// +/// Returns an error if the key already exists, if encryption/serialization +/// fails, or if the database insert fails. pub async fn save_secret( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -135,6 +197,12 @@ pub async fn save_secret( } #[instrument(skip(hasura_transaction), err)] +/// Loads and decrypts a Postgres-stored secret. +/// +/// # Errors +/// +/// Returns an error if the database lookup fails, the ciphertext cannot be +/// deserialized, decryption fails, or the decrypted bytes are not UTF-8. pub async fn read_secret( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -158,6 +226,16 @@ pub async fn read_secret( } #[instrument(err)] +/// Returns (and if needed creates) the admin user's signing keypair secret. +/// +/// When the key does not exist yet, this generates a new signing key, stores +/// the private key in the secret store, and publishes the corresponding public +/// key via the electoral log. 
+/// +/// # Errors +/// +/// Returns an error if secret storage fails, key generation/encoding fails, or +/// the electoral-log notification fails. pub async fn get_admin_user_signing_key( hasura_transaction: &Transaction<'_>, elog_database: &str, @@ -205,10 +283,12 @@ pub async fn get_admin_user_signing_key( Ok(sk) } +/// Secret key name used to store a voter's signing key for an election event. fn voter_vault_lookup_key(tenant_id: &str, event_id: &str, user_id: &str) -> String { format!("voter_signing_key-{}-{}-{}", tenant_id, event_id, user_id) } +/// Secret key name used to store an admin user's signing key. fn admin_vault_lookup_key(tenant_id: &str, user_id: &str) -> String { format!("admin_signing_key-{}-{}", tenant_id, user_id) } diff --git a/packages/windmill/src/services/voting_status.rs b/packages/windmill/src/services/voting_status.rs index 4b50815bdc..aa6290e9be 100644 --- a/packages/windmill/src/services/voting_status.rs +++ b/packages/windmill/src/services/voting_status.rs @@ -1,8 +1,11 @@ -use crate::postgres::election_event::get_election_event_by_id; -use crate::postgres::election_event::update_election_event_status; // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Per-channel voting status updates, electoral-log posts, and election status rollups. + +use crate::postgres::election_event::get_election_event_by_id; +use crate::postgres::election_event::update_election_event_status; use crate::services::election_event_board::get_election_event_board; use crate::services::election_event_status; use crate::services::electoral_log::*; @@ -24,6 +27,8 @@ use tracing::instrument; use super::election_event_status::get_election_event_status; #[derive(Serialize, Deserialize, Debug)] +/// Update election voting status input. 
+#[allow(missing_docs)]
 pub struct UpdateElectionVotingStatusInput {
     pub election_event_id: String,
     pub election_id: String,
@@ -32,10 +37,17 @@
 }
 
 #[derive(Serialize, Deserialize, Debug)]
+/// Update election voting status output.
+#[allow(missing_docs)]
 pub struct UpdateElectionVotingStatusOutput {
     pub election_id: String,
 }
 
+/// Update per-channel voting status on an election event and persist aggregate event status.
+///
+/// # Errors
+///
+/// Returns an error if the event cannot be loaded, JSON deserialization fails, or updates/postings fail.
 #[instrument(err)]
 pub async fn update_election_status(
     tenant_id: String,
@@ -147,6 +159,11 @@
     Ok(())
 }
 
+/// Append the appropriate electoral-log statement when voting status changes on a channel.
+///
+/// # Errors
+///
+/// Returns an error if the bulletin board name is missing or electoral log posts fail.
 #[instrument(err)]
 pub async fn update_board_on_status_change(
     hasura_transaction: &Transaction<'_>,
@@ -236,14 +253,24 @@
     Ok(())
 }
 
+/// Election status info (zero based).
 #[derive(Debug)]
 pub struct ElectionStatusInfo {
+    /// Not started votes.
     pub total_not_started_votes: i64,
+    /// Open votes.
     pub total_open_votes: i64,
+    /// Closed votes.
     pub total_closed_votes: i64,
+    /// Started votes.
     pub total_started_votes: i64,
 }
 
+/// Returns the election's status info.
+///
+/// # Panics
+///
+/// Panics if per-status counters overflow `i64` when summing channels.
#[instrument(skip(election), ret)] pub fn get_election_status_info(election: &Election) -> ElectionStatusInfo { let mut total_not_started_votes: i64 = 0; diff --git a/packages/windmill/src/tasks/activity_logs_report.rs b/packages/windmill/src/tasks/activity_logs_report.rs index 3f2d9157cd..b443b356ac 100644 --- a/packages/windmill/src/tasks/activity_logs_report.rs +++ b/packages/windmill/src/tasks/activity_logs_report.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Activity log export and reporting worker. + use crate::services::tasks_semaphore::acquire_semaphore; use crate::{ postgres::reports::Report, @@ -23,6 +25,7 @@ use deadpool_postgres::Client as DbClient; use sequent_core::types::hasura::core::TasksExecution; use tracing::instrument; +/// Renders the activity log report into a document with the given ID. async fn generate_activity_logs_report_impl( tenant_id: String, election_event_id: String, @@ -91,40 +94,50 @@ async fn generate_activity_logs_report_impl( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn generate_activity_logs_report( - tenant_id: String, - election_event_id: String, - document_id: String, - format: ReportFormat, - report_clone: Option, - task_execution: TasksExecution, -) -> Result<()> { - match generate_activity_logs_report_impl( - tenant_id, - election_event_id, - document_id.clone(), - format, - report_clone, - ) - .await - { - Ok(()) => { - update_complete(&task_execution, Some(document_id)) - .await - .context("Failed to update task execution status to COMPLETED")?; - Ok(()) - } - Err(err) => { - if let Err(update_err) = update_fail(&task_execution, &format!("{err:?}")).await { - tracing::error!( - "Failed to update task execution status to FAILED: {:?}", - update_err - ); +mod generate_activity_logs_report_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: export activity 
logs for an election event into a stored document. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn generate_activity_logs_report( + tenant_id: String, + election_event_id: String, + document_id: String, + format: ReportFormat, + report_clone: Option, + task_execution: TasksExecution, + ) -> Result<()> { + match super::generate_activity_logs_report_impl( + tenant_id, + election_event_id, + document_id.clone(), + format, + report_clone, + ) + .await + { + Ok(()) => { + update_complete(&task_execution, Some(document_id)) + .await + .context("Failed to update task execution status to COMPLETED")?; + Ok(()) + } + Err(err) => { + if let Err(update_err) = update_fail(&task_execution, &format!("{err:?}")).await { + tracing::error!( + "Failed to update task execution status to FAILED: {:?}", + update_err + ); + } + Err(err) } - Err(err) } } } + +pub use generate_activity_logs_report_task::generate_activity_logs_report; diff --git a/packages/windmill/src/tasks/create_ballot_receipt.rs b/packages/windmill/src/tasks/create_ballot_receipt.rs index 1148980530..7f794e2613 100644 --- a/packages/windmill/src/tasks/create_ballot_receipt.rs +++ b/packages/windmill/src/tasks/create_ballot_receipt.rs @@ -2,6 +2,10 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Generates a voter ballot receipt document. + +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] use crate::services::database::{get_hasura_pool, get_keycloak_pool}; use crate::services::reports::ballot_receipt::{BallotData, BallotTemplate}; use crate::services::reports::template_renderer::{ @@ -17,6 +21,7 @@ use sequent_core::types::date_time::{DateFormat, TimeZone}; use sequent_core::types::hasura::core::TasksExecution; use tracing::instrument; +/// Celery task: render a voter ballot receipt PDF and attach it to a document. 
#[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task] diff --git a/packages/windmill/src/tasks/create_keys.rs b/packages/windmill/src/tasks/create_keys.rs index b81d6a2b3a..84e5b459f9 100644 --- a/packages/windmill/src/tasks/create_keys.rs +++ b/packages/windmill/src/tasks/create_keys.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Keys ceremony background steps (board messages and DB updates). + use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::keys_ceremony::{get_keys_ceremony_by_id, update_keys_ceremony_status}; use crate::postgres::trustee::get_trustees_by_id; @@ -21,12 +23,20 @@ use serde::{Deserialize, Serialize}; use std::default::Default; use tracing::{info, instrument}; +/// JSON body supplied when trustees submit public keys for a keys ceremony. #[derive(Deserialize, Debug, Serialize, Clone)] pub struct CreateKeysBody { + /// PEM or encoded public keys for participating trustees. pub trustee_pks: Vec, + /// Minimum number of trustees required to reconstruct election keys. pub threshold: usize, } +/// Creates public keys on the bulletin board when a keys ceremony may start. +/// +/// # Errors +/// +/// Returns an error on missing ceremony data, trustee lookup failures, or DB commit issues. pub async fn create_keys_impl( tenant_id: String, election_event_id: String, @@ -105,15 +115,25 @@ pub async fn create_keys_impl( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn create_keys( - tenant_id: String, - election_event_id: String, - keys_ceremony_id: String, -) -> Result<()> { - create_keys_impl(tenant_id, election_event_id, keys_ceremony_id) - .await - .map_err(|err| Error::from(err.context("Task failed"))) +mod create_keys_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: create public keys for a keys ceremony. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn create_keys( + tenant_id: String, + election_event_id: String, + keys_ceremony_id: String, + ) -> Result<()> { + create_keys_impl(tenant_id, election_event_id, keys_ceremony_id) + .await + .map_err(|err| Error::from(err.context("Task failed"))) + } } + +pub use create_keys_task::create_keys; diff --git a/packages/windmill/src/tasks/delete_election_event.rs b/packages/windmill/src/tasks/delete_election_event.rs index 94bfa88427..7ae10ef93d 100644 --- a/packages/windmill/src/tasks/delete_election_event.rs +++ b/packages/windmill/src/tasks/delete_election_event.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Deletes an election event and related data asynchronously. + use crate::postgres::election::get_elections_ids; use crate::postgres::election_event::delete_election_event as delete_election_event_postgres; use crate::services::delete_election_event::delete_election_event_b3; @@ -21,6 +24,7 @@ use futures::try_join; use sequent_core::types::hasura::core::TasksExecution; use tracing::instrument; +/// Deletes ImmuDB logs, B3 objects, S3 documents, and the Keycloak realm. #[instrument(err)] async fn delete_election_event_related_data( tenant_id: &str, @@ -37,6 +41,7 @@ async fn delete_election_event_related_data( Ok(()) } +/// Removes the election event from the database, then purges related data. 
#[instrument(err)] async fn delete_election_event( tenant_id: String, @@ -87,25 +92,35 @@ async fn delete_election_event( .await } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn delete_election_event_t( - tenant_id: String, - election_event_id: String, - realm: String, - task_execution: TasksExecution, -) -> Result<()> { - let res = delete_election_event(tenant_id, election_event_id, realm).await; +mod delete_election_event_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - match res { - Ok(_) => { - update_complete(&task_execution, None).await?; - } - Err(err) => { - let error = format!("Error deleting election event: {err}"); - update_fail(&task_execution, &error).await?; - } - }; - Ok(()) + use super::*; + + /// Celery task: hard-delete an election event and mark task execution complete or failed. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn delete_election_event_t( + tenant_id: String, + election_event_id: String, + realm: String, + task_execution: TasksExecution, + ) -> Result<()> { + let res = super::delete_election_event(tenant_id, election_event_id, realm).await; + + match res { + Ok(_) => { + update_complete(&task_execution, None).await?; + } + Err(err) => { + let error = format!("Error deleting election event: {err}"); + update_fail(&task_execution, &error).await?; + } + }; + Ok(()) + } } + +pub use delete_election_event_task::delete_election_event_t; diff --git a/packages/windmill/src/tasks/electoral_log.rs b/packages/windmill/src/tasks/electoral_log.rs index 053f807b16..5688f5dcbe 100644 --- a/packages/windmill/src/tasks/electoral_log.rs +++ b/packages/windmill/src/tasks/electoral_log.rs @@ -1,6 +1,6 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // SPDX-License-Identifier: AGPL-3.0-only - +//! Electoral audit log enqueue, batch processing, and RabbitMQ dispatcher. 
use crate::postgres::election_event::get_election_event_by_id; use crate::services::celery_app::get_celery_connection; use crate::services::celery_app::Queue; @@ -34,6 +34,7 @@ use lapin::{ /// - `Internal` → `"internal"` /// - `KeycloakEvent(s)` → `s` (the raw Keycloak event type, e.g. `"LOGIN"`) #[derive(Clone, Debug, PartialEq)] +#[allow(missing_docs)] pub enum LogMessageType { Internal, KeycloakEvent(String), @@ -70,14 +71,14 @@ impl<'de> Deserialize<'de> for LogMessageType { /// - `Communications(msg)` → `"communications "` /// - `Plain(s)` → `s` #[derive(Clone, Debug, PartialEq)] +#[allow(missing_docs)] pub enum LogEventBody { - /// Body from a send-template action; contains the template message. Communications(String), - /// Body from a standard Keycloak event; typically the error field or "null". Plain(String), } impl LogEventBody { + /// Returns the single-line wire form of this body (same shape as JSON string values). pub fn as_raw(&self) -> String { match self { LogEventBody::Communications(msg) => format!("communications {}", msg), @@ -112,250 +113,269 @@ impl<'de> Deserialize<'de> for LogEventBody { /// Represents an incoming log event. #[derive(Serialize, Deserialize, Clone, Debug)] pub struct LogEventInput { + /// Election event ID. pub election_event_id: String, + /// Whether this is an internal event or a Keycloak-typed admin event. pub message_type: LogMessageType, + /// Keycloak user ID. pub user_id: Option, + /// Optional username. pub username: Option, + /// Tenant ID. pub tenant_id: String, + /// Typed body payload. pub body: LogEventBody, } -/// Enqueue the electoral log event. -/// This task is routed to the durable electoral_log_batch_queue. -#[instrument(skip_all, err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn enqueue_electoral_log_event(input: LogEventInput) -> Result<()> { - // By calling this task, the event is enqueued into the electoral_log_batch_queue. 
- Ok(()) -} +/// Celery tasks for electoral log enqueue, batch processing, and the RabbitMQ dispatcher. +mod electoral_log_tasks { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Enqueue the electoral log event. + /// This task is routed to the durable electoral_log_batch_queue. + #[instrument(skip_all, err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn enqueue_electoral_log_event(input: LogEventInput) -> Result<()> { + // By calling this task, the event is enqueued into the electoral_log_batch_queue. + Ok(()) + } -/// Process a batch of electoral log events. -/// Uses a single Hasura transaction to fetch event details and group messages by board, -/// then for each board group, opens an immudb session/transaction to insert all messages. -#[instrument(skip_all, err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn process_electoral_log_events_batch(events: Vec) -> Result<()> { - let mut messages_by_board: HashMap> = HashMap::new(); - - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .with_context(|| "Error getting DB pool for batch processing")?; - let hasura_tx = hasura_db_client - .transaction() - .await - .with_context(|| "Error starting Hasura transaction")?; - - let mut keycloak_db_client: DbClient = get_keycloak_pool() - .await - .get() - .await - .with_context(|| "Error getting keycloak DB pool for batch processing")?; - let keycloak_transaction = keycloak_db_client - .transaction() - .await - .with_context(|| "Error starting keycloak transaction")?; - - for input in events.iter() { - let election_event = - get_election_event_by_id(&hasura_tx, &input.tenant_id, &input.election_event_id) - .await - .with_context(|| "Error getting election event")?; + /// Process a batch of electoral log events. 
+ /// Uses a single Hasura transaction to fetch event details and group messages by board, + /// then for each board group, opens an immudb session/transaction to insert all messages. + #[instrument(skip_all, err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn process_electoral_log_events_batch(events: Vec) -> Result<()> { + let mut messages_by_board: HashMap> = HashMap::new(); - let board_name = get_election_event_board(election_event.bulletin_board_reference.clone()) - .with_context(|| "Error getting election event board")?; + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .with_context(|| "Error getting DB pool for batch processing")?; + let hasura_tx = hasura_db_client + .transaction() + .await + .with_context(|| "Error starting Hasura transaction")?; - let event_message = match &input.message_type { - LogMessageType::Internal => { - let message: ElectoralLogMessage = deserialize_str(&input.body.as_raw()) - .with_context(|| "Error parsing input.body into a ElectoralLogMessage")?; - message - } - LogMessageType::KeycloakEvent(event_type) => { - let user_id = input - .user_id - .clone() - .unwrap_or_else(|| "unknown_user".into()); - let username = input.username.clone(); - let realm = get_event_realm(&input.tenant_id, &input.election_event_id); - let user_area_id = get_user_area_id(&keycloak_transaction, &realm, &user_id) + let mut keycloak_db_client: DbClient = get_keycloak_pool() + .await + .get() + .await + .with_context(|| "Error getting keycloak DB pool for batch processing")?; + let keycloak_transaction = keycloak_db_client + .transaction() + .await + .with_context(|| "Error starting keycloak transaction")?; + + for input in events.iter() { + let election_event = + get_election_event_by_id(&hasura_tx, &input.tenant_id, &input.election_event_id) .await - .with_context(|| "Error getting user area id")?; - let electoral_log = ElectoralLog::new( - &hasura_tx, - &input.tenant_id, 
- Some(&election_event.id), - &board_name, - ) - .await - .with_context(|| "Error initializing electoral log")?; + .with_context(|| "Error getting election event")?; + + let board_name = + get_election_event_board(election_event.bulletin_board_reference.clone()) + .with_context(|| "Error getting election event board")?; - if let LogEventBody::Communications(ref template_body) = input.body { - let send_template_msg = electoral_log - .build_send_template_message( - Some(template_body.clone()), + let event_message = match &input.message_type { + LogMessageType::Internal => { + let message: ElectoralLogMessage = deserialize_str(&input.body.as_raw()) + .with_context(|| "Error parsing input.body into a ElectoralLogMessage")?; + message + } + LogMessageType::KeycloakEvent(event_type) => { + let user_id = input + .user_id + .clone() + .unwrap_or_else(|| "unknown_user".into()); + let username = input.username.clone(); + let realm = get_event_realm(&input.tenant_id, &input.election_event_id); + let user_area_id = get_user_area_id(&keycloak_transaction, &realm, &user_id) + .await + .with_context(|| "Error getting user area id")?; + let electoral_log = ElectoralLog::new( + &hasura_tx, + &input.tenant_id, + Some(&election_event.id), + &board_name, + ) + .await + .with_context(|| "Error initializing electoral log")?; + + if let LogEventBody::Communications(ref template_body) = input.body { + let send_template_msg = electoral_log + .build_send_template_message( + Some(template_body.clone()), + input.election_event_id.clone(), + Some(user_id.clone()), + username.clone(), + None, + user_area_id.clone(), + ) + .with_context(|| "Error building send template message")?; + messages_by_board + .entry(board_name.clone()) + .or_default() + .push(send_template_msg); + } + + electoral_log + .build_keycloak_event_message( input.election_event_id.clone(), + event_type.clone(), + input.body.as_raw(), Some(user_id.clone()), username.clone(), - None, - user_area_id.clone(), + user_area_id, ) - 
.with_context(|| "Error building send template message")?; - messages_by_board - .entry(board_name.clone()) - .or_default() - .push(send_template_msg); + .with_context(|| "Error building keycloak event message")? } + }; + + messages_by_board + .entry(board_name.clone()) + .or_default() + .push(event_message); + } - electoral_log - .build_keycloak_event_message( - input.election_event_id.clone(), - event_type.clone(), - input.body.as_raw(), - Some(user_id.clone()), - username.clone(), - user_area_id, + hasura_tx + .commit() + .await + .with_context(|| "Error committing Hasura transaction")?; + + for (board, messages) in messages_by_board.into_iter() { + let mut board_client = get_board_client().await?; + board_client.open_session(&board).await?; + let immudb_tx = board_client.new_tx(TxMode::ReadWrite).await?; + board_client + .insert_electoral_log_messages_batch(&immudb_tx, &messages) + .await + .with_context(|| { + format!( + "Error inserting batch electoral log messages for board {}", + board ) - .with_context(|| "Error building keycloak event message")? - } - }; + })?; + board_client.commit(&immudb_tx).await.with_context(|| { + format!("Error committing immudb transaction for board {}", board) + })?; + board_client.close_session().await?; + } - messages_by_board - .entry(board_name.clone()) - .or_default() - .push(event_message); + Ok(()) } - hasura_tx - .commit() - .await - .with_context(|| "Error committing Hasura transaction")?; - - for (board, messages) in messages_by_board.into_iter() { - let mut board_client = get_board_client().await?; - board_client.open_session(&board).await?; - let immudb_tx = board_client.new_tx(TxMode::ReadWrite).await?; - board_client - .insert_electoral_log_messages_batch(&immudb_tx, &messages) + /// Dispatcher: repeatedly reads batches of messages from the electoral_log_batch_queue and dispatches them + /// to the processing task. Each batch is processed sequentially so that only a single batch is held in memory. 
+ #[instrument(skip_all, err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 30, max_retries = 0, expires = 1)] + pub async fn electoral_log_batch_dispatcher() -> Result<()> { + info!("starting electoral_log_batch_dispatcher"); + + // Reuse the global AMQP connection. + let connection_arc = get_celery_connection().await?; + let channel = connection_arc + .create_channel() .await - .with_context(|| { - format!( - "Error inserting batch electoral log messages for board {}", - board - ) - })?; - board_client - .commit(&immudb_tx) + .with_context(|| "Error creating RabbitMQ channel")?; + + let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; + let queue_name = Queue::ElectoralLogEvent.queue_name(&slug); + let _queue = channel + .queue_declare( + &queue_name, + QueueDeclareOptions { + durable: true, + ..Default::default() + }, + FieldTable::default(), + ) .await - .with_context(|| format!("Error committing immudb transaction for board {}", board))?; - board_client.close_session().await?; - } - - Ok(()) -} + .with_context(|| "Error declaring electoral_log_batch_queue")?; + + // Get the batch size from PgConfig. + let batch_size: usize = PgConfig::from_env()?.default_sql_batch_size.try_into()?; + + loop { + info!("starting a new batch for queue {queue_name}, max batch_size={batch_size}"); + let mut batch_deliveries = Vec::with_capacity(batch_size); + for _ in 0..batch_size { + if let Some(delivery) = channel + .basic_get(&queue_name, BasicGetOptions { no_ack: false }) + .await? + { + info!("adding delivery element to batch_deliveries"); + batch_deliveries.push(delivery); + } else { + info!("not adding to batch_deliveries, break"); + break; + } + } -/// Dispatcher: repeatedly reads batches of messages from the electoral_log_batch_queue and dispatches them -/// to the processing task. Each batch is processed sequentially so that only a single batch is held in memory. 
-#[instrument(skip_all, err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 30, max_retries = 0, expires = 1)] -pub async fn electoral_log_batch_dispatcher() -> Result<()> { - info!("starting electoral_log_batch_dispatcher"); - - // Reuse the global AMQP connection. - let connection_arc = get_celery_connection().await?; - let channel = connection_arc - .create_channel() - .await - .with_context(|| "Error creating RabbitMQ channel")?; - - let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; - let queue_name = Queue::ElectoralLogEvent.queue_name(&slug); - let _queue = channel - .queue_declare( - &queue_name, - QueueDeclareOptions { - durable: true, - ..Default::default() - }, - FieldTable::default(), - ) - .await - .with_context(|| "Error declaring electoral_log_batch_queue")?; - - // Get the batch size from PgConfig. - let batch_size: usize = PgConfig::from_env()?.default_sql_batch_size.try_into()?; - - loop { - info!("starting a new batch for queue {queue_name}, max batch_size={batch_size}"); - let mut batch_deliveries = Vec::with_capacity(batch_size); - for _ in 0..batch_size { - if let Some(delivery) = channel - .basic_get(&queue_name, BasicGetOptions { no_ack: false }) - .await? - { - info!("adding delivery element to batch_deliveries"); - batch_deliveries.push(delivery); - } else { - info!("not adding to batch_deliveries, break"); + if batch_deliveries.is_empty() { + info!("no more elements to process in queue"); break; } - } - - if batch_deliveries.is_empty() { - info!("no more elements to process in queue"); - break; - } - info!( - "deserializing {len} elements for this batch", - len = batch_deliveries.len() - ); - - // Deserialize messages sequentially. - let mut events = Vec::with_capacity(batch_deliveries.len()); - for delivery in &batch_deliveries { - // Parse the raw message into a JSON value. 
- let v: serde_json::Value = serde_json::from_slice(&delivery.data) - .with_context(|| "Error parsing Celery message as JSON")?; - // Expect the message to be an array. - if let serde_json::Value::Array(arr) = v { - if arr.len() < 2 { - return Err( - "Invalid message format: expected array with at least 2 elements".into(), - ); + info!( + "deserializing {len} elements for this batch", + len = batch_deliveries.len() + ); + + // Deserialize messages sequentially. + let mut events = Vec::with_capacity(batch_deliveries.len()); + for delivery in &batch_deliveries { + // Parse the raw message into a JSON value. + let v: serde_json::Value = serde_json::from_slice(&delivery.data) + .with_context(|| "Error parsing Celery message as JSON")?; + // Expect the message to be an array. + if let serde_json::Value::Array(arr) = v { + if arr.len() < 2 { + return Err( + "Invalid message format: expected array with at least 2 elements" + .into(), + ); + } + let payload = &arr[1]; + let input_value = payload + .get("input") + .ok_or_else(|| anyhow!("Missing 'input' field in message payload"))?; + let event: LogEventInput = serde_json::from_value(input_value.clone()) + .with_context(|| "Error deserializing LogEventInput from input field")?; + events.push(event); + } else { + return Err("Invalid message format: expected JSON array".into()); } - let payload = &arr[1]; - let input_value = payload - .get("input") - .ok_or_else(|| anyhow!("Missing 'input' field in message payload"))?; - let event: LogEventInput = serde_json::from_value(input_value.clone()) - .with_context(|| "Error deserializing LogEventInput from input field")?; - events.push(event); - } else { - return Err("Invalid message format: expected JSON array".into()); } - } - - // Dispatch the processing task via the Celery app. 
- let celery_app = crate::services::celery_app::get_celery_app().await; - let celery_task = process_electoral_log_events_batch::new(events); - info!("sending processing task for current batch"); - celery_app - .send_task(celery_task) - .await - .with_context(|| "Error sending process_electoral_log_events_batch task")?; - // Acknowledge all messages in the current batch. - for delivery in batch_deliveries { - channel - .basic_ack(delivery.delivery_tag, BasicAckOptions::default()) + // Dispatch the processing task via the Celery app. + let celery_app = crate::services::celery_app::get_celery_app().await; + let celery_task = process_electoral_log_events_batch::new(events); + info!("sending processing task for current batch"); + celery_app + .send_task(celery_task) .await - .with_context(|| "Error acknowledging message")?; + .with_context(|| "Error sending process_electoral_log_events_batch task")?; + + // Acknowledge all messages in the current batch. + for delivery in batch_deliveries { + channel + .basic_ack(delivery.delivery_tag, BasicAckOptions::default()) + .await + .with_context(|| "Error acknowledging message")?; + } } + info!("finishing electoral_log_batch_dispatcher"); + Ok(()) } - info!("finishing electoral_log_batch_dispatcher"); - Ok(()) } + +pub use electoral_log_tasks::{ + electoral_log_batch_dispatcher, enqueue_electoral_log_event, process_electoral_log_events_batch, +}; diff --git a/packages/windmill/src/tasks/execute_tally_session.rs b/packages/windmill/src/tasks/execute_tally_session.rs index 0d3ebc710f..d6b337cb07 100644 --- a/packages/windmill/src/tasks/execute_tally_session.rs +++ b/packages/windmill/src/tasks/execute_tally_session.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Runs tally ceremony steps inside coordinated database transactions. 
use crate::postgres::area::get_event_areas; use crate::postgres::contest::export_contests; use crate::postgres::election::set_election_initialization_report_generated; @@ -108,6 +109,7 @@ use tokio::time::Duration as ChronoDuration; use tracing::{event, info, instrument, warn, Level}; use uuid::Uuid; +/// Deserializes ballot style rows into domain [`BallotStyle`] values. #[instrument(skip_all, err)] fn get_ballot_styles(ballot_styles: &[BallotStyleHasura]) -> Result> { // get ballot styles, from where we'll get the Contest(s) @@ -127,6 +129,7 @@ fn get_ballot_styles(ballot_styles: &[BallotStyleHasura]) -> Result>>() } +/// Builds per-area contest tally inputs using mixnet ciphertexts and contest metadata. #[instrument(skip_all, err)] async fn generate_area_contests_mc( hasura_transaction: &Transaction<'_>, @@ -262,6 +265,7 @@ async fn generate_area_contests_mc( Ok(almost_vec) } +/// Builds [`AreaContestDataType`] rows from decrypted plaintexts and ballot styles. #[instrument(skip_all, err)] fn generate_area_contests( relevant_plaintexts: &Vec<&Message>, @@ -355,6 +359,7 @@ fn generate_area_contests( Ok(almost_vec) } +/// Resolves plaintext artifacts into area-contest tally rows according to the contest encryption policy. #[instrument(skip_all, err)] async fn process_plaintexts( hasura_transaction: &Transaction<'_>, @@ -432,6 +437,7 @@ async fn process_plaintexts( Ok(filtered_area_contests) } +/// Maps a stored execution status string to a `TallyExecutionStatus` value. #[instrument] fn get_execution_status(execution_status: Option) -> Option { let Some(execution_status_str) = execution_status.clone() else { @@ -464,6 +470,15 @@ fn get_execution_status(execution_status: Option) -> Option, @@ -629,6 +649,7 @@ pub async fn upsert_ballots_messages( Ok(tally_session_contests_updated) } +/// Returns `tally_session.created_at` as Unix seconds, or an error when the timestamp is missing. 
fn get_tally_session_created_at_timestamp_secs(tally_session: &TallySession) -> Result { let Some(created_at) = &tally_session.created_at.clone() else { return Err(Error::String( @@ -638,6 +659,11 @@ fn get_tally_session_created_at_timestamp_secs(tally_session: &TallySession) -> Ok(created_at.timestamp()) } +/// Filters published tally sheets to those consistent with decrypted contest definitions. +/// +/// # Errors +/// +/// Returns an error when a row is malformed or fails [`validate_tally_sheet`]. #[instrument(skip_all, err)] pub fn clean_tally_sheets( tally_sheet_rows: &[TallySheet], @@ -685,6 +711,7 @@ pub fn clean_tally_sheets( .collect::>>() } +/// Plaintext tally inputs plus session metadata produced while mapping board state to DB rows. type PlaintextTallyDataBundle = ( Vec, i64, @@ -697,6 +724,11 @@ type PlaintextTallyDataBundle = ( TallySession, ); +/// Loads plaintexts, tally sheets, and cast-vote summaries needed for the rest of tally execution. +/// +/// # Errors +/// +/// Returns an error if any prerequisite query, deserialization, or validation step fails. #[instrument(skip_all, err)] async fn map_plaintext_data( hasura_transaction: &Transaction<'_>, @@ -1020,6 +1052,11 @@ async fn map_plaintext_data( ))) } +/// Resolves user and system HTML templates (and PDF options) for initialization or results reports. +/// +/// # Errors +/// +/// Returns an error when template assets cannot be read or custom templates fail to load. async fn build_reports_template_data( tally_type_enum: TallyType, tenant_id: String, @@ -1117,6 +1154,11 @@ async fn build_reports_template_data( Ok((report_content_template, report_system_template, pdf_options)) } +/// Core tally pipeline: plaintext processing, velvet tally, resolutions, logs, and optional reports. +/// +/// # Errors +/// +/// Returns an error on any failed ceremony step, board interaction, or persistence failure. 
#[instrument(err, skip(hasura_transaction, keycloak_transaction))] pub async fn execute_tally_session_wrapped( tenant_id: String, @@ -1389,6 +1431,11 @@ pub async fn execute_tally_session_wrapped( Ok(()) } +/// Runs [`execute_tally_session_wrapped`] inside paired Hasura and Keycloak transactions with rollback semantics. +/// +/// # Errors +/// +/// Returns an error from the wrapped tally run or from commit/rollback handling. #[instrument(err)] pub async fn transactions_wrapper( tenant_id: String, @@ -1453,59 +1500,70 @@ pub async fn transactions_wrapper( } } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 1_200_000, max_retries = 0, expires = 15)] -pub async fn execute_tally_session( - tenant_id: String, - election_event_id: String, - tally_session_id: String, - tally_type: Option, - election_ids: Option>, -) -> Result<()> { - let _permit = acquire_semaphore().await?; - let Ok(lock) = PgLock::acquire( - format!( - "execute_tally_session-{}-{}-{}", - tenant_id, election_event_id, tally_session_id - ), - Uuid::new_v4().to_string(), - ISO8601::now() - .checked_add_signed(Duration::seconds(120)) - .expect("tally session lock expiry overflow"), - ) - .await - else { - info!( - "Skipping: tally in progress for event {} and session id {}", - election_event_id, tally_session_id - ); - return Ok(()); - }; - let mut interval = tokio::time::interval(ChronoDuration::from_secs(30)); - let mut current_task = tokio::spawn(transactions_wrapper( - tenant_id.clone(), - election_event_id.clone(), - tally_session_id.clone(), - tally_type.clone(), - election_ids.clone(), - )); - let res = loop { - tokio::select! { - _ = interval.tick() => { - // Execute the callback function here - lock.update_expiry().await?; - } - res = &mut current_task => { - - break res.map_err(|err| Error::String(format!("Error executing loop: {:?}", err))).flatten(); +/// Celery entrypoint for running a tally session with locking, semaphore, and transaction boundaries. 
+mod execute_tally_session_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: executes a tally session. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 1_200_000, max_retries = 0, expires = 15)] + pub async fn execute_tally_session( + tenant_id: String, + election_event_id: String, + tally_session_id: String, + tally_type: Option, + election_ids: Option>, + ) -> Result<()> { + let _permit = acquire_semaphore().await?; + let Ok(lock) = PgLock::acquire( + format!( + "execute_tally_session-{}-{}-{}", + tenant_id, election_event_id, tally_session_id + ), + Uuid::new_v4().to_string(), + ISO8601::now() + .checked_add_signed(Duration::seconds(120)) + .expect("tally session lock expiry overflow"), + ) + .await + else { + info!( + "Skipping: tally in progress for event {} and session id {}", + election_event_id, tally_session_id + ); + return Ok(()); + }; + let mut interval = tokio::time::interval(ChronoDuration::from_secs(30)); + let mut current_task = tokio::spawn(transactions_wrapper( + tenant_id.clone(), + election_event_id.clone(), + tally_session_id.clone(), + tally_type.clone(), + election_ids.clone(), + )); + let res = loop { + tokio::select! { + _ = interval.tick() => { + lock.update_expiry().await?; + } + res = &mut current_task => { + break res + .map_err(|err| Error::String(format!("Error executing loop: {:?}", err))) + .flatten(); + } } - } - }; - lock.release().await?; - res + }; + lock.release().await?; + res + } } +pub use execute_tally_session_task::execute_tally_session; + #[cfg(test)] mod tests { diff --git a/packages/windmill/src/tasks/export_application.rs b/packages/windmill/src/tasks/export_application.rs index 87f7964948..96d43d6eae 100644 --- a/packages/windmill/src/tasks/export_application.rs +++ b/packages/windmill/src/tasks/export_application.rs @@ -2,6 +2,11 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! 
Exports voter enrollment application archives. + +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] + use crate::{ services::{export::export_application::process_export, tasks_execution::update_fail}, types::error::{Error, Result}, @@ -11,6 +16,7 @@ use celery::error::TaskError; use sequent_core::types::hasura::core::TasksExecution; use tracing::{event, info, instrument, Level}; +/// Celery task: serialize voter enrollment applications to the export bundle for a document. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_ballot_publication.rs b/packages/windmill/src/tasks/export_ballot_publication.rs index 377cb4840f..2a012021e9 100644 --- a/packages/windmill/src/tasks/export_ballot_publication.rs +++ b/packages/windmill/src/tasks/export_ballot_publication.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Exports ballot publication packages for distribution. + +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] + use crate::postgres::ballot_publication::get_ballot_publication_by_id; use crate::services::database::get_hasura_pool; use crate::services::export::export_ballot_publication::process_export_ballot_publication; @@ -14,6 +20,7 @@ use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; use tracing::{event, instrument, Level}; +/// Celery task: exports a ballot publication package into the document store. 
#[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_certificate_authority.rs b/packages/windmill/src/tasks/export_certificate_authority.rs index b975b2590b..1f58c181e8 100644 --- a/packages/windmill/src/tasks/export_certificate_authority.rs +++ b/packages/windmill/src/tasks/export_certificate_authority.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Exports certificate authority material for an election event. + use crate::postgres::certificate_authority::get_certificate_authorities_pem_by_ids; use crate::postgres::document::insert_document; use crate::services::database::get_hasura_pool; @@ -17,6 +19,7 @@ use tempfile::NamedTempFile; use tracing::instrument; use uuid::Uuid; +/// Create a document with `document_id` with the selected CA PEMs and upload to the private bucket. async fn export_certificate_authority_impl( tenant_id: String, election_event_id: Uuid, @@ -100,33 +103,48 @@ async fn export_certificate_authority_impl( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn export_certificate_authority( - tenant_id: String, - election_event_id: Uuid, - ids: Vec, - document_id: String, - task_execution: TasksExecution, -) -> Result<()> { - match export_certificate_authority_impl(tenant_id, election_event_id, ids, document_id.clone()) +mod export_certificate_authority_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: export selected certificate authorities as a PEM bundle to S3. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn export_certificate_authority( + tenant_id: String, + election_event_id: Uuid, + ids: Vec, + document_id: String, + task_execution: TasksExecution, + ) -> Result<()> { + match super::export_certificate_authority_impl( + tenant_id, + election_event_id, + ids, + document_id.clone(), + ) .await - { - Ok(()) => { - update_complete(&task_execution, Some(document_id)) - .await - .context("Failed to update task execution status to COMPLETED")?; - Ok(()) - } - Err(err) => { - if let Err(update_err) = update_fail(&task_execution, &format!("{err:?}")).await { - tracing::error!( - "Failed to update task execution status to FAILED: {:?}", - update_err - ); + { + Ok(()) => { + update_complete(&task_execution, Some(document_id)) + .await + .context("Failed to update task execution status to COMPLETED")?; + Ok(()) + } + Err(err) => { + if let Err(update_err) = update_fail(&task_execution, &format!("{err:?}")).await { + tracing::error!( + "Failed to update task execution status to FAILED: {:?}", + update_err + ); + } + Err(err.into()) } - Err(err.into()) } } } + +pub use export_certificate_authority_task::export_certificate_authority; diff --git a/packages/windmill/src/tasks/export_election_event.rs b/packages/windmill/src/tasks/export_election_event.rs index 5ffec352de..8c27d86fb4 100644 --- a/packages/windmill/src/tasks/export_election_event.rs +++ b/packages/windmill/src/tasks/export_election_event.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Exports election event configuration and related entities. 
+ use crate::services::database::get_hasura_pool; use crate::services::export::export_election_event::process_export_zip; use crate::services::tasks_execution::*; @@ -12,96 +15,120 @@ use sequent_core::types::hasura::core::TasksExecution; use serde::{Deserialize, Serialize}; use tracing::{event, instrument, Level}; +/// Slice toggles for which election artifacts are included in the ZIP export. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ExportOptions { + /// Optional password when `is_encrypted` is true. pub password: Option, + /// When true, emit an encrypted archive instead of a plain ZIP. pub is_encrypted: bool, + /// Include voter data in the export. pub include_voters: bool, + /// Include activity logs in the export. pub activity_logs: bool, + /// Include bulletin board data and proofs. pub bulletin_board: bool, + /// Include ballot publication data. pub publications: bool, + /// Include S3 file listings and small assets. pub s3_files: bool, + /// Include scheduled events. pub scheduled_events: bool, + /// Include generated reports and attachments. pub reports: bool, + /// Include voter enrollment applications. pub applications: bool, + /// Include encrypted tally data and results. pub tally: bool, + /// Attach CA / TLS certificate material referenced by the event. 
pub include_certificates: bool, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn export_election_event( - tenant_id: String, - election_event_id: String, - export_config: ExportOptions, - document_id: String, - task_execution: TasksExecution, -) -> Result<()> { - let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { - Ok(client) => client, - Err(err) => { - let err_str = format!("Failed to get Hasura DB pool: {err:?}"); - if let Err(err) = update_fail(&task_execution, &err_str).await { - event!( - Level::ERROR, - "Failed to update task execution status to FAILED: {:?}", - err - ); +mod export_election_event_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: export an election event as a ZIP. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn export_election_event( + tenant_id: String, + election_event_id: String, + export_config: ExportOptions, + document_id: String, + task_execution: TasksExecution, + ) -> Result<()> { + let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { + Ok(client) => client, + Err(err) => { + let err_str = format!("Failed to get Hasura DB pool: {err:?}"); + if let Err(err) = update_fail(&task_execution, &err_str).await { + event!( + Level::ERROR, + "Failed to update task execution status to FAILED: {:?}", + err + ); + } + return Err(Error::String(err_str)); } - return Err(Error::String(err_str)); - } - }; + }; - let hasura_transaction = match hasura_db_client.transaction().await { - Ok(transaction) => transaction, - Err(err) => { - let err_str = format!("Failed to start Hasura transaction: {err:?}"); - if let Err(err) = update_fail(&task_execution, &err_str).await { - event!( - Level::ERROR, - "Failed to update task execution status to FAILED: {:?}", - err - ); + let hasura_transaction = match 
hasura_db_client.transaction().await { + Ok(transaction) => transaction, + Err(err) => { + let err_str = format!("Failed to start Hasura transaction: {err:?}"); + if let Err(err) = update_fail(&task_execution, &err_str).await { + event!( + Level::ERROR, + "Failed to update task execution status to FAILED: {:?}", + err + ); + } + return Err(Error::String(err_str)); } - return Err(Error::String(err_str)); - } - }; + }; - // Process the export - match process_export_zip(&tenant_id, &election_event_id, &document_id, export_config).await { - Ok(_) => (), - Err(err) => { - let err_str = format!("Failed to export election event data: {err:?}"); - if let Err(update_err) = update_fail(&task_execution, &err_str).await { - event!( - Level::ERROR, - "Failed to update task execution status to FAILED: {:?}", - update_err - ); + // Process the export + match process_export_zip(&tenant_id, &election_event_id, &document_id, export_config).await + { + Ok(_) => (), + Err(err) => { + let err_str = format!("Failed to export election event data: {err:?}"); + if let Err(update_err) = update_fail(&task_execution, &err_str).await { + event!( + Level::ERROR, + "Failed to update task execution status to FAILED: {:?}", + update_err + ); + } + return Err(Error::String(err_str)); } - return Err(Error::String(err_str)); } - } - match hasura_transaction.commit().await { - Ok(_) => (), - Err(err) => { - let err_str = format!("Commit failed: {err:?}"); - if let Err(err) = update_fail(&task_execution, &err_str).await { - event!( - Level::ERROR, - "Failed to update task execution status to FAILED: {:?}", - err - ); + match hasura_transaction.commit().await { + Ok(_) => (), + Err(err) => { + let err_str = format!("Commit failed: {err:?}"); + if let Err(err) = update_fail(&task_execution, &err_str).await { + event!( + Level::ERROR, + "Failed to update task execution status to FAILED: {:?}", + err + ); + } + return Err(Error::String(err_str)); } - return Err(Error::String(err_str)); - } - }; + }; - 
update_complete(&task_execution, Some(document_id.to_string())) - .await - .context("Failed to update task execution status to COMPLETED")?; + update_complete(&task_execution, Some(document_id.to_string())) + .await + .context("Failed to update task execution status to COMPLETED")?; - Ok(()) + Ok(()) + } } + +pub use export_election_event_task::export_election_event; diff --git a/packages/windmill/src/tasks/export_tally_results.rs b/packages/windmill/src/tasks/export_tally_results.rs index 0be4ec92cd..19215823e4 100644 --- a/packages/windmill/src/tasks/export_tally_results.rs +++ b/packages/windmill/src/tasks/export_tally_results.rs @@ -1,6 +1,12 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Exports tally outcome summaries and artifacts. + +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] + use crate::services::export::export_tally_results::{ export_tally_results_to_xlsx, get_tally_session_execution_results_sqlite_file, }; @@ -11,6 +17,7 @@ use celery::error::TaskError; use sequent_core::types::hasura::core::TasksExecution; use tracing::instrument; +/// Celery task: export tally session results as an XLSX document. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_tasks_execution.rs b/packages/windmill/src/tasks/export_tasks_execution.rs index cf99e2b457..637c7e456d 100644 --- a/packages/windmill/src/tasks/export_tasks_execution.rs +++ b/packages/windmill/src/tasks/export_tasks_execution.rs @@ -2,6 +2,11 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Exports task execution history for auditing. 
+ +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] + use crate::services::database::{get_hasura_pool, get_keycloak_pool, PgConfig}; use crate::services::export::export_tasks_execution::process_export; use crate::types::error::{Error, Result}; @@ -11,6 +16,7 @@ use serde::{Deserialize, Serialize}; use std::io::{BufWriter, Write}; use tracing::{debug, info, instrument}; +/// Celery task: export task execution audit rows for an election event into a stored document. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_templates.rs b/packages/windmill/src/tasks/export_templates.rs index 52e3f978d0..4f573a5ba4 100644 --- a/packages/windmill/src/tasks/export_templates.rs +++ b/packages/windmill/src/tasks/export_templates.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Exports communication templates for a tenant. +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] use crate::types::error::Error; use crate::{ @@ -15,6 +18,7 @@ use celery::error::TaskError; use sequent_core::types::hasura::core::TasksExecution; use tracing::instrument; +/// Celery task: export reusable templates for the tenant into storage. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_tenant_config.rs b/packages/windmill/src/tasks/export_tenant_config.rs index 01be01c5b9..3c4e8a2182 100644 --- a/packages/windmill/src/tasks/export_tenant_config.rs +++ b/packages/windmill/src/tasks/export_tenant_config.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Exports tenant-level configuration documents. 
+#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] use crate::services::database::get_hasura_pool; use crate::services::export::export_tenant_config::process_export_zip; @@ -13,6 +16,7 @@ use sequent_core::types::hasura::core::TasksExecution; use serde::{Deserialize, Serialize}; use tracing::{event, instrument, Level}; +/// Celery task: export the tenant configuration as a ZIP. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/export_trustees.rs b/packages/windmill/src/tasks/export_trustees.rs index 8f56f14e67..76ff125bee 100644 --- a/packages/windmill/src/tasks/export_trustees.rs +++ b/packages/windmill/src/tasks/export_trustees.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Exports trustee configuration for an event. + use crate::services::export::export_trustees::read_trustees_config; use crate::services::providers::transactions_provider::provide_hasura_transaction; use crate::types::error::{Error, Result as TaskResult}; @@ -11,6 +14,7 @@ use sequent_core::types::hasura::core::TasksExecution; use serde::{Deserialize, Serialize}; use tracing::{event, instrument, Level}; +/// Export trustees configuration using `password` to unwrap secrets. 
#[instrument(err)] async fn export_trustees_service( tenant_id: String, @@ -37,15 +41,25 @@ async fn export_trustees_service( .await } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn export_trustees_task( - tenant_id: String, - document_id: String, - password: String, - task_execution: TasksExecution, -) -> TaskResult<()> { - export_trustees_service(tenant_id, document_id, password, task_execution).await?; - Ok(()) +mod export_trustees_task_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: export trustee ceremony configuration using `password` to unwrap secrets. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn export_trustees_task( + tenant_id: String, + document_id: String, + password: String, + task_execution: TasksExecution, + ) -> TaskResult<()> { + super::export_trustees_service(tenant_id, document_id, password, task_execution).await?; + Ok(()) + } } + +pub use export_trustees_task_task::export_trustees_task; diff --git a/packages/windmill/src/tasks/export_users.rs b/packages/windmill/src/tasks/export_users.rs index 4b45a52cae..c76edd77a6 100644 --- a/packages/windmill/src/tasks/export_users.rs +++ b/packages/windmill/src/tasks/export_users.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Exports voters list for an event and tenant. use crate::postgres::document::insert_document; use crate::services::database::{get_hasura_pool, get_keycloak_pool, PgConfig}; use crate::services::documents::upload_and_return_document; @@ -17,138 +18,152 @@ use sequent_core::util; use serde::{Deserialize, Serialize}; use tracing::{debug, info, instrument}; +/// API response shape for the export users task. 
#[derive(Serialize, Deserialize, Debug, Clone)] pub struct ExportUsersOutput { + /// Document ID for the generated CSV. pub document_id: String, + /// Error message when the CSV could not be produced or uploaded. pub error_msg: Option, + /// Optional task execution row updated on completion. pub task_execution: Option, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn export_users( - body: ExportBody, - document_id: String, - task_execution: Option, -) -> Result<()> { - let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { - Ok(client) => client, - Err(err) => { - if let Some(task_execution) = &task_execution { - update_fail(task_execution, "Failed to get Hasura DB pool").await; +mod export_users_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: export voter rows to CSV, upload to the private bucket, and register a document. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn export_users( + body: ExportBody, + document_id: String, + task_execution: Option, + ) -> Result<()> { + let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { + Ok(client) => client, + Err(err) => { + if let Some(task_execution) = &task_execution { + update_fail(task_execution, "Failed to get Hasura DB pool").await; + } + return Err(Error::String(format!( + "Error getting Hasura DB pool: {}", + err + ))); } - return Err(Error::String(format!( - "Error getting Hasura DB pool: {}", - err - ))); - } - }; + }; - let hasura_transaction = match hasura_db_client.transaction().await { - Ok(transaction) => transaction, - Err(err) => { - if let Some(task_execution) = &task_execution { - update_fail(task_execution, "Failed to start Hasura transaction").await?; + let hasura_transaction = match hasura_db_client.transaction().await { + Ok(transaction) => 
transaction, + Err(err) => { + if let Some(task_execution) = &task_execution { + update_fail(task_execution, "Failed to start Hasura transaction").await?; + } + return Err(Error::String(format!( + "Error starting Hasura transaction: {err}" + ))); } - return Err(Error::String(format!( - "Error starting Hasura transaction: {err}" - ))); - } - }; - - // Export the users to a temporary file - let temp_path = match export_users_file(&hasura_transaction, body.clone()).await { - Ok(result) => result, - Err(err) => { - if let Some(task_execution) = &task_execution { - update_fail(task_execution, &err.to_string()).await?; + }; + + // Export the users to a temporary file + let temp_path = match export_users_file(&hasura_transaction, body.clone()).await { + Ok(result) => result, + Err(err) => { + if let Some(task_execution) = &task_execution { + update_fail(task_execution, &err.to_string()).await?; + } + return Err(Error::String(format!("Error listing users: {err:?}"))); } - return Err(Error::String(format!("Error listing users: {err:?}"))); - } - }; - let size = temp_path.metadata()?.len(); - - // Upload to S3 - let (tenant_id, election_event_id) = match &body { - ExportBody::Users { - tenant_id, - election_event_id, - .. - } => ( - tenant_id.to_string(), - election_event_id.clone().unwrap_or_default(), - ), - ExportBody::TenantUsers { tenant_id } => (tenant_id.to_string(), "".to_string()), - }; - - let timestamp = match util::date::timestamp() { - Ok(timestamp) => timestamp, - Err(err) => { - if let Some(task_execution) = &task_execution { - update_fail(task_execution, "Failed to obtain timestamp").await?; + }; + let size = temp_path.metadata()?.len(); + + // Upload to S3 + let (tenant_id, election_event_id) = match &body { + ExportBody::Users { + tenant_id, + election_event_id, + .. 
+ } => ( + tenant_id.to_string(), + election_event_id.clone().unwrap_or_default(), + ), + ExportBody::TenantUsers { tenant_id } => (tenant_id.to_string(), "".to_string()), + }; + + let timestamp = match util::date::timestamp() { + Ok(timestamp) => timestamp, + Err(err) => { + if let Some(task_execution) = &task_execution { + update_fail(task_execution, "Failed to obtain timestamp").await?; + } + return Err(Error::String(format!("Error obtaining timestamp: {err}"))); } - return Err(Error::String(format!("Error obtaining timestamp: {err}"))); - } - }; - - let name = format!("users-export-{timestamp}.csv"); - let key = s3::get_document_key(&tenant_id, Some(&election_event_id), &document_id, &name); - - let media_type = "text/csv".to_string(); - - match s3::upload_file_to_s3( - key, - false, - s3::get_private_bucket()?, - media_type.clone(), - temp_path.to_string_lossy().to_string(), - None, - Some(name.clone()), - ) - .await - { - Ok(_) => (), - Err(err) => { - if let Some(task_execution) = &task_execution { - update_fail(task_execution, "Failed to upload file to s3").await?; + }; + + let name = format!("users-export-{timestamp}.csv"); + let key = s3::get_document_key(&tenant_id, Some(&election_event_id), &document_id, &name); + + let media_type = "text/csv".to_string(); + + match s3::upload_file_to_s3( + key, + false, + s3::get_private_bucket()?, + media_type.clone(), + temp_path.to_string_lossy().to_string(), + None, + Some(name.clone()), + ) + .await + { + Ok(_) => (), + Err(err) => { + if let Some(task_execution) = &task_execution { + update_fail(task_execution, "Failed to upload file to s3").await?; + } + return Err(Error::String(format!("Error uploading file to s3: {err}"))); } - return Err(Error::String(format!("Error uploading file to s3: {err}"))); } - } - - temp_path - .close() - .with_context(|| "Error closing temporary file path")?; - let _document = insert_document( - &hasura_transaction, - &tenant_id, - match &body { - ExportBody::Users { - 
election_event_id, .. - } => election_event_id.clone(), - ExportBody::TenantUsers { .. } => None, - }, - &name, - &media_type, - size.try_into()?, - false, - Some(document_id.clone()), - ) - .await - .map_err(|err| format!("Error inserting document: {:?}", err))?; - - if let Some(task_execution) = &task_execution { - update_complete(task_execution, Some(document_id.to_string())) - .await - .context("Failed to update task execution status to COMPLETED")?; - } + temp_path + .close() + .with_context(|| "Error closing temporary file path")?; - hasura_transaction - .commit() + let _document = insert_document( + &hasura_transaction, + &tenant_id, + match &body { + ExportBody::Users { + election_event_id, .. + } => election_event_id.clone(), + ExportBody::TenantUsers { .. } => None, + }, + &name, + &media_type, + size.try_into()?, + false, + Some(document_id.clone()), + ) .await - .with_context(|| "Failed to commit Hasura transaction")?; + .map_err(|err| format!("Error inserting document: {:?}", err))?; - Ok(()) + if let Some(task_execution) = &task_execution { + update_complete(task_execution, Some(document_id.to_string())) + .await + .context("Failed to update task execution status to COMPLETED")?; + } + + hasura_transaction + .commit() + .await + .with_context(|| "Failed to commit Hasura transaction")?; + + Ok(()) + } } + +pub use export_users_task::export_users; diff --git a/packages/windmill/src/tasks/generate_report.rs b/packages/windmill/src/tasks/generate_report.rs index 2eee3e5f0b..b59c895c18 100644 --- a/packages/windmill/src/tasks/generate_report.rs +++ b/packages/windmill/src/tasks/generate_report.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Renders report documents from templates and data. 
use crate::postgres::reports::Report; use crate::postgres::reports::ReportType; use crate::services::database::get_hasura_pool; @@ -29,6 +29,11 @@ use std::str::FromStr; use tracing::info; use tracing::instrument; +/// Runs the report-type-specific template renderer. +/// +/// # Errors +/// +/// Fails on DB pool or transaction errors, unknown report types, template rendering failures, or commit errors. pub async fn generate_report( report: Report, document_id: String, @@ -152,59 +157,73 @@ pub async fn generate_report( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn generate_report( - report: Report, - document_id: String, - report_mode: GenerateReportMode, - is_scheduled_task: bool, - task_execution: Option, - executer_username: Option, - tally_session_id: Option, -) -> Result<()> { - let _permit = acquire_semaphore().await?; - // Spawn the task using an async block - let task_execution_clone = task_execution.clone(); - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - generate_report( - report, - document_id, - report_mode, - is_scheduled_task, - task_execution_clone, - executer_username, - tally_session_id, - ) - .await - .map_err(|err| anyhow!("generate_report error: {:?}", err)) - }) - } - }); +mod generate_report_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => { - if let Err(ref err) = inner_result { - if let Some(ref task_exec) = task_execution { - let _ = update_fail(task_exec, &format!("Task failed: {:?}", err)).await; + use super::*; + + /// Celery task: acquires the reports semaphore and runs the report pipeline on the blocking pool. + /// + /// # Errors + /// + /// Fails if the semaphore cannot be acquired, the blocking worker returns an error, or the join handle errors. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn generate_report( + report: Report, + document_id: String, + report_mode: GenerateReportMode, + is_scheduled_task: bool, + task_execution: Option, + executer_username: Option, + tally_session_id: Option, + ) -> Result<()> { + let _permit = acquire_semaphore().await?; + // Spawn the task using an async block + let task_execution_clone = task_execution.clone(); + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + generate_report( + report, + document_id, + report_mode, + is_scheduled_task, + task_execution_clone, + executer_username, + tally_session_id, + ) + .await + .map_err(|err| anyhow!("generate_report error: {:?}", err)) + }) + } + }); + + // Await the result and handle JoinError explicitly + match handle.await { + Ok(inner_result) => { + if let Err(ref err) = inner_result { + if let Some(ref task_exec) = task_execution { + let _ = update_fail(task_exec, &format!("Task failed: {:?}", err)).await; + } } + inner_result.map_err(|err| Error::from(err.context("Task failed")))?; } - inner_result.map_err(|err| Error::from(err.context("Task failed")))?; - } - Err(join_error) => { - if let Some(ref task_exec) = task_execution { - let _ = update_fail(task_exec, &format!("Task panicked: {}", join_error)).await; + Err(join_error) => { + if let Some(ref task_exec) = task_execution { + let _ = update_fail(task_exec, &format!("Task panicked: {}", join_error)).await; + } + return Err(Error::from(anyhow::anyhow!( + "Task panicked: {}", + join_error + ))); } - return Err(Error::from(anyhow::anyhow!( - "Task panicked: {}", - join_error - ))); } - } - Ok(()) + Ok(()) + } } + +pub use generate_report_task::generate_report; diff --git a/packages/windmill/src/tasks/generate_template.rs b/packages/windmill/src/tasks/generate_template.rs index 6d586cccde..e2d03b8004 100644 --- a/packages/windmill/src/tasks/generate_template.rs 
+++ b/packages/windmill/src/tasks/generate_template.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Generates or previews communication templates. use crate::postgres::reports::get_reports_by_election_id; use crate::postgres::reports::ReportType; use crate::postgres::tally_session::get_tally_session_by_id; @@ -45,16 +45,26 @@ use velvet::config::ballot_images_config::PipeConfigBallotImages; use velvet::pipes::pipe_name::PipeName; use velvet::pipes::pipe_name::PipeNameOutputDir; +/// Templates type to be rendered. #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(tag = "type")] pub enum EGenerateTemplate { + /// Ballot-images document. BallotImages { + /// Election event that owns the tally. election_event_id: String, + /// Election whose ballot images are rendered. election_id: String, + /// Completed tally session. tally_session_id: String, }, } +/// Writes `velvet-config.json` for ballot-images rendering into the extracted tally directory. +/// +/// # Errors +/// +/// Fails on filesystem, env, S3 URL resolution, tally config build, or serialization errors. #[instrument(err, skip(hasura_transaction, tally_session))] async fn create_config( hasura_transaction: &Transaction<'_>, @@ -121,6 +131,11 @@ async fn create_config( Ok(first_pipe_id.to_string()) } +/// Produces ballot-images output for one document: downloads tally, runs velvet, zips HTML, optionally encrypts, uploads. +/// +/// # Errors +/// +/// Fails if the tally session is incomplete, archives cannot be read, velvet fails, or upload fails. #[instrument(err, skip(hasura_transaction))] async fn generate_template_document( hasura_transaction: &Transaction<'_>, @@ -267,6 +282,11 @@ async fn generate_template_document( Ok(()) } +/// Generates the template document. +/// +/// # Errors +/// +/// Fails on pool/transaction errors, document generation, task status updates, or commit failures. 
#[instrument(err)] async fn generate_template_block( tenant_id: String, @@ -338,39 +358,53 @@ async fn generate_template_block( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn generate_template( - tenant_id: String, - document_id: String, - input: EGenerateTemplate, - task_execution: Option, - executer_username: Option, -) -> Result<()> { - let _permit = acquire_semaphore().await?; - // Spawn the task using an async block - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - generate_template_block( - tenant_id, - document_id, - input, - task_execution, - executer_username, - ) - .await - .map_err(|err| anyhow!("generate_report error: {:?}", err)) - }) - } - }); +mod generate_template_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: rate-limits template jobs then runs the blocking template renderer. + /// + /// # Errors + /// + /// Fails on semaphore acquisition, worker errors, or join panics. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn generate_template( + tenant_id: String, + document_id: String, + input: EGenerateTemplate, + task_execution: Option, + executer_username: Option, + ) -> Result<()> { + let _permit = acquire_semaphore().await?; + // Spawn the task using an async block + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + generate_template_block( + tenant_id, + document_id, + input, + task_execution, + executer_username, + ) + .await + .map_err(|err| anyhow!("generate_report error: {:?}", err)) + }) + } + }); - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), - Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), - }?; + // Await the result and handle JoinError explicitly + match handle.await { + Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), + Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), + }?; - Ok(()) + Ok(()) + } } + +pub use generate_template_task::generate_template; diff --git a/packages/windmill/src/tasks/import_application.rs b/packages/windmill/src/tasks/import_application.rs index f7c1eb9df3..49dc31b95d 100644 --- a/packages/windmill/src/tasks/import_application.rs +++ b/packages/windmill/src/tasks/import_application.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Imports an application archive into the platform. 
use crate::postgres::application::insert_applications; use crate::services::providers::transactions_provider::provide_hasura_transaction; use crate::{ @@ -21,44 +22,59 @@ use std::io::Seek; use tracing::{info, instrument}; use uuid::Uuid; -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 2)] -pub async fn import_applications( - tenant_id: String, - election_event_id: String, - document_id: String, - sha256: Option, - task_execution: TasksExecution, -) -> Result<()> { - let result = provide_hasura_transaction(|hasura_transaction| { - let document_copy = document_id.clone(); - Box::pin(async move { - import_applications_task( - hasura_transaction, - tenant_id, - election_event_id, - document_copy.clone(), - sha256, - ) - .await +mod import_applications_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: import voter enrollment applications from a CSV file. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 2)] + pub async fn import_applications( + tenant_id: String, + election_event_id: String, + document_id: String, + sha256: Option, + task_execution: TasksExecution, + ) -> Result<()> { + let result = provide_hasura_transaction(|hasura_transaction| { + let document_copy = document_id.clone(); + Box::pin(async move { + super::import_applications_task( + hasura_transaction, + tenant_id, + election_event_id, + document_copy.clone(), + sha256, + ) + .await + }) }) - }) - .await; + .await; - match result { - Ok(_) => { - let _res = update_complete(&task_execution, Some(document_id.clone())).await; - Ok(()) - } - Err(err) => { - let err_str = format!("Error importing applications: {err:?}"); - let _res = update_fail(&task_execution, &err.to_string()).await; - Err(err_str.into()) + match result { + Ok(_) => { + let _res = update_complete(&task_execution, Some(document_id.clone())).await; + Ok(()) + } + Err(err) => { + 
let err_str = format!("Error importing applications: {err:?}"); + let _res = update_fail(&task_execution, &err.to_string()).await; + Err(err_str.into()) + } } } } +pub use import_applications_task::import_applications; + +/// Parses the applications CSV from a document and inserts rows in one transaction. +/// +/// # Errors +/// +/// Fails on missing document, hash mismatch, CSV parse errors, or database insert errors. #[instrument(err)] pub async fn import_applications_task( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/tasks/import_areas.rs b/packages/windmill/src/tasks/import_areas.rs index dd75d25f28..568c352672 100644 --- a/packages/windmill/src/tasks/import_areas.rs +++ b/packages/windmill/src/tasks/import_areas.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports voting areas from a CSV document. use crate::postgres::area::insert_areas; use crate::postgres::area_contest::insert_area_contests; use crate::postgres::contest::export_contests; @@ -17,6 +17,11 @@ use std::io::Seek; use tracing::{error, info, instrument}; use uuid::Uuid; +/// Parses an areas CSV and inserts [`Area`] and [`AreaContest`] rows for the event. +/// +/// # Errors +/// +/// Returns an error if the document is missing, integrity checks fail, or CSV parsing fails. #[instrument(err)] pub async fn import_areas_task( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/tasks/import_candidates.rs b/packages/windmill/src/tasks/import_candidates.rs index d7ab562a60..22905ac064 100644 --- a/packages/windmill/src/tasks/import_candidates.rs +++ b/packages/windmill/src/tasks/import_candidates.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports candidate rows from a CSV document. 
use crate::postgres::candidate::insert_candidates; use crate::postgres::contest::export_contests; use crate::postgres::election_event::get_election_event_by_id; @@ -25,6 +25,7 @@ use std::io::Seek; use tracing::{event, info, instrument, Level}; use uuid::Uuid; +/// Maps a Comelec-style numeric party code from the import file to the display abbreviation used on the ballot. #[instrument(ret)] fn get_political_party_extension(political_party: &str) -> String { // Mapping of numbers to political parties @@ -231,6 +232,11 @@ fn get_political_party_extension(political_party: &str) -> String { } } +/// Resolves a ballot “postcode” column to a contest id by matching English aliases against configured contests. +/// +/// # Errors +/// +/// Fails if a contest presentation blob cannot be deserialized. #[instrument(ret, err, skip(contests))] fn get_contest_from_postcode(contests: &Vec, postcode: &str) -> Result> { // Mapping of postcodes to contest names @@ -281,181 +287,196 @@ fn get_contest_from_postcode(contests: &Vec, postcode: &str) -> Result< Ok(None) } -#[instrument(err)] -pub async fn import_candidates_task( - tenant_id: String, - election_event_id: String, - document_id: String, - task_execution: TasksExecution, - sha256: Option, -) -> Result<()> { - let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { - Ok(client) => client, - Err(err) => { - update_fail(&task_execution, "Failed to get Hasura DB pool").await?; - return Err(anyhow!("Error getting Hasura DB pool: {}", err)); - } - }; - - let hasura_transaction = match hasura_db_client.transaction().await { - Ok(transaction) => transaction, - Err(err) => { - update_fail(&task_execution, "Failed to start Hasura transaction").await?; - return Err(anyhow!("Error starting Hasura transaction: {err}")); - } - }; - - let document = match get_document(&hasura_transaction, &tenant_id, None, &document_id).await { - Ok(Some(document)) => document, - Ok(None) => { - update_fail(&task_execution, "Document 
not found").await?; - return Err(anyhow!("Document not found")); - } - Err(err) => { - update_fail(&task_execution, "Error obtaining the document").await?; - return Err(anyhow!("Error obtaining the document: {err:?}")); - } - }; +mod import_candidates_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - let contests = match export_contests(&hasura_transaction, &tenant_id, &election_event_id).await - { - Ok(contests) => contests, - Err(err) => { - update_fail(&task_execution, "Document not found").await?; - return Err(anyhow!("Error obtaining the contests: {err:?}")); - } - }; + use super::*; - let mut temp_file = match get_document_as_temp_file(&tenant_id, &document).await { - Ok(temp_file) => temp_file, - Err(err) => { - update_fail(&task_execution, "Document not found").await?; - return Err(anyhow!("Error obtaining the tmp document: {err:?}")); - } - }; + /// Celery task: parses a fixed-layout candidate CSV, maps parties and contests, and bulk-inserts [`Candidate`] rows. + /// + /// # Errors + /// + /// Fails on database, document, integrity, CSV parse, or insert errors; updates task execution to failed when applicable. 
+ #[instrument(err)] + pub async fn import_candidates_task( + tenant_id: String, + election_event_id: String, + document_id: String, + task_execution: TasksExecution, + sha256: Option, + ) -> Result<()> { + let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { + Ok(client) => client, + Err(err) => { + update_fail(&task_execution, "Failed to get Hasura DB pool").await?; + return Err(anyhow!("Error getting Hasura DB pool: {}", err)); + } + }; - temp_file.rewind()?; + let hasura_transaction = match hasura_db_client.transaction().await { + Ok(transaction) => transaction, + Err(err) => { + update_fail(&task_execution, "Failed to start Hasura transaction").await?; + return Err(anyhow!("Error starting Hasura transaction: {err}")); + } + }; - match sha256 { - Some(hash) if !hash.is_empty() => match integrity_check(&temp_file, hash) { - Ok(_) => { - info!("Hash verified !"); + let document = match get_document(&hasura_transaction, &tenant_id, None, &document_id).await + { + Ok(Some(document)) => document, + Ok(None) => { + update_fail(&task_execution, "Document not found").await?; + return Err(anyhow!("Document not found")); } - Err(HashFileVerifyError::HashMismatch(input_hash, gen_hash)) => { - let err_str = format!("Failed to verify the integrity: Hash of voters file: {gen_hash} does not match with the input hash: {input_hash}"); - update_fail(&task_execution, &err_str).await?; - return Err(anyhow!(err_str)); + Err(err) => { + update_fail(&task_execution, "Error obtaining the document").await?; + return Err(anyhow!("Error obtaining the document: {err:?}")); } + }; + + let contests = + match export_contests(&hasura_transaction, &tenant_id, &election_event_id).await { + Ok(contests) => contests, + Err(err) => { + update_fail(&task_execution, "Document not found").await?; + return Err(anyhow!("Error obtaining the contests: {err:?}")); + } + }; + + let mut temp_file = match get_document_as_temp_file(&tenant_id, &document).await { + Ok(temp_file) => 
temp_file, Err(err) => { - let err_str = format!("Failed to verify the integrity: {err:?}"); - update_fail(&task_execution, &err_str).await?; - return Err(anyhow!(err_str)); + update_fail(&task_execution, "Document not found").await?; + return Err(anyhow!("Error obtaining the tmp document: {err:?}")); + } + }; + + temp_file.rewind()?; + + match sha256 { + Some(hash) if !hash.is_empty() => match integrity_check(&temp_file, hash) { + Ok(_) => { + info!("Hash verified !"); + } + Err(HashFileVerifyError::HashMismatch(input_hash, gen_hash)) => { + let err_str = format!("Failed to verify the integrity: Hash of voters file: {gen_hash} does not match with the input hash: {input_hash}"); + update_fail(&task_execution, &err_str).await?; + return Err(anyhow!(err_str)); + } + Err(err) => { + let err_str = format!("Failed to verify the integrity: {err:?}"); + update_fail(&task_execution, &err_str).await?; + return Err(anyhow!(err_str)); + } + }, + _ => { + info!("No hash provided, skipping integrity check"); } - }, - _ => { - info!("No hash provided, skipping integrity check"); } - } - let reader = BufReader::new(temp_file.as_file()); + let reader = BufReader::new(temp_file.as_file()); - // Decode the file using the specified encoding - let transcoded_reader = DecodeReaderBytesBuilder::new() - .encoding(Some(WINDOWS_1252)) // Use WINDOWS_1252 for encoding conversion - .build(reader); + // Decode the file using the specified encoding + let transcoded_reader = DecodeReaderBytesBuilder::new() + .encoding(Some(WINDOWS_1252)) // Use WINDOWS_1252 for encoding conversion + .build(reader); - // Read the first line of the file to get the columns - let mut rdr = csv::ReaderBuilder::new() - .delimiter(b',') - .has_headers(false) - .from_reader(transcoded_reader); + // Read the first line of the file to get the columns + let mut rdr = csv::ReaderBuilder::new() + .delimiter(b',') + .has_headers(false) + .from_reader(transcoded_reader); - let election_event = - 
get_election_event_by_id(&hasura_transaction, &tenant_id, &election_event_id).await?; - let default_lang = election_event.get_default_language(); + let election_event = + get_election_event_by_id(&hasura_transaction, &tenant_id, &election_event_id).await?; + let default_lang = election_event.get_default_language(); - let mut candidates: Vec = vec![]; - for result in rdr.records() { - match result.with_context(|| "Error reading CSV record") { - Ok(record) => { - event!(Level::INFO, "result {:?}", record); - let name_on_ballot = record.get(26).unwrap_or("Candidate").to_string(); - let political_party = record.get(7).unwrap_or("\\N").to_string(); - let postcode = record.get(2).unwrap_or("1").to_string(); + let mut candidates: Vec = vec![]; + for result in rdr.records() { + match result.with_context(|| "Error reading CSV record") { + Ok(record) => { + event!(Level::INFO, "result {:?}", record); + let name_on_ballot = record.get(26).unwrap_or("Candidate").to_string(); + let political_party = record.get(7).unwrap_or("\\N").to_string(); + let postcode = record.get(2).unwrap_or("1").to_string(); - let ext = get_political_party_extension(&political_party); - let contest_id_opt = get_contest_from_postcode(&contests, &postcode)?; - let Some(contest_id) = contest_id_opt else { - continue; - }; - let mut presentation = CandidatePresentation::new(); + let ext = get_political_party_extension(&political_party); + let contest_id_opt = get_contest_from_postcode(&contests, &postcode)?; + let Some(contest_id) = contest_id_opt else { + continue; + }; + let mut presentation = CandidatePresentation::new(); - let lang = default_lang.as_str(); + let lang = default_lang.as_str(); - presentation - .i18n - .get_or_insert_with(HashMap::new) - .entry(lang.to_string()) - .or_default() - .insert( - "name".to_string(), - Some(format!("{name_on_ballot} ({ext})")), - ); + presentation + .i18n + .get_or_insert_with(HashMap::new) + .entry(lang.to_string()) + .or_default() + .insert( + 
"name".to_string(), + Some(format!("{name_on_ballot} ({ext})")), + ); - let presentation_json = serde_json::to_value(&presentation)?; + let presentation_json = serde_json::to_value(&presentation)?; - let candidate = Candidate { - id: Uuid::new_v4().to_string(), - tenant_id: tenant_id.clone(), - election_event_id: election_event_id.clone(), - contest_id: Some(contest_id), - created_at: None, - last_updated_at: None, - labels: None, - annotations: None, - description: None, - r#type: None, - presentation: Some(presentation_json), - is_public: Some(true), - image_document_id: None, - external_id: None, - }; - candidates.push(candidate); - } - Err(err) => { - event!(Level::ERROR, "Error reading CSV record: {:?}", err); - update_fail(&task_execution, "Error reading CSV record").await?; - return Err(anyhow!("Error reading CSV record: {}", err)); + let candidate = Candidate { + id: Uuid::new_v4().to_string(), + tenant_id: tenant_id.clone(), + election_event_id: election_event_id.clone(), + contest_id: Some(contest_id), + created_at: None, + last_updated_at: None, + labels: None, + annotations: None, + description: None, + r#type: None, + presentation: Some(presentation_json), + is_public: Some(true), + image_document_id: None, + external_id: None, + }; + candidates.push(candidate); + } + Err(err) => { + event!(Level::ERROR, "Error reading CSV record: {:?}", err); + update_fail(&task_execution, "Error reading CSV record").await?; + return Err(anyhow!("Error reading CSV record: {}", err)); + } } } - } - match insert_candidates( - &hasura_transaction, - &tenant_id, - &election_event_id, - &candidates, - ) - .await - { - Ok(_) => (), - Err(err) => { - update_fail(&task_execution, "Error inserting candidates to db").await?; - return Err(anyhow!("Inserting candidates failed: {:?}", err)); + match insert_candidates( + &hasura_transaction, + &tenant_id, + &election_event_id, + &candidates, + ) + .await + { + Ok(_) => (), + Err(err) => { + update_fail(&task_execution, "Error 
inserting candidates to db").await?; + return Err(anyhow!("Inserting candidates failed: {:?}", err)); + } } - } - match hasura_transaction.commit().await { - Ok(_) => (), - Err(err) => { - update_fail(&task_execution, "Error updating db").await?; - return Err(anyhow!("Commit failed: {}", err)); - } - }; + match hasura_transaction.commit().await { + Ok(_) => (), + Err(err) => { + update_fail(&task_execution, "Error updating db").await?; + return Err(anyhow!("Commit failed: {}", err)); + } + }; - update_complete(&task_execution, Some(document_id.clone())) - .await - .context("Failed to update task execution status to COMPLETED")?; + update_complete(&task_execution, Some(document_id.clone())) + .await + .context("Failed to update task execution status to COMPLETED")?; - Ok(()) + Ok(()) + } } + +pub use import_candidates_task::import_candidates_task; diff --git a/packages/windmill/src/tasks/import_election_event.rs b/packages/windmill/src/tasks/import_election_event.rs index 72ffb1bcbb..6f8a5840e3 100644 --- a/packages/windmill/src/tasks/import_election_event.rs +++ b/packages/windmill/src/tasks/import_election_event.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports a packaged election event into an existing tenant. use crate::postgres::maintenance::vacuum_analyze_direct; use crate::services::providers::transactions_provider::provide_hasura_transaction; use crate::services::tasks_execution::{update_complete, update_fail}; @@ -15,53 +15,68 @@ use sequent_core::types::hasura::core::TasksExecution; use serde::{Deserialize, Serialize}; use tracing::{event, info, instrument, Level}; +/// Payload for restoring an election event from an uploaded export archive. #[derive(Deserialize, Debug, Clone, Serialize)] pub struct ImportElectionEventBody { + /// Tenant receiving the import. pub tenant_id: String, + /// Source document ID containing the ZIP. 
pub document_id: String, + /// Optional password when the archive is encrypted. pub password: Option, + /// When true, validate only without mutating Hasura rows. pub check_only: Option, + /// Expected SHA-256 of the document for integrity verification. pub sha256: Option, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn import_election_event( - object: ImportElectionEventBody, - election_event_id: String, - tenant_id: String, - task_execution: TasksExecution, -) -> Result<()> { - let result = provide_hasura_transaction(|hasura_transaction| { - let object = object.clone(); - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); +mod import_election_event_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + /// Celery task: import election event data from a document. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn import_election_event( + object: super::ImportElectionEventBody, + election_event_id: String, + tenant_id: String, + task_execution: TasksExecution, + ) -> Result<()> { + let result = provide_hasura_transaction(|hasura_transaction| { + let object = object.clone(); + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); - Box::pin(async move { - import_election_event_service::process_document( - hasura_transaction, - object, - election_event_id, - tenant_id, - ) - .await + Box::pin(async move { + import_election_event_service::process_document( + hasura_transaction, + object, + election_event_id, + tenant_id, + ) + .await + }) }) - }) - .await; + .await; - match &result { - Ok(_) => { - // Execute database maintenance - info!("Performing mainteinance after election event import."); - vacuum_analyze_direct().await?; - let _ = update_complete(&task_execution, Some(object.document_id.clone())).await; - Ok(()) - } - Err(error) => { - let err_str = 
format!("Error process election event document: {error}"); - let _ = update_fail(&task_execution, &err_str).await; - Err(err_str.into()) + match &result { + Ok(_) => { + // Execute database maintenance + info!("Performing mainteinance after election event import."); + vacuum_analyze_direct().await?; + let _ = update_complete(&task_execution, Some(object.document_id.clone())).await; + Ok(()) + } + Err(error) => { + let err_str = format!("Error process election event document: {error}"); + let _ = update_fail(&task_execution, &err_str).await; + Err(err_str.into()) + } } } } + +pub use import_election_event_task::import_election_event; diff --git a/packages/windmill/src/tasks/import_templates.rs b/packages/windmill/src/tasks/import_templates.rs index e7838e1c20..68086c39be 100644 --- a/packages/windmill/src/tasks/import_templates.rs +++ b/packages/windmill/src/tasks/import_templates.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports template definitions from CSV. use crate::postgres::template::insert_templates; use crate::services::providers::transactions_provider::provide_hasura_transaction; use crate::services::tasks_execution::{update_complete, update_fail}; @@ -19,6 +19,11 @@ use std::io::Seek; use tracing::{info, instrument}; use uuid::Uuid; +/// Imports templates from CSV. +/// +/// # Errors +/// +/// Returns an error when the document, integrity hash, or CSV parsing fails. 
#[instrument(err)] pub async fn import_templates( hasura_transaction: &Transaction<'_>, @@ -100,31 +105,41 @@ pub async fn import_templates( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn import_templates_task( - tenant_id: String, - document_id: String, - sha256: Option, - task_execution: TasksExecution, -) -> Result<()> { - let result = provide_hasura_transaction(|hasura_transaction| { - let document_copy = document_id.clone(); - Box::pin(async move { - import_templates(hasura_transaction, tenant_id, document_copy, sha256).await +mod import_templates_celery_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: import templates from a CSV file. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn import_templates_task( + tenant_id: String, + document_id: String, + sha256: Option, + task_execution: TasksExecution, + ) -> Result<()> { + let result = provide_hasura_transaction(|hasura_transaction| { + let document_copy = document_id.clone(); + Box::pin(async move { + import_templates(hasura_transaction, tenant_id, document_copy, sha256).await + }) }) - }) - .await; - match result { - Ok(_) => { - let _res = update_complete(&task_execution, Some(document_id.clone())).await; - Ok(()) - } - Err(err) => { - let err_str = format!("Error importing templates: {err:?}"); - let _res = update_fail(&task_execution, &err.to_string()).await; - Err(err_str.into()) + .await; + match result { + Ok(_) => { + let _res = update_complete(&task_execution, Some(document_id.clone())).await; + Ok(()) + } + Err(err) => { + let err_str = format!("Error importing templates: {err:?}"); + let _res = update_fail(&task_execution, &err.to_string()).await; + Err(err_str.into()) + } } } } + +pub use import_templates_celery_task::import_templates_task; diff --git 
a/packages/windmill/src/tasks/import_tenant_config.rs b/packages/windmill/src/tasks/import_tenant_config.rs index ebc3ed5127..a01f89303c 100644 --- a/packages/windmill/src/tasks/import_tenant_config.rs +++ b/packages/windmill/src/tasks/import_tenant_config.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports tenant configuration ZIP bundle. use crate::services::import::import_tenant_config::import_tenant_config_zip; use crate::services::providers::transactions_provider::provide_hasura_transaction; use crate::services::tasks_execution::{update_complete, update_fail}; @@ -15,40 +15,56 @@ use sequent_core::types::hasura::core::TasksExecution; use serde::{Deserialize, Serialize}; use tracing::{event, info, instrument, Level}; +/// Options for which tenant configuration slices are applied from the ZIP. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ImportOptions { + /// Restore tenant data. pub include_tenant: Option, + /// Restore Keycloak realm data. pub include_keycloak: Option, + /// Restore roles and permissions data. 
pub include_roles: Option, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn import_tenant_config( - object: ImportOptions, - tenant_id: String, - document_id: String, - sha256: Option, - task_execution: TasksExecution, -) -> Result<()> { - let task_execution_clone = task_execution.clone(); - - let object = object.clone(); - let tenant_id = tenant_id.clone(); - let task_execution = task_execution_clone.clone(); - - match import_tenant_config_zip(object, &tenant_id, &document_id, sha256).await { - Ok(_) => (), - Err(err) => { - update_fail(&task_execution, &err.to_string()).await?; - return Err(anyhow!("Error process tenant configuration documents: {:?}", err).into()); - } - }; - - update_complete(&task_execution, Some(document_id.to_string())) - .await - .context("Failed to update task execution status to COMPLETED")?; - - Ok(()) +mod import_tenant_config_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: unpack a tenant configuration archive into Hasura and related services. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn import_tenant_config( + object: super::ImportOptions, + tenant_id: String, + document_id: String, + sha256: Option, + task_execution: TasksExecution, + ) -> Result<()> { + let task_execution_clone = task_execution.clone(); + + let object = object.clone(); + let tenant_id = tenant_id.clone(); + let task_execution = task_execution_clone.clone(); + + match import_tenant_config_zip(object, &tenant_id, &document_id, sha256).await { + Ok(_) => (), + Err(err) => { + update_fail(&task_execution, &err.to_string()).await?; + return Err( + anyhow!("Error process tenant configuration documents: {:?}", err).into(), + ); + } + }; + + update_complete(&task_execution, Some(document_id.to_string())) + .await + .context("Failed to update task execution status to COMPLETED")?; + + Ok(()) + } } + +pub use import_tenant_config_task::import_tenant_config; diff --git a/packages/windmill/src/tasks/import_users.rs b/packages/windmill/src/tasks/import_users.rs index 4c02853592..31bc3ada1d 100644 --- a/packages/windmill/src/tasks/import_users.rs +++ b/packages/windmill/src/tasks/import_users.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Imports users from a CSV document with integrity checks. use crate::postgres::document::get_document; use crate::postgres::maintenance::vacuum_analyze_direct; use crate::services::database::get_hasura_pool; @@ -22,26 +22,41 @@ use serde::{Deserialize, Serialize}; use std::io::Seek; use tempfile::NamedTempFile; use tracing::{error, info, instrument}; + +/// Request body for importing users. #[derive(Deserialize, Debug, Clone, Serialize)] pub struct ImportUsersBody { + /// Tenant that owns the import document and receives the users. pub tenant_id: String, + /// storage document id. pub document_id: String, + /// When set, users are scoped to this election event; otherwise tenant-wide. 
pub election_event_id: Option, + /// When true, imported users receive admin capabilities per import rules. #[serde(default = "default_is_admin")] pub is_admin: bool, + /// Optional SHA-256 of the file; when present, the task verifies the download before import. pub sha256: Option, } +/// Default for [`ImportUsersBody::is_admin`] when the field is omitted in JSON. fn default_is_admin() -> bool { false } +/// Result payload carrying the task execution row created for this import. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ImportUsersOutput { + /// Task execution record used to report progress. pub task_execution: TasksExecution, } impl ImportUsersBody { + /// Downloads the configured document to a temp file. + /// + /// # Errors + /// + /// Fails if the document is missing, metadata cannot be read, or the S3 download fails. #[instrument(ret)] async fn get_s3_document_as_temp_file( &self, @@ -74,92 +89,106 @@ impl ImportUsersBody { } } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 2)] -pub async fn import_users(body: ImportUsersBody, task_execution: TasksExecution) -> Result<()> { - let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { - Ok(client) => client, - Err(err) => { - update_fail(&task_execution, "Failed to get Hasura DB pool").await?; - return Err(Error::String(format!( - "Error getting Hasura DB pool: {}", - err - ))); - } - }; - - let hasura_transaction = match hasura_db_client.transaction().await { - Ok(transaction) => transaction, - Err(err) => { - update_fail(&task_execution, "Failed to start Hasura transaction").await?; - return Err(Error::String(format!( - "Error starting Hasura transaction: {err}" - ))); - } - }; +mod import_users_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - let (mut voters_file, separator) = - match body.get_s3_document_as_temp_file(&hasura_transaction).await { - Ok(result) => result, + use super::*; + + 
/// Celery task: validates optional file hash, imports the users into Keycloak. + /// + /// # Errors + /// + /// Fails on database pool/transaction errors, missing document, hash mismatch, or import errors. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 2)] + pub async fn import_users(body: ImportUsersBody, task_execution: TasksExecution) -> Result<()> { + let mut hasura_db_client: DbClient = match get_hasura_pool().await.get().await { + Ok(client) => client, Err(err) => { - update_fail( - &task_execution, - "Error obtaining voters file from S3 as temp file", - ) - .await?; + update_fail(&task_execution, "Failed to get Hasura DB pool").await?; return Err(Error::String(format!( - "Error obtaining voters file from S3: {err}" + "Error getting Hasura DB pool: {}", + err ))); } }; - voters_file.rewind()?; - match body.sha256.clone() { - Some(hash) if !hash.is_empty() => match integrity_check(&voters_file, hash) { - Ok(_) => { - info!("Hash verified !"); - } - Err(HashFileVerifyError::HashMismatch(input_hash, gen_hash)) => { - let err_str = format!("Failed to verify the integrity: Hash of voters file: {gen_hash} does not match with the input hash: {input_hash}"); - update_fail(&task_execution, &err_str).await?; - return Err(Error::String(err_str)); - } + let hasura_transaction = match hasura_db_client.transaction().await { + Ok(transaction) => transaction, Err(err) => { - let err_str = format!("Failed to verify the integrity: {err:?}"); - update_fail(&task_execution, &err_str).await?; - return Err(err_str.into()); + update_fail(&task_execution, "Failed to start Hasura transaction").await?; + return Err(Error::String(format!( + "Error starting Hasura transaction: {err}" + ))); } - }, - _ => { - info!("No hash provided, skipping integrity check"); - } - } + }; - match import_users_file( - &hasura_transaction, - &voters_file, - separator, - body.election_event_id.clone(), - body.tenant_id, - body.is_admin, - ) - .await - { - 
Ok(_) => { - // Execute database maintenance - info!("Performing mainteinance after users import."); - vacuum_analyze_direct().await?; - } - Err(err) => { - update_fail(&task_execution, &err.to_string()).await?; - return Err(Error::String(format!("Error importing users file: {err}"))); + let (mut voters_file, separator) = + match body.get_s3_document_as_temp_file(&hasura_transaction).await { + Ok(result) => result, + Err(err) => { + update_fail( + &task_execution, + "Error obtaining voters file from S3 as temp file", + ) + .await?; + return Err(Error::String(format!( + "Error obtaining voters file from S3: {err}" + ))); + } + }; + voters_file.rewind()?; + + match body.sha256.clone() { + Some(hash) if !hash.is_empty() => match integrity_check(&voters_file, hash) { + Ok(_) => { + info!("Hash verified !"); + } + Err(HashFileVerifyError::HashMismatch(input_hash, gen_hash)) => { + let err_str = format!("Failed to verify the integrity: Hash of voters file: {gen_hash} does not match with the input hash: {input_hash}"); + update_fail(&task_execution, &err_str).await?; + return Err(Error::String(err_str)); + } + Err(err) => { + let err_str = format!("Failed to verify the integrity: {err:?}"); + update_fail(&task_execution, &err_str).await?; + return Err(err_str.into()); + } + }, + _ => { + info!("No hash provided, skipping integrity check"); + } } - } - update_complete(&task_execution, Some(body.document_id.clone())) + match import_users_file( + &hasura_transaction, + &voters_file, + separator, + body.election_event_id.clone(), + body.tenant_id, + body.is_admin, + ) .await - .context("Failed to update task execution status to COMPLETED")?; + { + Ok(_) => { + // Execute database maintenance + info!("Performing mainteinance after users import."); + vacuum_analyze_direct().await?; + } + Err(err) => { + update_fail(&task_execution, &err.to_string()).await?; + return Err(Error::String(format!("Error importing users file: {err}"))); + } + } - Ok(()) + 
update_complete(&task_execution, Some(body.document_id.clone())) + .await + .context("Failed to update task execution status to COMPLETED")?; + + Ok(()) + } } + +pub use import_users_task::import_users; diff --git a/packages/windmill/src/tasks/insert_election_event.rs b/packages/windmill/src/tasks/insert_election_event.rs index fbb89afe50..2fb7a3eb4e 100644 --- a/packages/windmill/src/tasks/insert_election_event.rs +++ b/packages/windmill/src/tasks/insert_election_event.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Inserts a new election event from structured input. use crate::postgres::election_event::update_bulletin_board; use crate::services::database::get_hasura_pool; use crate::services::election_event_board::BoardSerializable; @@ -26,6 +26,11 @@ use std::fs; use tokio_postgres::row::Row; use tracing::{event, instrument, Level}; +/// Inserts the election event, bulletin board, and marks the import task complete. +/// +/// # Errors +/// +/// Fails on DB pool/transaction errors, realm upsert, insert, or board wiring errors. #[instrument(err)] pub async fn insert_election_event_anyhow( object: CreateElectionEventInput, @@ -116,7 +121,9 @@ pub async fn insert_election_event_anyhow( .context("Failed to update task execution status to COMPLETED") } +/// GraphQL-shaped payload used when importing a new election event from the admin API or tasks. 
#[derive(Serialize, Deserialize, Debug, Clone)] +#[allow(missing_docs)] pub struct CreateElectionEventInput { pub id: Option, pub created_at: Option, @@ -140,15 +147,29 @@ pub struct CreateElectionEventInput { pub statistics: Option, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn insert_election_event_t( - object: CreateElectionEventInput, - id: String, - task_execution: TasksExecution, -) -> Result<()> { - insert_election_event_anyhow(object, id, task_execution).await?; +mod insert_election_event_t_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - Ok(()) + use super::*; + + /// Celery task: provisions realm and Hasura rows for a new election event import. + /// + /// # Errors + /// + /// Propagates any error returned from the import pipeline. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn insert_election_event_t( + object: CreateElectionEventInput, + id: String, + task_execution: TasksExecution, + ) -> Result<()> { + insert_election_event_anyhow(object, id, task_execution).await?; + + Ok(()) + } } + +pub use insert_election_event_t_task::insert_election_event_t; diff --git a/packages/windmill/src/tasks/insert_tenant.rs b/packages/windmill/src/tasks/insert_tenant.rs index 7323fadca9..1894a1eb01 100644 --- a/packages/windmill/src/tasks/insert_tenant.rs +++ b/packages/windmill/src/tasks/insert_tenant.rs @@ -1,8 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Insert a new tenant record and Keycloak realm. 
use crate::postgres::tenant::{ - get_tenant_by_id_if_exist, get_tenant_by_slug_if_exist, insert_tenant, + get_tenant_by_id_if_exist, get_tenant_by_slug_if_exist, insert_tenant as insert_tenant_row, }; use crate::services::database::get_hasura_pool; use crate::services::import::import_election_event::remove_keycloak_realm_secrets; @@ -22,6 +23,15 @@ use sequent_core::types::hasura::core::TasksExecution; use std::{env, fs}; use tracing::{event, instrument, Level}; +/// Reads the default Keycloak realm JSON from `KEYCLOAK_TENANT_REALM_CONFIG_PATH`. +/// +/// # Errors +/// +/// Returns an error if the file cannot be read or does not deserialize into a [`RealmRepresentation`]. +/// +/// # Panics +/// +/// Panics when `KEYCLOAK_TENANT_REALM_CONFIG_PATH` is not set. #[instrument(err)] pub fn read_default_tenant_realm() -> AnyhowResult { let realm_config_path = env::var("KEYCLOAK_TENANT_REALM_CONFIG_PATH") @@ -34,6 +44,11 @@ pub fn read_default_tenant_realm() -> AnyhowResult { }) } +/// Creates or updates the tenant Keycloak realm from the default template and refreshes JWKS. +/// +/// # Errors +/// +/// Propagates template load, secret stripping, JSON, Keycloak admin, or JWKS errors. #[instrument(err)] pub async fn upsert_keycloak_realm(tenant_id: &str, slug: &str) -> Result<()> { let mut default_tenant = read_default_tenant_realm()?; @@ -55,6 +70,11 @@ pub async fn upsert_keycloak_realm(tenant_id: &str, slug: &str) -> Result<()> { Ok(()) } +/// Inserts a tenant row when no row with the same id exists. +/// +/// # Errors +/// +/// Propagates Hasura lookup or insert failures. #[instrument(skip(hasura_transaction), err)] pub async fn insert_tenant_db( hasura_transaction: &Transaction<'_>, @@ -69,11 +89,16 @@ pub async fn insert_tenant_db( return Ok(()); } - insert_tenant(hasura_transaction, tenant_id, slug).await?; + insert_tenant_row(hasura_transaction, tenant_id, slug).await?; Ok(()) } +/// Returns whether a tenant with the given slug already exists. 
+/// +/// # Errors +/// +/// Propagates Hasura lookup failures. #[instrument(skip(hasura_transaction), err)] pub async fn check_tenant_exists(hasura_transaction: &Transaction<'_>, slug: &str) -> Result { // fetch tenant @@ -82,6 +107,11 @@ pub async fn check_tenant_exists(hasura_transaction: &Transaction<'_>, slug: &st Ok(found_tenant.is_some()) } +/// Full provisioning path: skip if slug taken, otherwise upsert Keycloak realm and insert the tenant, then commit. +/// +/// # Errors +/// +/// Propagates pool, transaction, Keycloak, insert, or commit failures (surfaced as strings). #[instrument(err)] pub async fn process_insert_tenant(tenant_id: String, slug: String) -> Result<()> { let mut hasura_db_client: DbClient = get_hasura_pool() @@ -112,28 +142,38 @@ pub async fn process_insert_tenant(tenant_id: String, slug: String) -> Result<() Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn insert_tenant( - tenant_id: String, - slug: String, - task_execution: Option, -) -> Result<()> { - let res = process_insert_tenant(tenant_id.clone(), slug.clone()).await; - if let Some(task_execution) = task_execution { - if let Err(err) = res { - let err_str = format!("Error inserting tenant: {}", err); - event!(Level::ERROR, err_str); - update_fail(&task_execution, &err_str) +mod insert_tenant_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: provisions tenant realm and DB row, optionally updating the linked task execution. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn insert_tenant( + tenant_id: String, + slug: String, + task_execution: Option, + ) -> Result<()> { + let res = process_insert_tenant(tenant_id.clone(), slug.clone()).await; + if let Some(task_execution) = task_execution { + if let Err(err) = res { + let err_str = format!("Error inserting tenant: {}", err); + event!(Level::ERROR, err_str); + update_fail(&task_execution, &err_str) + .await + .context("Failed to update task insert tenant to FAILED")?; + return Err(err); + } + update_complete(&task_execution, None) .await - .context("Failed to update task insert tenant to FAILED")?; - return Err(err); + .context("Failed to update task execution status to COMPLETED")?; } - update_complete(&task_execution, None) - .await - .context("Failed to update task execution status to COMPLETED")?; - } - Ok(()) + Ok(()) + } } + +pub use insert_tenant_task::insert_tenant; diff --git a/packages/windmill/src/tasks/manage_election_allow_tally.rs b/packages/windmill/src/tasks/manage_election_allow_tally.rs index d59eccfbd3..98c28857f5 100644 --- a/packages/windmill/src/tasks/manage_election_allow_tally.rs +++ b/packages/windmill/src/tasks/manage_election_allow_tally.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Scheduled task that flips allow-tally based on a scheduled event. use crate::postgres::election::{get_election_by_id, update_election_voting_status}; use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::scheduled_event::*; @@ -25,6 +25,7 @@ use tracing::instrument; use tracing::{error, event, info, Level}; use uuid::Uuid; +/// Updates election status to allow tally and stops the triggering scheduled event. 
#[instrument(err)] async fn manage_election_allow_tally_wrapped( hasura_transaction: &Transaction<'_>, @@ -89,35 +90,44 @@ async fn manage_election_allow_tally_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_allow_tally( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, - election_id: String, -) -> Result<()> { - let res = provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - let election_id = election_id.clone(); - Box::pin(async move { - // Your async code here - manage_election_allow_tally_wrapped( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - election_id, - ) - .await +mod manage_election_allow_tally_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: manages the election allow tally. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_allow_tally( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + election_id: String, + ) -> Result<()> { + let res = provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + let election_id = election_id.clone(); + Box::pin(async move { + manage_election_allow_tally_wrapped( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + election_id, + ) + .await + }) }) - }) - .await; + .await; - info!("result: {:?}", res); + info!("result: {:?}", res); - Ok(res?) + Ok(res?) 
+ } } + +pub use manage_election_allow_tally_task::manage_election_allow_tally; diff --git a/packages/windmill/src/tasks/manage_election_dates.rs b/packages/windmill/src/tasks/manage_election_dates.rs index 715caf50ed..4acde32188 100644 --- a/packages/windmill/src/tasks/manage_election_dates.rs +++ b/packages/windmill/src/tasks/manage_election_dates.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Updates election status based on scheduled events. use crate::postgres::election::get_election_by_id; use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::scheduled_event::*; @@ -24,6 +24,7 @@ use tracing::instrument; use tracing::{error, event, info, Level}; use uuid::Uuid; +/// Applies scheduled voting open/close status for one election. #[instrument(err)] async fn manage_election_date_wrapper( hasura_transaction: &Transaction<'_>, @@ -108,52 +109,62 @@ async fn manage_election_date_wrapper( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_date( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, - election_id: String, -) -> Result<()> { - let lock: PgLock = PgLock::acquire( - format!( - "execute_manage_election_date-{}-{}-{}-{}", - tenant_id, election_event_id, scheduled_event_id, election_id - ), - Uuid::new_v4().to_string(), - ISO8601::now() - .checked_add_signed(Duration::seconds(120)) - .expect("manage_election_date lock expiry overflow"), - ) - .await - .with_context(|| "Error acquiring pglock")?; - - let res = provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - let election_id = election_id.clone(); - Box::pin(async move { - // Your async code here - 
manage_election_date_wrapper( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - election_id, - ) - .await +mod manage_election_date_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: manages the election scheduled dates for open/close voting. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_date( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + election_id: String, + ) -> Result<()> { + let lock: PgLock = PgLock::acquire( + format!( + "execute_manage_election_date-{}-{}-{}-{}", + tenant_id, election_event_id, scheduled_event_id, election_id + ), + Uuid::new_v4().to_string(), + ISO8601::now() + .checked_add_signed(Duration::seconds(120)) + .expect("manage_election_date lock expiry overflow"), + ) + .await + .with_context(|| "Error acquiring pglock")?; + + let res = provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + let election_id = election_id.clone(); + Box::pin(async move { + // Your async code here + manage_election_date_wrapper( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + election_id, + ) + .await + }) }) - }) - .await; + .await; - info!("result: {:?}", res); + info!("result: {:?}", res); - lock.release() - .await - .with_context(|| "Error releasing pglock")?; + lock.release() + .await + .with_context(|| "Error releasing pglock")?; - Ok(res?) + Ok(res?) 
+ } } + +pub use manage_election_date_task::manage_election_date; diff --git a/packages/windmill/src/tasks/manage_election_event_date.rs b/packages/windmill/src/tasks/manage_election_event_date.rs index d1039c385a..ac4cf4c06e 100644 --- a/packages/windmill/src/tasks/manage_election_event_date.rs +++ b/packages/windmill/src/tasks/manage_election_event_date.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Opens or closes voting for an election event on schedule. use crate::postgres::scheduled_event::*; use crate::services::database::get_hasura_pool; use crate::services::election_event_status::update_event_voting_status; @@ -20,6 +20,12 @@ use tracing::instrument; use tracing::{event, info, Level}; use uuid::Uuid; +/// Opens or closes online voting for the whole election event. +/// +/// # Errors +/// +/// Fails if the scheduled event or its processor is missing, invalid, +/// or if updating voting status or stopping the event in the database fails. 
#[instrument(err)] pub async fn manage_election_event_date_wrapped( hasura_transaction: &Transaction<'_>, @@ -69,55 +75,65 @@ pub async fn manage_election_event_date_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_event_date( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, -) -> Result<()> { - let lock: PgLock = PgLock::acquire( - format!( - "execute_manage_election_event_date-{}-{}-{}", - tenant_id, election_event_id, scheduled_event_id - ), - Uuid::new_v4().to_string(), - ISO8601::now() - .checked_add_signed(Duration::seconds(120)) - .expect("manage_election_event_date lock expiry overflow"), - ) - .await?; - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .map_err(|e| anyhow!("Error getting hasura client {}", e))?; - let hasura_transaction = hasura_db_client.transaction().await?; - let res = manage_election_event_date_wrapped( - &hasura_transaction, - tenant_id.clone(), - election_event_id.clone(), - scheduled_event_id.clone(), - ) - .await; +mod manage_election_event_date_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - match res { - Ok(data) => { - let commit = hasura_transaction - .commit() - .await - .map_err(|e| anyhow!("Commit failed manage_event_election_dates: {}", e)); - lock.release().await?; - commit?; - } - Err(err) => { - let rollback = hasura_transaction.rollback().await; - lock.release().await?; - rollback?; - return Err(anyhow!("{}", err).into()); + use super::*; + + /// Celery task: manages the election event scheduled dates for open/close voting. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_event_date( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + ) -> Result<()> { + let lock: PgLock = PgLock::acquire( + format!( + "execute_manage_election_event_date-{}-{}-{}", + tenant_id, election_event_id, scheduled_event_id + ), + Uuid::new_v4().to_string(), + ISO8601::now() + .checked_add_signed(Duration::seconds(120)) + .expect("manage_election_event_date lock expiry overflow"), + ) + .await?; + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .map_err(|e| anyhow!("Error getting hasura client {}", e))?; + let hasura_transaction = hasura_db_client.transaction().await?; + let res = manage_election_event_date_wrapped( + &hasura_transaction, + tenant_id.clone(), + election_event_id.clone(), + scheduled_event_id.clone(), + ) + .await; + + match res { + Ok(data) => { + let commit = hasura_transaction + .commit() + .await + .map_err(|e| anyhow!("Commit failed manage_event_election_dates: {}", e)); + lock.release().await?; + commit?; + } + Err(err) => { + let rollback = hasura_transaction.rollback().await; + lock.release().await?; + rollback?; + return Err(anyhow!("{}", err).into()); + } } - } - Ok(()) + Ok(()) + } } + +pub use manage_election_event_date_task::manage_election_event_date; diff --git a/packages/windmill/src/tasks/manage_election_event_enrollment.rs b/packages/windmill/src/tasks/manage_election_event_enrollment.rs index 23e5e80e9f..13aa5a3c2d 100644 --- a/packages/windmill/src/tasks/manage_election_event_enrollment.rs +++ b/packages/windmill/src/tasks/manage_election_event_enrollment.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Manages enrollment windows for an election event. 
use crate::postgres::election_event::{ get_election_event_by_id, update_election_event_presentation, }; @@ -21,6 +21,11 @@ use serde::{Deserialize, Serialize}; use tracing::instrument; use tracing::{error, event, info, Level}; +/// Updates Keycloak browser-flow OTP requirements for the election event realm. +/// +/// # Errors +/// +/// Returns an error if realm metadata or Keycloak admin API calls fail. pub async fn update_keycloak_otp( tenant_id: Option, election_event_id: Option, @@ -76,6 +81,11 @@ pub async fn update_keycloak_otp( Ok(()) } +/// Enables or disables voter enrollment flows in the event Keycloak realm. +/// +/// # Errors +/// +/// Returns an error if the realm cannot be resolved or Keycloak rejects the update. pub async fn update_keycloak_enrollment( tenant_id: Option, election_event_id: Option, @@ -116,6 +126,12 @@ pub async fn update_keycloak_enrollment( Ok(()) } +/// Aligns Hasura presentation enrollment flags and Keycloak with a scheduled enrollment window. +/// +/// # Errors +/// +/// Fails if the scheduled event or election cannot be loaded, Keycloak updates fail, +/// deserialization fails, or stopping the event fails. 
#[instrument(err)] pub async fn manage_election_event_enrollment_wrapped( hasura_transaction: &Transaction<'_>, @@ -179,30 +195,39 @@ pub async fn manage_election_event_enrollment_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_event_enrollment( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, -) -> Result<()> { - provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - Box::pin(async move { - // Your async code here - manage_election_event_enrollment_wrapped( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - ) - .await +mod manage_election_event_enrollment_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: manages the election event scheduled enrollment. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_event_enrollment( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + ) -> Result<()> { + provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + Box::pin(async move { + manage_election_event_enrollment_wrapped( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + ) + .await + }) }) - }) - .await?; + .await?; - Ok(()) + Ok(()) + } } + +pub use manage_election_event_enrollment_task::manage_election_event_enrollment; diff --git a/packages/windmill/src/tasks/manage_election_event_lockdown.rs b/packages/windmill/src/tasks/manage_election_event_lockdown.rs index b5fc5c3203..d27a0a0312 100644 --- a/packages/windmill/src/tasks/manage_election_event_lockdown.rs +++ b/packages/windmill/src/tasks/manage_election_event_lockdown.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Applies lockdown presentation flags when a scheduled lockdown starts or ends. use crate::postgres::election_event::{ get_election_event_by_id, update_election_event_presentation, }; @@ -26,6 +26,7 @@ use tracing::instrument; use tracing::{error, event, info, Level}; use uuid::Uuid; +/// Updates election event presentation lockdown flags based on the scheduled event. 
#[instrument(err)] async fn manage_election_event_lockdown_wrapped( hasura_transaction: &Transaction<'_>, @@ -80,32 +81,41 @@ async fn manage_election_event_lockdown_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_event_lockdown( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, -) -> Result<()> { - let res = provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - Box::pin(async move { - // Your async code here - manage_election_event_lockdown_wrapped( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - ) - .await +mod manage_election_event_lockdown_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: manages the election event scheduled lockdown. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_event_lockdown( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + ) -> Result<()> { + let res = provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + Box::pin(async move { + manage_election_event_lockdown_wrapped( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + ) + .await + }) }) - }) - .await; + .await; - info!("result: {:?}", res); + info!("result: {:?}", res); - Ok(res?) + Ok(res?) 
+ } } + +pub use manage_election_event_lockdown_task::manage_election_event_lockdown; diff --git a/packages/windmill/src/tasks/manage_election_init_report.rs b/packages/windmill/src/tasks/manage_election_init_report.rs index 83e915fcd6..1ad63998d1 100644 --- a/packages/windmill/src/tasks/manage_election_init_report.rs +++ b/packages/windmill/src/tasks/manage_election_init_report.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Marks initialization report as allowed or disallowed based on a scheduled event. use crate::postgres::election::{get_election_by_id, update_election_voting_status}; use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::scheduled_event::*; @@ -25,6 +25,7 @@ use tracing::instrument; use tracing::{error, event, info, Level}; use uuid::Uuid; +/// Marks initialization report as allowed or disallowed based on the scheduled event. #[instrument(err)] async fn manage_election_init_report_wrapped( hasura_transaction: &Transaction<'_>, @@ -100,35 +101,44 @@ async fn manage_election_init_report_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_init_report( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, - election_id: String, -) -> Result<()> { - let res = provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - let election_id = election_id.clone(); - Box::pin(async move { - // Your async code here - manage_election_init_report_wrapped( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - election_id, - ) - .await +mod manage_election_init_report_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use 
super::*; + + /// Celery task: manages the election initialization report. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_init_report( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + election_id: String, + ) -> Result<()> { + let res = provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + let election_id = election_id.clone(); + Box::pin(async move { + manage_election_init_report_wrapped( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + election_id, + ) + .await + }) }) - }) - .await; + .await; - info!("result: {:?}", res); + info!("result: {:?}", res); - Ok(res?) + Ok(res?) + } } + +pub use manage_election_init_report_task::manage_election_init_report; diff --git a/packages/windmill/src/tasks/manage_election_voting_period_end.rs b/packages/windmill/src/tasks/manage_election_voting_period_end.rs index b938f5b86b..68495973dc 100644 --- a/packages/windmill/src/tasks/manage_election_voting_period_end.rs +++ b/packages/windmill/src/tasks/manage_election_voting_period_end.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Updates election presentation voting-period-end flags based on a scheduled event. use crate::postgres::election::{get_election_by_id, update_election_presentation}; use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::scheduled_event::*; @@ -25,6 +25,7 @@ use tracing::instrument; use tracing::{error, event, info, Level}; use uuid::Uuid; +/// Updates election presentation voting-period-end flags based on the scheduled event. 
#[instrument(err)] async fn manage_election_voting_period_end_wrapped( hasura_transaction: &Transaction<'_>, @@ -94,52 +95,61 @@ async fn manage_election_voting_period_end_wrapped( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn manage_election_voting_period_end( - tenant_id: String, - election_event_id: String, - scheduled_event_id: String, - election_id: String, -) -> Result<()> { - let lock: PgLock = PgLock::acquire( - format!( - "execute_manage_election_voting_period_end-{}-{}-{}-{}", - tenant_id, election_event_id, scheduled_event_id, election_id - ), - Uuid::new_v4().to_string(), - ISO8601::now() - .checked_add_signed(Duration::seconds(120)) - .expect("manage_election_voting_period_end lock expiry overflow"), - ) - .await - .with_context(|| "Error acquiring pglock")?; - - let res = provide_hasura_transaction(|hasura_transaction| { - let tenant_id = tenant_id.clone(); - let election_event_id = election_event_id.clone(); - let scheduled_event_id = scheduled_event_id.clone(); - let election_id = election_id.clone(); - Box::pin(async move { - // Your async code here - manage_election_voting_period_end_wrapped( - hasura_transaction, - tenant_id, - election_event_id, - scheduled_event_id, - election_id, - ) - .await - }) - }) - .await; +mod manage_election_voting_period_end_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - info!("result: {:?}", res); + use super::*; - lock.release() + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn manage_election_voting_period_end( + tenant_id: String, + election_event_id: String, + scheduled_event_id: String, + election_id: String, + ) -> Result<()> { + let lock: PgLock = PgLock::acquire( + format!( + "execute_manage_election_voting_period_end-{}-{}-{}-{}", + tenant_id, election_event_id, 
scheduled_event_id, election_id + ), + Uuid::new_v4().to_string(), + ISO8601::now() + .checked_add_signed(Duration::seconds(120)) + .expect("manage_election_voting_period_end lock expiry overflow"), + ) .await - .with_context(|| "Error releasing pglock")?; + .with_context(|| "Error acquiring pglock")?; + + let res = provide_hasura_transaction(|hasura_transaction| { + let tenant_id = tenant_id.clone(); + let election_event_id = election_event_id.clone(); + let scheduled_event_id = scheduled_event_id.clone(); + let election_id = election_id.clone(); + Box::pin(async move { + // Your async code here + manage_election_voting_period_end_wrapped( + hasura_transaction, + tenant_id, + election_event_id, + scheduled_event_id, + election_id, + ) + .await + }) + }) + .await; - Ok(res?) + info!("result: {:?}", res); + + lock.release() + .await + .with_context(|| "Error releasing pglock")?; + + Ok(res?) + } } + +pub use manage_election_voting_period_end_task::manage_election_voting_period_end; diff --git a/packages/windmill/src/tasks/manual_verification_report.rs b/packages/windmill/src/tasks/manual_verification_report.rs index 58c3ae2602..3889494896 100644 --- a/packages/windmill/src/tasks/manual_verification_report.rs +++ b/packages/windmill/src/tasks/manual_verification_report.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Builds manual verification PDFs for a single voter. use crate::postgres::reports::Report; use crate::services::database::{get_hasura_pool, get_keycloak_pool, PgConfig}; use crate::services::reports::manual_verification::ManualVerificationTemplate; @@ -15,6 +15,11 @@ use celery::error::TaskError; use deadpool_postgres::{Client as DbClient, Transaction}; use tracing::instrument; +/// Generates a manual verification report into a document. +/// +/// # Errors +/// +/// Returns an error if pools, transactions, or template rendering fail. 
#[instrument(err)] pub async fn generate_report( document_id: &str, @@ -53,7 +58,7 @@ pub async fn generate_report( template_alias: None, voter_id: Some(voter_id.to_string()), report_origin: ReportOriginatedFrom::ExportFunction, - executer_username: None, //TODO: fix? + executer_username: None, tally_session_id: None, }); @@ -81,38 +86,47 @@ pub async fn generate_report( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn generate_manual_verification_report( - document_id: String, - tenant_id: String, - election_event_id: String, - voter_id: String, - report: Option, -) -> Result<()> { - // Spawn the task using an async block - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - generate_report( - &document_id, - &tenant_id, - &election_event_id, - &voter_id, - GenerateReportMode::REAL, - report, - ) - .await - }) - } - }); +mod manual_verification_report_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), - Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), - }?; + use super::*; - Ok(()) + /// Celery task: generate a manual verification report. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn generate_manual_verification_report( + document_id: String, + tenant_id: String, + election_event_id: String, + voter_id: String, + report: Option, + ) -> Result<()> { + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + generate_report( + &document_id, + &tenant_id, + &election_event_id, + &voter_id, + GenerateReportMode::REAL, + report, + ) + .await + }) + } + }); + + // Await the result and handle JoinError explicitly + match handle.await { + Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), + Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), + }?; + + Ok(()) + } } + +pub use manual_verification_report_task::generate_manual_verification_report; diff --git a/packages/windmill/src/tasks/miru_plugin_tasks.rs b/packages/windmill/src/tasks/miru_plugin_tasks.rs index 025d8eb3a0..cbe5baf231 100644 --- a/packages/windmill/src/tasks/miru_plugin_tasks.rs +++ b/packages/windmill/src/tasks/miru_plugin_tasks.rs @@ -1,105 +1,121 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only -use crate::services::consolidation::create_transmission_package_service::create_transmission_package_service; -use crate::services::consolidation::send_transmission_package_service::send_transmission_package_service; +//! Miru consolidation tasks (transmission packages and signatures). 
use crate::services::consolidation::upload_signature_service::upload_transmission_package_signature_service; -use crate::services::tasks_execution::*; -use crate::types::error::Error; -use crate::types::error::Result; use anyhow::Result as AnyhowResult; use anyhow::{anyhow, Context}; -use celery::error::TaskError; -use sequent_core::types::hasura::core::TasksExecution; use tracing::{info, instrument}; -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn create_transmission_package_task( - tenant_id: String, - election_id: String, - area_id: String, - tally_session_id: String, - force: bool, - task_execution: TasksExecution, -) -> Result<()> { - let task_execution_clone: TasksExecution = task_execution.clone(); - // Spawn the task using an async block - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - match create_transmission_package_service( - &tenant_id, - &election_id, - &area_id, - &tally_session_id, - force, - ) - .await - { - Ok(_) => Ok(()), - Err(err) => { - // Manually print the backtrace from this error: - info!( - "Captured backtrace inside spawn_blocking:\n{}", - err.backtrace() - ); - update_fail(&task_execution_clone, &err.to_string()).await?; - Err(err.context("Failed to create transmission package")) +/// Celery workers for MiRu transmission package creation and delivery. 
+mod miru_consolidation_tasks { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use crate::services::consolidation::create_transmission_package_service::create_transmission_package_service; + use crate::services::consolidation::send_transmission_package_service::send_transmission_package_service; + use crate::services::tasks_execution::*; + use crate::types::error::Error; + use crate::types::error::Result; + use anyhow::{anyhow, Context}; + use celery::error::TaskError; + use sequent_core::types::hasura::core::TasksExecution; + use tracing::{info, instrument}; + + /// Celery task: create a transmission package. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn create_transmission_package_task( + tenant_id: String, + election_id: String, + area_id: String, + tally_session_id: String, + force: bool, + task_execution: TasksExecution, + ) -> Result<()> { + let task_execution_clone: TasksExecution = task_execution.clone(); + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + match create_transmission_package_service( + &tenant_id, + &election_id, + &area_id, + &tally_session_id, + force, + ) + .await + { + Ok(_) => Ok(()), + Err(err) => { + info!( + "Captured backtrace inside spawn_blocking:\n{}", + err.backtrace() + ); + update_fail(&task_execution_clone, &err.to_string()).await?; + Err(err.context("Failed to create transmission package")) + } } - } - }) - } - }); + }) + } + }); - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), - Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), - }?; + match handle.await { + Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), + Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", 
join_error))), + }?; - update_complete(&task_execution, None) - .await - .context("Failed to update task execution status to COMPLETED")?; + update_complete(&task_execution, None) + .await + .context("Failed to update task execution status to COMPLETED")?; - Ok(()) -} + Ok(()) + } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn send_transmission_package_task( - tenant_id: String, - election_id: String, - area_id: String, - tally_session_id: String, -) -> Result<()> { - // Spawn the task using an async block - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - send_transmission_package_service( - &tenant_id, - &election_id, - &area_id, - &tally_session_id, - ) - .await - .map_err(|err| anyhow!("{}", err)) - }) - } - }); + /// Celery task: send a transmission package. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn send_transmission_package_task( + tenant_id: String, + election_id: String, + area_id: String, + tally_session_id: String, + ) -> Result<()> { + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + send_transmission_package_service( + &tenant_id, + &election_id, + &area_id, + &tally_session_id, + ) + .await + .map_err(|err| anyhow!("{}", err)) + }) + } + }); - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), - Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), - }?; + match handle.await { + Ok(inner_result) => inner_result.map_err(|err| Error::from(err.context("Task failed"))), + Err(join_error) => Err(Error::from(anyhow!("Task panicked: {}", join_error))), + }?; - Ok(()) + Ok(()) + } } +pub use miru_consolidation_tasks::{ + create_transmission_package_task, 
send_transmission_package_task, +}; + +/// Uploads a trustee-signed transmission package using credentials supplied with the document. +/// +/// # Errors +/// +/// Returns an error if the blocking worker panics or signature upload fails. #[instrument(err)] pub async fn upload_signature_task( tenant_id: String, @@ -110,7 +126,6 @@ pub async fn upload_signature_task( document_id: String, password: String, ) -> AnyhowResult<()> { - // Spawn the task using an async block let handle = tokio::task::spawn_blocking({ move || { tokio::runtime::Handle::current().block_on(async move { @@ -127,12 +142,10 @@ pub async fn upload_signature_task( match res { Ok(_) => Ok(()), Err(err) => { - // Manually print the backtrace from this error: info!( "Captured backtrace inside spawn_blocking:\n{}", err.backtrace() ); - // Return the error so it still bubbles up Err(err) } } @@ -140,7 +153,6 @@ pub async fn upload_signature_task( } }); - // Await the result and handle JoinError explicitly match handle.await { Ok(inner_result) => inner_result.map_err(|err| err.context("Task failed")), Err(join_error) => Err(anyhow::Error::from(join_error).context("Task panicked")), diff --git a/packages/windmill/src/tasks/mod.rs b/packages/windmill/src/tasks/mod.rs index c97405d60f..a02d3d600f 100644 --- a/packages/windmill/src/tasks/mod.rs +++ b/packages/windmill/src/tasks/mod.rs @@ -2,6 +2,16 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Celery-registered workers: long-running exports, imports, ceremonies, and election lifecycle hooks. +//! +//! Some Celery task modules use localized `missing_docs` allows because +//! `#[celery::task]` generates internal structs, fields, and impls that trigger +//! Rust and Clippy missing-documentation lints. +//! +//! The allow attributes are intentionally scoped only to the macro-generated +//! task code so the rest of the module continues enforcing strict +//! documentation rules for domain logic and public APIs. 
+ pub mod activity_logs_report; pub mod create_ballot_receipt; pub mod create_keys; diff --git a/packages/windmill/src/tasks/plugins_tasks.rs b/packages/windmill/src/tasks/plugins_tasks.rs index e918cf4a9e..7da69914f5 100644 --- a/packages/windmill/src/tasks/plugins_tasks.rs +++ b/packages/windmill/src/tasks/plugins_tasks.rs @@ -1,7 +1,11 @@ -use crate::postgres::document; // SPDX-FileCopyrightText: 2025 Sequent Legal // // SPDX-License-Identifier: AGPL-3.0-only +//! WASM plugin entrypoints to execute tasks. +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] + +use crate::postgres::document; use crate::services::plugins_manager::plugin_manager; use crate::services::tasks_execution::*; use crate::types::error::Error; @@ -13,6 +17,7 @@ use sequent_core::types::hasura::core::TasksExecution; use serde_json::Value; use tracing::{info, instrument}; +/// Celery task: dispatch a named WASM plugin entrypoint with JSON payload and optional document ID. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(max_retries = 0)] diff --git a/packages/windmill/src/tasks/post_tally.rs b/packages/windmill/src/tasks/post_tally.rs index 16de3984d2..380bfce94c 100644 --- a/packages/windmill/src/tasks/post_tally.rs +++ b/packages/windmill/src/tasks/post_tally.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Finalizes tally results and updates related documents. use crate::postgres::document::get_document; use crate::services::documents::upload_and_return_document; use crate::types::error::{Error, Result}; @@ -49,6 +49,11 @@ use tokio::time::Duration as ChronoDuration; use rayon::iter::IntoParallelIterator; use rayon::iter::ParallelIterator; +/// Downloads the SQLite results database referenced on the tally session execution. +/// +/// # Errors +/// +/// Fails if execution documents are missing, the sqlite document id is absent, or download fails. 
#[instrument(skip(hasura_transaction), err)] pub async fn download_sqlite_database( tenant_id: &str, @@ -91,6 +96,11 @@ pub async fn download_sqlite_database( Ok(sqlite_database) } +/// Walks `root_path` for `.html` files, renders each to PDF in parallel using `pdf_options`, and writes sibling `.pdf` files. +/// +/// # Errors +/// +/// Stops on the first I/O, render, temp-file, or copy error from any worker thread. pub fn find_and_process_html_reports_parallel( root_path: &Path, pdf_options: PrintToPdfOptionsLocal, @@ -148,6 +158,12 @@ pub fn find_and_process_html_reports_parallel( Ok(()) } +/// Rebuilds tally HTML reports as PDFs inside the decrypted archive, +/// re-uploads `tally.tar.gz` and SQLite. +/// +/// # Errors +/// +/// Propagates missing documents, sqlite/tar extraction, PDF render, upload, or Hasura update failures. #[instrument(err)] pub async fn post_tally_task_impl( tenant_id: String, @@ -373,52 +389,70 @@ pub async fn post_tally_task_impl( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 1_200_000, max_retries = 0, expires = 15)] -pub async fn post_tally_task( - tenant_id: String, - election_event_id: String, - tally_session_id: String, -) -> Result<()> { - let _permit = acquire_semaphore().await?; - let Ok(lock) = PgLock::acquire( - format!( - "post-tally-task-{}-{}-{}", - tenant_id, election_event_id, tally_session_id - ), - Uuid::new_v4().to_string(), - ISO8601::now() - .checked_add_signed(Duration::seconds(120)) - .expect("post_tally lock expiry overflow"), - ) - .await - else { - info!( - "Skipping: post tally in progress for event {} and session id {}", - election_event_id, tally_session_id - ); - return Ok(()); - }; - let mut interval = tokio::time::interval(ChronoDuration::from_secs(30)); - let mut current_task = tokio::spawn(post_tally_task_impl( - tenant_id, - election_event_id, - tally_session_id, - )); - let _res = loop { - tokio::select! 
{ - _ = interval.tick() => { - // Execute the callback function here - lock.update_expiry().await?; - } - res = &mut current_task => { - - break res.map_err(|err| Error::String(format!("Error executing loop: {:?}", err))).flatten(); +mod post_tally_celery { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: serializes post-tally PDF work per event/session. + /// + /// # Errors + /// + /// Fails on lock/semaphore errors, join failures, or errors bubbled up from the post-tally implementation. + /// + /// # Panics + /// + /// Panics if lock expiry arithmetic overflows (misconfigured wall clock). + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 1_200_000, max_retries = 0, expires = 15)] + pub async fn post_tally_task( + tenant_id: String, + election_event_id: String, + tally_session_id: String, + ) -> Result<()> { + let _permit = acquire_semaphore().await?; + let Ok(lock) = PgLock::acquire( + format!( + "post-tally-task-{}-{}-{}", + tenant_id, election_event_id, tally_session_id + ), + Uuid::new_v4().to_string(), + ISO8601::now() + .checked_add_signed(Duration::seconds(120)) + .expect("post_tally lock expiry overflow"), + ) + .await + else { + info!( + "Skipping: post tally in progress for event {} and session id {}", + election_event_id, tally_session_id + ); + return Ok(()); + }; + let mut interval = tokio::time::interval(ChronoDuration::from_secs(30)); + let mut current_task = tokio::spawn(post_tally_task_impl( + tenant_id, + election_event_id, + tally_session_id, + )); + let _res = loop { + tokio::select! 
{ + _ = interval.tick() => { + // Execute the callback function here + lock.update_expiry().await?; + } + res = &mut current_task => { + + break res.map_err(|err| Error::String(format!("Error executing loop: {:?}", err))).flatten(); + } } - } - }; - lock.release().await?; + }; + lock.release().await?; - Ok(()) + Ok(()) + } } + +pub use post_tally_celery::post_tally_task; diff --git a/packages/windmill/src/tasks/prepare_publication_preview.rs b/packages/windmill/src/tasks/prepare_publication_preview.rs index 454fc2437a..11faeb083e 100644 --- a/packages/windmill/src/tasks/prepare_publication_preview.rs +++ b/packages/windmill/src/tasks/prepare_publication_preview.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Prepares ballot publication preview artifacts. use crate::postgres::document::get_support_material_documents; use crate::postgres::election::get_elections; use crate::postgres::election_event::get_election_event_by_id; @@ -24,58 +25,79 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use tracing::{info, instrument}; +/// JSON snapshot of ballot styles, event metadata, and support materials for publication preview. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PublicationPreview { + /// Resolved ballot style JSON for the publication. pub ballot_styles: Value, - election_event: Value, - elections: Value, - support_materials: Value, - documents: Value, + /// Serialized election event data. + pub election_event: Value, + /// Elections under the event. + pub elections: Value, + /// Support material data and linked documents. + pub support_materials: Value, + /// Additional document metadata. 
+ pub documents: Value, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 2)] -pub async fn prepare_publication_preview( - tenant_id: String, - election_event_id: String, - ballot_publication_id: String, - task_execution: TasksExecution, - document_id: String, -) -> Result<()> { - let mut hasura_db_client = get_hasura_pool() - .await - .get() - .await - .map_err(|e| format!("Failed to get db connection: {e:?}"))?; - - let hasura_transaction = hasura_db_client - .transaction() - .await - .map_err(|e| format!("Failed to get db transaction: {e:?}"))?; - - let result = prepare_publication_preview_task( - &hasura_transaction, - tenant_id, - election_event_id, - ballot_publication_id, - document_id, - ) - .await; +mod prepare_publication_preview_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: build a JSON publication preview and mark a document complete on success. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 2)] + pub async fn prepare_publication_preview( + tenant_id: String, + election_event_id: String, + ballot_publication_id: String, + task_execution: TasksExecution, + document_id: String, + ) -> Result<()> { + let mut hasura_db_client = get_hasura_pool() + .await + .get() + .await + .map_err(|e| format!("Failed to get db connection: {e:?}"))?; - match result { - Ok(document_id) => { - let _res = update_complete(&task_execution, Some(document_id.clone())).await; - Ok(()) - } - Err(err) => { - let err_str = format!("Error preparing publication preview: {err:?}"); - let _res = update_fail(&task_execution, &err.to_string()).await; - Err(err_str.into()) + let hasura_transaction = hasura_db_client + .transaction() + .await + .map_err(|e| format!("Failed to get db transaction: {e:?}"))?; + + let result = super::prepare_publication_preview_task( + &hasura_transaction, + tenant_id, + election_event_id, + 
ballot_publication_id, + document_id, + ) + .await; + + match result { + Ok(document_id) => { + let _res = update_complete(&task_execution, Some(document_id.clone())).await; + Ok(()) + } + Err(err) => { + let err_str = format!("Error preparing publication preview: {err:?}"); + let _res = update_fail(&task_execution, &err.to_string()).await; + Err(err_str.into()) + } } } } +pub use prepare_publication_preview_task::prepare_publication_preview; + +/// Loads publication JSON, event context, and uploads the combined preview document. +/// +/// # Errors +/// +/// Propagates DB, serialization, or S3 upload failures from the preview pipeline. #[instrument(err)] pub async fn prepare_publication_preview_task( hasura_transaction: &Transaction<'_>, @@ -145,7 +167,11 @@ pub async fn prepare_publication_preview_task( Ok(document_id) } -/// Get the support materials and document vectors in json. +/// Loads support materials with their linked documents and serializes both lists to JSON values. +/// +/// # Errors +/// +/// Propagates database, context, or JSON serialization failures. pub async fn get_support_material_documents_json( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -164,7 +190,11 @@ pub async fn get_support_material_documents_json( Ok((support_materials, documents)) } -/// Get the elections and mutate the status.voting_status to open +/// Loads elections for an event and rewrites each row’s voting status to open. +/// +/// # Errors +/// +/// Propagates database lookup or JSON serialization failures. pub async fn get_elections_json_with_open_status( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/tasks/process_board.rs b/packages/windmill/src/tasks/process_board.rs index 60ab396629..ad25e7fa8e 100644 --- a/packages/windmill/src/tasks/process_board.rs +++ b/packages/windmill/src/tasks/process_board.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! 
Processes election event board activities and tally workflows. use anyhow::{Context, Result as AnyhowResult}; use celery::error::TaskError; use deadpool_postgres::{Client as DbClient, Transaction}; @@ -20,6 +20,11 @@ use crate::tasks::post_tally::post_tally_task; use crate::tasks::set_public_key::set_public_key; use crate::types::error::Result; +/// Loads keys ceremonies and tally sessions for an election event and enqueues follow-up Celery tasks. +/// +/// # Errors +/// +/// Fails on database errors, missing election data, or when dispatching a child task to Celery fails. #[instrument(err)] pub async fn process_board_impl(tenant_id: String, election_event_id: String) -> AnyhowResult<()> { let mut hasura_db_client: DbClient = get_hasura_pool().await.get().await?; @@ -114,11 +119,21 @@ pub async fn process_board_impl(tenant_id: String, election_event_id: String) -> Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn process_board(tenant_id: String, election_event_id: String) -> Result<()> { - process_board_impl(tenant_id, election_event_id).await?; +mod process_board_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - Ok(()) + use super::*; + + /// Celery task: drives keys-ceremony and tally-related work for one election event by enqueueing downstream tasks. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn process_board(tenant_id: String, election_event_id: String) -> Result<()> { + process_board_impl(tenant_id, election_event_id).await?; + + Ok(()) + } } + +pub use process_board_task::process_board; diff --git a/packages/windmill/src/tasks/render_document_pdf.rs b/packages/windmill/src/tasks/render_document_pdf.rs index d63ba1769a..a1ea7397a4 100644 --- a/packages/windmill/src/tasks/render_document_pdf.rs +++ b/packages/windmill/src/tasks/render_document_pdf.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Renders tally results document to PDF. use crate::postgres::document::get_document; use crate::services::ceremonies::velvet_tally::generate_initial_state; use crate::services::compress::extract_archive_to_temp_dir; @@ -23,6 +24,11 @@ use tracing::instrument; use velvet::config::generate_reports::PipeConfigGenerateReports; use velvet::pipes::pipe_name::PipeName; +/// Resolves PDF print options from the velvet `generate-reports` pipe config. +/// +/// # Errors +/// +/// Fails when ids are incomplete, the tally package cannot be read, or the pipe config is missing or invalid. #[instrument(err, skip(hasura_transaction))] pub async fn get_tally_pdf_config( hasura_transaction: &Transaction<'_>, @@ -68,6 +74,11 @@ pub async fn get_tally_pdf_config( .map(|option| option.to_print_to_pdf_options())) } +/// Converts an existing HTML document to PDF with tally-derived print options and uploads the PDF. +/// +/// # Errors +/// +/// Propagates pool/transaction, missing document, render, temp path, or upload failures. #[instrument(err)] pub async fn render_document_pdf_wrap( tenant_id: String, @@ -138,6 +149,11 @@ pub async fn render_document_pdf_wrap( Ok(()) } +/// Runs HTML-to-PDF conversion. +/// +/// # Errors +/// +/// Propagates render failures or task execution update errors. 
#[instrument(err)] pub async fn render_document_pdf_task_wrap( tenant_id: String, @@ -170,28 +186,38 @@ pub async fn render_document_pdf_task_wrap( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 60000, max_retries = 2)] -pub async fn render_document_pdf( - tenant_id: String, - document_id: String, - election_event_id: Option, - task_execution: TasksExecution, - executer_username: Option, - output_document_id: String, - tally_session_id: Option, -) -> WrapResult<()> { - // Note, put this in a thread? - render_document_pdf_task_wrap( - tenant_id, - document_id, - election_event_id, - task_execution, - executer_username, - output_document_id, - tally_session_id, - ) - .await - .map_err(|err| WrapError::from(anyhow!("Task panicked: {}", err))) +mod render_document_pdf_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: renders a tally-linked HTML document to PDF and records completion on the task execution. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 60000, max_retries = 2)] + pub async fn render_document_pdf( + tenant_id: String, + document_id: String, + election_event_id: Option, + task_execution: TasksExecution, + executer_username: Option, + output_document_id: String, + tally_session_id: Option, + ) -> WrapResult<()> { + // Note, put this in a thread? 
+ render_document_pdf_task_wrap( + tenant_id, + document_id, + election_event_id, + task_execution, + executer_username, + output_document_id, + tally_session_id, + ) + .await + .map_err(|err| WrapError::from(anyhow!("Task panicked: {}", err))) + } } + +pub use render_document_pdf_task::render_document_pdf; diff --git a/packages/windmill/src/tasks/render_report.rs b/packages/windmill/src/tasks/render_report.rs index b4c2911e29..a86c83047c 100644 --- a/packages/windmill/src/tasks/render_report.rs +++ b/packages/windmill/src/tasks/render_report.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Renders report HTML/PDF. use crate::postgres::render_report::render_report_task; use crate::services::database::get_hasura_pool; use crate::services::tasks_semaphore::acquire_semaphore; @@ -12,66 +13,88 @@ use serde::{Deserialize, Serialize}; use serde_json::{Map, Value}; use tracing::instrument; +/// Format type for the rendered report. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub enum FormatType { + /// Text or HTML. TEXT, + /// PDF. PDF, } +/// Body sent to the render task. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RenderTemplateBody { + /// Mustache-style template. pub template: String, + /// Document name. pub name: String, + /// Variables merged into the template context. pub variables: Map, + /// Format type for the rendered report. 
pub format: FormatType, } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 60000)] -pub async fn render_report( - input: RenderTemplateBody, - tenant_id: String, - election_event_id: String, -) -> Result<()> { - let _permit = acquire_semaphore().await?; - // Spawn the task using an async block - let handle = tokio::task::spawn_blocking({ - move || { - tokio::runtime::Handle::current().block_on(async move { - let mut db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .map_err(|err| format!("Error getting DB pool: {err:?}"))?; +mod render_report_celery { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - let hasura_transaction = match db_client.transaction().await { - Ok(transaction) => transaction, - Err(err) => { - return Err(format!("Error starting Hasura transaction: {err}")); - } - }; + use super::*; - render_report_task(&hasura_transaction, input, tenant_id, election_event_id) - .await - .map_err(|err| format!("{err}"))?; + /// Celery task: renders `input` inside a blocking Hasura transaction using the shared Postgres render helper. + /// + /// # Errors + /// + /// Fails on semaphore acquisition, DB pool/transaction errors, render failures, commit errors, or join panics. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 60000)] + pub async fn render_report( + input: RenderTemplateBody, + tenant_id: String, + election_event_id: String, + ) -> Result<()> { + let _permit = acquire_semaphore().await?; + // Spawn the task using an async block + let handle = tokio::task::spawn_blocking({ + move || { + tokio::runtime::Handle::current().block_on(async move { + let mut db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .map_err(|err| format!("Error getting DB pool: {err:?}"))?; - match hasura_transaction.commit().await { - Ok(_) => (), - Err(err) => { - return Err(format!("Commit failed: {err}")); - } - }; - Ok(()) - }) - } - }); + let hasura_transaction = match db_client.transaction().await { + Ok(transaction) => transaction, + Err(err) => { + return Err(format!("Error starting Hasura transaction: {err}")); + } + }; - // Await the result and handle JoinError explicitly - match handle.await { - Ok(inner_result) => Ok(inner_result.map_err(|err| format!("Task failed: {err:?}"))?), - Err(join_error) => Err(format!("Join error. Task panicked: {join_error:?}")), - }?; + render_report_task(&hasura_transaction, input, tenant_id, election_event_id) + .await + .map_err(|err| format!("{err}"))?; - Ok(()) + match hasura_transaction.commit().await { + Ok(_) => (), + Err(err) => { + return Err(format!("Commit failed: {err}")); + } + }; + Ok(()) + }) + } + }); + + // Await the result and handle JoinError explicitly + match handle.await { + Ok(inner_result) => Ok(inner_result.map_err(|err| format!("Task failed: {err:?}"))?), + Err(join_error) => Err(format!("Join error. 
Task panicked: {join_error:?}")), + }?; + + Ok(()) + } } + +pub use render_report_celery::render_report; diff --git a/packages/windmill/src/tasks/review_boards.rs b/packages/windmill/src/tasks/review_boards.rs index d3b83e3218..79d0352381 100644 --- a/packages/windmill/src/tasks/review_boards.rs +++ b/packages/windmill/src/tasks/review_boards.rs @@ -1,6 +1,9 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Reviews board activities and tally workflows. +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] use crate::postgres::election_event::get_batch_election_events; use crate::postgres::tenant::get_tenant_by_id; use crate::services::celery_app::get_celery_app; @@ -16,6 +19,12 @@ use std::env; use tracing::instrument; use tracing::{event, Level}; +/// Celery task: ensures the platform default tenant exists and walks +/// election events in batches, enqueueing `process_board` for each. +/// +/// # Errors +/// +/// Fails if required environment variables, the database, Keycloak checks, or Celery dispatch return an error. #[instrument(err)] #[wrap_map_err::wrap_map_err(TaskError)] #[celery::task(expires = 30)] diff --git a/packages/windmill/src/tasks/scheduled_events.rs b/packages/windmill/src/tasks/scheduled_events.rs index 1b6d00bd57..ed0050b4f1 100644 --- a/packages/windmill/src/tasks/scheduled_events.rs +++ b/packages/windmill/src/tasks/scheduled_events.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Dispatches calendar-driven scheduled events across election events. use crate::postgres::{ election::{get_election_by_id, update_election_presentation}, scheduled_event::find_all_active_events, @@ -34,6 +35,7 @@ use std::sync::Arc; use tracing::instrument; use tracing::{event, info, Level}; +/// Parses the scheduled date into the machine local timezone. 
#[instrument] pub fn get_datetime(event: &ScheduledEvent) -> Option> { let cron_config = event.cron_config.clone()?; @@ -41,6 +43,11 @@ pub fn get_datetime(event: &ScheduledEvent) -> Option> { ISO8601::to_date(&scheduled_date).ok() } +/// Enqueues init-report scheduled event. +/// +/// # Errors +/// +/// Fails on payload deserialization or Celery dispatch errors. #[instrument(skip(celery_app), err)] pub async fn handle_allow_init_report( celery_app: Arc, @@ -106,6 +113,11 @@ pub async fn handle_allow_init_report( Ok(()) } +/// Schedules voting-period-end handling for a single election or the whole election event. +/// +/// # Errors +/// +/// Fails on payload deserialization or Celery dispatch errors. #[instrument(skip(celery_app), err)] pub async fn handle_allow_voting_period_end( celery_app: Arc, @@ -171,6 +183,11 @@ pub async fn handle_allow_voting_period_end( Ok(()) } +/// Schedules start/end voting window tasks for an election-specific payload or the entire event. +/// +/// # Errors +/// +/// Fails on payload deserialization or Celery dispatch errors. #[instrument(skip(celery_app), err)] pub async fn handle_voting_event( celery_app: Arc, @@ -236,6 +253,11 @@ pub async fn handle_voting_event( Ok(()) } +/// Enqueues the enrollment window scheduled event. +/// +/// # Errors +/// +/// Fails on Celery dispatch errors. #[instrument(skip(celery_app), err)] pub async fn handle_election_event_enrollment( celery_app: Arc, @@ -274,6 +296,12 @@ pub async fn handle_election_event_enrollment( Ok(()) } +/// Schedules the lockdown Celery task at the configured datetime for the tenant and election event. +/// +/// # Errors +/// +/// Fails on Celery dispatch errors. +#[instrument(skip(celery_app), err)] pub async fn handle_election_lockdown( celery_app: Arc, scheduled_event: &ScheduledEvent, @@ -308,6 +336,11 @@ pub async fn handle_election_lockdown( Ok(()) } +/// Enqueues the allow-tally scheduled event. 
+/// +/// # Errors +/// +/// Fails on payload deserialization or Celery dispatch errors. #[instrument(skip(celery_app), err)] pub async fn handle_election_allow_tally( celery_app: Arc, @@ -352,86 +385,106 @@ pub async fn handle_election_allow_tally( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn scheduled_events(rate_seconds: u64) -> Result<()> { - let celery_app = get_celery_app().await; - let now = ISO8601::now(); - let nsecs_later = now - .checked_add_signed(Duration::seconds( - i64::try_from(rate_seconds).expect("scheduled_events rate_seconds exceeds i64"), - )) - .expect("scheduled_events comparison time overflow"); - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .map_err(|e| anyhow!("Error getting hasura client {}", e))?; - let hasura_transaction = hasura_db_client - .transaction() - .await - .map_err(|e| anyhow!("Error creating a hasura transaction {}", e))?; +mod scheduled_events_celery { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - let scheduled_events = find_all_active_events(&hasura_transaction) - .await - .map_err(|e| anyhow!("Error finding all active events {}", e))?; - info!("Found {} scheduled events", scheduled_events.len()); - let to_be_run_now = scheduled_events - .iter() - .filter(|event| { - let Some(formatted_date) = get_datetime(event) else { - return false; + use super::*; + + /// Celery task: loads active scheduled rows due soon and dispatches the matching scheduled events tasks. + /// + /// # Errors + /// + /// Fails on Hasura pool/transaction errors, handler failures for strict paths, or deserialization issues bubbled from handlers. + /// + /// # Panics + /// + /// Panics if `rate_seconds` does not fit in `i64` or time arithmetic overflows. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn scheduled_events(rate_seconds: u64) -> Result<()> { + let celery_app = get_celery_app().await; + let now = ISO8601::now(); + let nsecs_later = now + .checked_add_signed(Duration::seconds( + i64::try_from(rate_seconds).expect("scheduled_events rate_seconds exceeds i64"), + )) + .expect("scheduled_events comparison time overflow"); + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .map_err(|e| anyhow!("Error getting hasura client {}", e))?; + let hasura_transaction = hasura_db_client + .transaction() + .await + .map_err(|e| anyhow!("Error creating a hasura transaction {}", e))?; + + let scheduled_events = find_all_active_events(&hasura_transaction) + .await + .map_err(|e| anyhow!("Error finding all active events {}", e))?; + info!("Found {} scheduled events", scheduled_events.len()); + let to_be_run_now = scheduled_events + .iter() + .filter(|event| { + let Some(formatted_date) = get_datetime(event) else { + return false; + }; + formatted_date < nsecs_later + }) + .collect::>(); + info!("Found {} events to be run now", to_be_run_now.len()); + for scheduled_event in to_be_run_now { + let Some(event_processor) = scheduled_event.event_processor.clone() else { + continue; }; - formatted_date < nsecs_later - }) - .collect::>(); - info!("Found {} events to be run now", to_be_run_now.len()); - for scheduled_event in to_be_run_now { - let Some(event_processor) = scheduled_event.event_processor.clone() else { - continue; - }; - match event_processor { - EventProcessors::ALLOW_INIT_REPORT => { - handle_allow_init_report(celery_app.clone(), scheduled_event).await?; - } - EventProcessors::ALLOW_VOTING_PERIOD_END => { - handle_allow_voting_period_end(celery_app.clone(), scheduled_event).await?; - } - EventProcessors::START_VOTING_PERIOD | EventProcessors::END_VOTING_PERIOD => { - if let Err(err) = 
handle_voting_event(celery_app.clone(), scheduled_event).await { - event!( - Level::ERROR, - "Event {} failed with error {}", - scheduled_event.id, - err, - ); - } else { - event!( - Level::INFO, - "Event {} executed successfully", - scheduled_event.id, - ); + match event_processor { + EventProcessors::ALLOW_INIT_REPORT => { + handle_allow_init_report(celery_app.clone(), scheduled_event).await?; + } + EventProcessors::ALLOW_VOTING_PERIOD_END => { + handle_allow_voting_period_end(celery_app.clone(), scheduled_event).await?; + } + EventProcessors::START_VOTING_PERIOD | EventProcessors::END_VOTING_PERIOD => { + if let Err(err) = handle_voting_event(celery_app.clone(), scheduled_event).await + { + event!( + Level::ERROR, + "Event {} failed with error {}", + scheduled_event.id, + err, + ); + } else { + event!( + Level::INFO, + "Event {} executed successfully", + scheduled_event.id, + ); + } + } + EventProcessors::START_ENROLLMENT_PERIOD + | EventProcessors::END_ENROLLMENT_PERIOD => { + handle_election_event_enrollment(celery_app.clone(), scheduled_event).await?; + } + EventProcessors::START_LOCKDOWN_PERIOD | EventProcessors::END_LOCKDOWN_PERIOD => { + handle_election_lockdown(celery_app.clone(), scheduled_event).await?; + } + EventProcessors::ALLOW_TALLY => { + handle_election_allow_tally(celery_app.clone(), scheduled_event).await?; + } + EventProcessors::CREATE_REPORT | EventProcessors::SEND_TEMPLATE => { + // Nothing to do for these event processors. Avoid a + // catch all to ignore unknown events, this way when + // new variants are added to `EventProcessors`, a + // compile time error will happen notifying about the + // missing logic for handling that new variant. 
} - } - EventProcessors::START_ENROLLMENT_PERIOD | EventProcessors::END_ENROLLMENT_PERIOD => { - handle_election_event_enrollment(celery_app.clone(), scheduled_event).await?; - } - EventProcessors::START_LOCKDOWN_PERIOD | EventProcessors::END_LOCKDOWN_PERIOD => { - handle_election_lockdown(celery_app.clone(), scheduled_event).await?; - } - EventProcessors::ALLOW_TALLY => { - handle_election_allow_tally(celery_app.clone(), scheduled_event).await?; - } - EventProcessors::CREATE_REPORT | EventProcessors::SEND_TEMPLATE => { - // Nothing to do for these event processors. Avoid a - // catch all to ignore unknown events, this way when - // new variants are added to `EventProcessors`, a - // compile time error will happen notifying about the - // missing logic for handling that new variant. } } - } - Ok(()) + Ok(()) + } } + +pub use scheduled_events_celery::scheduled_events; diff --git a/packages/windmill/src/tasks/scheduled_reports.rs b/packages/windmill/src/tasks/scheduled_reports.rs index d904930e3b..e64aa4adcc 100644 --- a/packages/windmill/src/tasks/scheduled_reports.rs +++ b/packages/windmill/src/tasks/scheduled_reports.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Handles scheduled report generation. use crate::postgres::reports::{get_all_active_reports, update_report_last_document_time, Report}; use crate::services::celery_app::get_celery_app; use crate::services::database::get_hasura_pool; @@ -67,6 +67,7 @@ pub fn get_next_scheduled_time(report: &Report) -> Option> { Some(next_run.with_timezone(&Local)) } +/// Parses timestamps from strings. fn parse_last_document_produced(date_str: &str) -> Option> { let format = "%Y-%m-%dT%H:%M:%S%.f"; match NaiveDateTime::parse_from_str(date_str, format) { @@ -78,106 +79,120 @@ fn parse_last_document_produced(date_str: &str) -> Option> { } } -/// The Celery task for scheduling reports based on cron configuration. 
-#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(time_limit = 10, max_retries = 0, expires = 30)] -pub async fn scheduled_reports(rate_seconds: u64) -> Result<()> { - // Get the Celery app for scheduling tasks - let celery_app = get_celery_app().await; - - // Get the current time - let now = Local::now(); - let nsecs_later = now - .checked_add_signed(Duration::seconds( - i64::try_from(rate_seconds).expect("scheduled_reports rate_seconds exceeds i64"), - )) - .expect("scheduled_reports comparison time overflow"); - - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .map_err(|e| anyhow!("Error getting hasura client: {e}"))?; - - let hasura_transaction = hasura_db_client.transaction().await?; - - // Fetch all active reports from the database - let active_reports = get_all_active_reports(&hasura_transaction) - .await - .map_err(|err| anyhow!("Error getting all active reports: {err:?}"))?; - info!("Found {len} active reports", len = active_reports.len()); - - // Filter out reports that need to run now based on their cron configuration - let to_be_run_now = active_reports - .iter() - .filter(|report| { - let Some(formatted_date) = get_next_scheduled_time(report) else { - return false; +mod scheduled_reports_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: finds active reports whose next cron run falls inside the + /// lookahead window and enqueues [`generate_report`]. + /// + /// # Errors + /// + /// Fails on database pool/transaction errors, task execution creation, Celery dispatch, or commit failures. 
+ #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(time_limit = 10, max_retries = 0, expires = 30)] + pub async fn scheduled_reports(rate_seconds: u64) -> Result<()> { + // Get the Celery app for scheduling tasks + let celery_app = get_celery_app().await; + + // Get the current time + let now = Local::now(); + let nsecs_later = now + .checked_add_signed(Duration::seconds( + i64::try_from(rate_seconds).expect("scheduled_reports rate_seconds exceeds i64"), + )) + .expect("scheduled_reports comparison time overflow"); + + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .map_err(|e| anyhow!("Error getting hasura client: {e}"))?; + + let hasura_transaction = hasura_db_client.transaction().await?; + + // Fetch all active reports from the database + let active_reports = get_all_active_reports(&hasura_transaction) + .await + .map_err(|err| anyhow!("Error getting all active reports: {err:?}"))?; + info!("Found {len} active reports", len = active_reports.len()); + + // Filter out reports that need to run now based on their cron configuration + let to_be_run_now = active_reports + .iter() + .filter(|report| { + let Some(formatted_date) = get_next_scheduled_time(report) else { + return false; + }; + formatted_date < nsecs_later + }) + .collect::>(); + info!( + "Found {num} reports to be run now", + num = to_be_run_now.len() + ); + + // Schedule the task for each report that needs to run + for report in to_be_run_now { + let Some(datetime) = get_next_scheduled_time(report) else { + continue; }; - formatted_date < nsecs_later - }) - .collect::>(); - info!( - "Found {num} reports to be run now", - num = to_be_run_now.len() - ); - // Schedule the task for each report that needs to run - for report in to_be_run_now { - let Some(datetime) = get_next_scheduled_time(report) else { - continue; - }; - - let cron_config = report - .cron_config - .clone() - .ok_or_else(|| anyhow!("Cron config not found"))?; - - let 
document_id = Uuid::new_v4().to_string(); - - // Create a task execution record for this report generation - let task_execution = tasks_execution::post( - &report.tenant_id, - Some(report.election_event_id.as_str()), - ETasksExecution::GENERATE_REPORT, - &cron_config.executer_username, - ) - .await - .map_err(|err| anyhow!("Error creating task execution record: {err:?}"))?; - - let _task = celery_app - .send_task( - generate_report::new( - report.clone(), - document_id.clone(), - GenerateReportMode::REAL, - cron_config.is_active, - Some(task_execution), - Some(cron_config.executer_username), - None, - ) - .with_eta(datetime.with_timezone(&Utc)) - .with_expires_in(120), + let cron_config = report + .cron_config + .clone() + .ok_or_else(|| anyhow!("Cron config not found"))?; + + let document_id = Uuid::new_v4().to_string(); + + // Create a task execution record for this report generation + let task_execution = tasks_execution::post( + &report.tenant_id, + Some(report.election_event_id.as_str()), + ETasksExecution::GENERATE_REPORT, + &cron_config.executer_username, ) .await - .map_err(|err| anyhow!("Error sending generate_report task: {err:?}"))?; + .map_err(|err| anyhow!("Error creating task execution record: {err:?}"))?; + + let _task = celery_app + .send_task( + generate_report::new( + report.clone(), + document_id.clone(), + GenerateReportMode::REAL, + cron_config.is_active, + Some(task_execution), + Some(cron_config.executer_username), + None, + ) + .with_eta(datetime.with_timezone(&Utc)) + .with_expires_in(120), + ) + .await + .map_err(|err| anyhow!("Error sending generate_report task: {err:?}"))?; - update_report_last_document_time(&hasura_transaction, &report.tenant_id, &report.id) - .await - .map_err(|err| anyhow!("Error updating report last document time: {err:?}"))?; + update_report_last_document_time(&hasura_transaction, &report.tenant_id, &report.id) + .await + .map_err(|err| anyhow!("Error updating report last document time: {err:?}"))?; - event!( - 
Level::INFO, - "Scheduled report task with id: {id}", - id = report.id - ); - } + event!( + Level::INFO, + "Scheduled report task with id: {id}", + id = report.id + ); + } - hasura_transaction - .commit() - .await - .map_err(|err| anyhow!("Error committing hasura transaction: {err:?}"))?; + hasura_transaction + .commit() + .await + .map_err(|err| anyhow!("Error committing hasura transaction: {err:?}"))?; - Ok(()) + Ok(()) + } } + +pub use scheduled_reports_task::scheduled_reports; diff --git a/packages/windmill/src/tasks/send_template.rs b/packages/windmill/src/tasks/send_template.rs index dd58471395..325d2d3081 100644 --- a/packages/windmill/src/tasks/send_template.rs +++ b/packages/windmill/src/tasks/send_template.rs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! Sends communication templates (email/SMS) to users. use crate::postgres::area::get_elections_by_area; use crate::postgres::election_event::get_election_event_by_id_if_exist; use crate::services::celery_app::get_celery_app; @@ -42,6 +43,11 @@ use std::default::Default; use strand::info; use tracing::{event, info, instrument, Level}; +/// Builds the JSON variables for template rendering. +/// +/// # Errors +/// +/// Fails on `VOTING_PORTAL_URL` missing or auth URL construction errors. #[instrument(err)] fn get_variables( user: &User, @@ -85,6 +91,11 @@ fn get_variables( Ok(variables) } +/// Renders the SMS template and sends it when both a handset number and SMS config are configured. +/// +/// # Errors +/// +/// Fails on template render errors or SMS provider failures. #[instrument(skip(sender), err)] async fn send_template_sms( receiver: &Option, @@ -109,6 +120,11 @@ async fn send_template_sms( Ok(None) } +/// Renders subject and body templates and sends via the configured email sender when a receiver and template exist. +/// +/// # Errors +/// +/// Fails on template render errors or email transport failures. 
#[instrument(skip(sender), err)] pub async fn send_template_email( receiver: &Option, @@ -162,17 +178,29 @@ pub async fn send_template_email( Ok(None) } +/// Send counters for one aggregation scope (event-wide or single election). #[derive(Default, Debug)] struct MetricsUnit { + /// Number of emails successfully sent in this scope. num_emails_sent: i64, + /// Number of SMS messages successfully sent in this scope. num_sms_sent: i64, } + +/// Tracks successful sends at election-event level and per-election for area-based voters. #[derive(Default, Debug)] struct Metrics { + /// Totals for the whole election event audience. election_event: MetricsUnit, + /// Per-election totals derived from the voter’s area mapping. metrics_by_election_id: HashMap, } +/// Increments email or SMS counters on `metrics_unit`. +/// +/// # Panics +/// +/// Panics if counters overflow `i64` (pathological send volume). fn update_metrics_unit(metrics_unit: &mut MetricsUnit, communication_method: &TemplateMethod) { match communication_method { TemplateMethod::EMAIL => { @@ -191,6 +219,7 @@ fn update_metrics_unit(metrics_unit: &mut MetricsUnit, communication_method: &Te }; } +/// Updates in-memory metrics for the event and for each election tied to the user’s area after a successful send. fn update_metrics( metrics: &mut Metrics, elections_by_area: &HashMap>, @@ -233,6 +262,15 @@ fn update_metrics( }); } +/// Persists aggregated email/SMS counts to election event and per-election statistics. +/// +/// # Errors +/// +/// Fails on Hasura update errors or overflow when summing counters. +/// +/// # Panics +/// +/// Panics if summed counters overflow `i64`. async fn update_stats( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -281,6 +319,11 @@ async fn update_stats( Ok(()) } +/// Appends a send-template entry to the election event immu-board when an event context exists. +/// +/// # Errors +/// +/// Fails when the bulletin board cannot be resolved or the electoral log write fails. 
async fn on_success_send_message( hasura_transaction: &Transaction<'_>, election_event: Option, @@ -329,210 +372,223 @@ async fn on_success_send_message( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task] -pub async fn send_template( - body: SendTemplateBody, - tenant_id: String, - admin_id: String, - election_event_id: Option, -) -> Result<()> { - let celery_app = get_celery_app().await; - let realm = match election_event_id { - Some(ref election_event_id) => get_event_realm(&tenant_id, election_event_id), - None => get_tenant_realm(&tenant_id), - }; - - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .map_err(|err| format!("Error getting hasura db pool: {err}"))?; - - let hasura_transaction = hasura_db_client - .transaction() - .await - .map_err(|err| format!("Error starting hasura transaction: {err}"))?; - - let election_event = match election_event_id.clone() { - None => None, - Some(election_event_id) => { - get_election_event_by_id_if_exist(&hasura_transaction, &tenant_id, &election_event_id) - .await? - } - }; - - let mut keycloak_db_client: DbClient = get_keycloak_pool() - .await - .get() - .await - .map_err(|err| anyhow!("{}", err))?; - let batch_size = PgConfig::from_env()?.default_sql_batch_size; - - let Some(audience_selection) = body.audience_selection.clone() else { - return Err(Error::String("Missing audience selection".to_string())); - }; - let user_ids = match audience_selection { - AudienceSelection::SELECTED => body.audience_voter_ids.clone(), - _ => None, - }; - - // perform listing in batches in a read-only repeatable transaction, and - // perform stats updates in a new stats transaction each time - because for - // each mail/sms sent, there's no rollback for that. 
- let mut processed: i32 = 0; - event!(Level::INFO, "before transaction"); - let keycloak_transaction = keycloak_db_client - .transaction() - .await - .map_err(|err| anyhow!("{err}"))?; - event!(Level::INFO, "before isolation"); - keycloak_transaction - .simple_query("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;") - .await - .with_context(|| "can't set transaction isolation level")?; - event!(Level::INFO, "after isolation"); - let mut hasura_db_client: DbClient = get_hasura_pool() - .await - .get() - .await - .with_context(|| "Error loading hasura db client")?; +mod send_template_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] + + use super::*; + + /// Celery task: send templates (email/SMS) to users. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task] + pub async fn send_template( + body: SendTemplateBody, + tenant_id: String, + admin_id: String, + election_event_id: Option, + ) -> Result<()> { + let celery_app = get_celery_app().await; + let realm = match election_event_id { + Some(ref election_event_id) => get_event_realm(&tenant_id, election_event_id), + None => get_tenant_realm(&tenant_id), + }; - let elections_by_area = match election_event_id.clone() { - None => HashMap::new(), - Some(ref election_event_id) => get_elections_by_area( - &hasura_transaction, - tenant_id.as_str(), - election_event_id.as_str(), - ) - .await - .with_context(|| "Error listing elections by area")?, - }; + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .map_err(|err| format!("Error getting hasura db pool: {err}"))?; - loop { let hasura_transaction = hasura_db_client .transaction() .await - .with_context(|| "Error creating a transaction")?; - - let filter = ListUsersFilter { - tenant_id: tenant_id.clone(), - election_event_id: election_event_id.clone(), - election_id: None, - area_id: None, - realm: realm.clone(), - search: None, - first_name: None, - last_name: None, - username: 
None, - email: None, - limit: Some(batch_size), - offset: Some(processed), - user_ids: user_ids.clone(), - attributes: None, - enabled: None, - email_verified: None, - sort: None, - has_voted: None, - authorized_to_election_alias: None, - }; + .map_err(|err| format!("Error starting hasura transaction: {err}"))?; - let (users, total_count) = match audience_selection { - AudienceSelection::NOT_VOTED | AudienceSelection::VOTED => { - list_users_with_vote_info(&hasura_transaction, &keycloak_transaction, filter) - .await - .with_context(|| "Failed to featch list_users_with_vote_info")? + let election_event = match election_event_id.clone() { + None => None, + Some(election_event_id) => { + get_election_event_by_id_if_exist( + &hasura_transaction, + &tenant_id, + &election_event_id, + ) + .await? } - _ => list_users(&hasura_transaction, &keycloak_transaction, filter) - .await - .with_context(|| "Failed to featch list_users")?, }; - let mut filtered_users = users.clone(); + let mut keycloak_db_client: DbClient = get_keycloak_pool() + .await + .get() + .await + .map_err(|err| anyhow!("{}", err))?; + let batch_size = PgConfig::from_env()?.default_sql_batch_size; - match audience_selection { - AudienceSelection::NOT_VOTED => filtered_users.retain(|user| { - user.votes_info - .as_ref() - .is_some_and(|vote_info| vote_info.is_empty()) - }), - AudienceSelection::VOTED => filtered_users.retain(|user| { - user.votes_info - .as_ref() - .is_some_and(|vote_info| !vote_info.is_empty()) - }), - _ => {} + let Some(audience_selection) = body.audience_selection.clone() else { + return Err(Error::String("Missing audience selection".to_string())); }; - - let email_sender = EmailSender::new().await?; - let sms_sender = SmsSender::new().await?; - let mut metrics = Metrics { - election_event: MetricsUnit { - num_emails_sent: 0, - num_sms_sent: 0, - }, - metrics_by_election_id: Default::default(), + let user_ids = match audience_selection { + AudienceSelection::SELECTED => 
body.audience_voter_ids.clone(), + _ => None, }; - let Some(communication_method) = body.communication_method.clone() else { - return Err(Error::String("Missing template method".into())); + // perform listing in batches in a read-only repeatable transaction, and + // perform stats updates in a new stats transaction each time - because for + // each mail/sms sent, there's no rollback for that. + let mut processed: i32 = 0; + event!(Level::INFO, "before transaction"); + let keycloak_transaction = keycloak_db_client + .transaction() + .await + .map_err(|err| anyhow!("{err}"))?; + event!(Level::INFO, "before isolation"); + keycloak_transaction + .simple_query("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;") + .await + .with_context(|| "can't set transaction isolation level")?; + event!(Level::INFO, "after isolation"); + let mut hasura_db_client: DbClient = get_hasura_pool() + .await + .get() + .await + .with_context(|| "Error loading hasura db client")?; + + let elections_by_area = match election_event_id.clone() { + None => HashMap::new(), + Some(ref election_event_id) => get_elections_by_area( + &hasura_transaction, + tenant_id.as_str(), + election_event_id.as_str(), + ) + .await + .with_context(|| "Error listing elections by area")?, }; - for user in filtered_users.iter() { - let success = send_template_email_or_sms( + loop { + let hasura_transaction = hasura_db_client + .transaction() + .await + .with_context(|| "Error creating a transaction")?; + + let filter = ListUsersFilter { + tenant_id: tenant_id.clone(), + election_event_id: election_event_id.clone(), + election_id: None, + area_id: None, + realm: realm.clone(), + search: None, + first_name: None, + last_name: None, + username: None, + email: None, + limit: Some(batch_size), + offset: Some(processed), + user_ids: user_ids.clone(), + attributes: None, + enabled: None, + email_verified: None, + sort: None, + has_voted: None, + authorized_to_election_alias: None, + }; + + let (users, total_count) = match 
audience_selection { + AudienceSelection::NOT_VOTED | AudienceSelection::VOTED => { + list_users_with_vote_info(&hasura_transaction, &keycloak_transaction, filter) + .await + .with_context(|| "Failed to featch list_users_with_vote_info")? + } + _ => list_users(&hasura_transaction, &keycloak_transaction, filter) + .await + .with_context(|| "Failed to featch list_users")?, + }; + + let mut filtered_users = users.clone(); + + match audience_selection { + AudienceSelection::NOT_VOTED => filtered_users.retain(|user| { + user.votes_info + .as_ref() + .is_some_and(|vote_info| vote_info.is_empty()) + }), + AudienceSelection::VOTED => filtered_users.retain(|user| { + user.votes_info + .as_ref() + .is_some_and(|vote_info| !vote_info.is_empty()) + }), + _ => {} + }; + + let email_sender = EmailSender::new().await?; + let sms_sender = SmsSender::new().await?; + let mut metrics = Metrics { + election_event: MetricsUnit { + num_emails_sent: 0, + num_sms_sent: 0, + }, + metrics_by_election_id: Default::default(), + }; + + let Some(communication_method) = body.communication_method.clone() else { + return Err(Error::String("Missing template method".into())); + }; + + for user in filtered_users.iter() { + let success = send_template_email_or_sms( + &hasura_transaction, + user, + &election_event, + &tenant_id, + Some(admin_id.clone()), + &body.email, + &body.sms, + &email_sender, + &sms_sender, + Some(communication_method.clone()), + ) + .await; + update_metrics( + &mut metrics, + &elections_by_area, + user, + /* communication_method */ &communication_method, + /* success */ success.is_ok(), + ); + } + + let page_len: i32 = users.len().try_into().map_err(|err| anyhow!("{err}"))?; + processed = processed + .checked_add(page_len) + .expect("send_template processed count overflow"); + + // update stats + update_stats( &hasura_transaction, - user, - &election_event, &tenant_id, - Some(admin_id.clone()), - &body.email, - &body.sms, - &email_sender, - &sms_sender, - 
Some(communication_method.clone()), + &election_event_id, + &metrics, ) - .await; - update_metrics( - &mut metrics, - &elections_by_area, - user, - /* communication_method */ &communication_method, - /* success */ success.is_ok(), - ); - } + .await + .with_context(|| "Error updating stats")?; - let page_len: i32 = users.len().try_into().map_err(|err| anyhow!("{err}"))?; - processed = processed - .checked_add(page_len) - .expect("send_template processed count overflow"); - - // update stats - update_stats( - &hasura_transaction, - &tenant_id, - &election_event_id, - &metrics, - ) - .await - .with_context(|| "Error updating stats")?; + hasura_transaction + .commit() + .await + .with_context(|| "Error committing update stats transaction")?; - hasura_transaction + if processed >= total_count { + break; + } + } + keycloak_transaction .commit() .await - .with_context(|| "Error committing update stats transaction")?; + .with_context(|| "error comitting transaction")?; - if processed >= total_count { - break; - } + Ok(()) } - keycloak_transaction - .commit() - .await - .with_context(|| "error comitting transaction")?; - - Ok(()) } +pub use send_template_task::send_template; /// In the case of rejection: /// admin_id and election_event are not needed so both can be set to None. @@ -543,6 +599,10 @@ pub async fn send_template( /// /// In the case of acceptance: /// All the fields are required. +/// +/// # Errors +/// +/// Returns an error when the chosen channel’s send path fails after attempting delivery. 
#[instrument(err, skip(election_event, email_sender, sms_sender))] pub async fn send_template_email_or_sms( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/tasks/set_public_key.rs b/packages/windmill/src/tasks/set_public_key.rs index c229f5473f..a1ac8fbde0 100644 --- a/packages/windmill/src/tasks/set_public_key.rs +++ b/packages/windmill/src/tasks/set_public_key.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Updates trustee mixnet public keys from uploaded material. use crate::postgres::{keys_ceremony, trustee}; use crate::services::ceremonies::keys_ceremony::get_keys_ceremony_board; use crate::services::ceremonies::serialize_logs::generate_logs; @@ -28,6 +28,11 @@ use std::collections::HashSet; use strand::signature::StrandSignaturePk; use tracing::{event, info, instrument, Level}; +/// Derives a trustee’s key status in the keys ceremony. +/// +/// # Errors +/// +/// Returns an error if a stored public key is not valid DER/base64. #[instrument(skip(trustees_hasura, messages), err)] fn get_trustee_status( trustee_name: &str, @@ -58,6 +63,11 @@ fn get_trustee_status( } } +/// Reads mixnet board messages and updates keys ceremony JSON status. +/// +/// # Errors +/// +/// Propagates database, board fetch, serialization, trustee mismatch, or commit failures. 
pub async fn set_public_key_impl( tenant_id: String, election_event_id: String, @@ -172,15 +182,25 @@ pub async fn set_public_key_impl( Ok(()) } -#[instrument(err)] -#[wrap_map_err::wrap_map_err(TaskError)] -#[celery::task(max_retries = 0)] -pub async fn set_public_key( - tenant_id: String, - election_event_id: String, - keys_ceremony_id: String, -) -> Result<()> { - set_public_key_impl(tenant_id, election_event_id, keys_ceremony_id).await?; +mod set_public_key_task { + #![allow(missing_docs)] + #![allow(clippy::missing_docs_in_private_items)] - Ok(()) + use super::*; + + /// Celery task: pulls the published mixnet public key for a keys ceremony and persists trustee/ceremony status. + #[instrument(err)] + #[wrap_map_err::wrap_map_err(TaskError)] + #[celery::task(max_retries = 0)] + pub async fn set_public_key( + tenant_id: String, + election_event_id: String, + keys_ceremony_id: String, + ) -> Result<()> { + set_public_key_impl(tenant_id, election_event_id, keys_ceremony_id).await?; + + Ok(()) + } } + +pub use set_public_key_task::set_public_key; diff --git a/packages/windmill/src/tasks/update_election_event_ballot_styles.rs b/packages/windmill/src/tasks/update_election_event_ballot_styles.rs index 4f4609677d..8304d5826d 100644 --- a/packages/windmill/src/tasks/update_election_event_ballot_styles.rs +++ b/packages/windmill/src/tasks/update_election_event_ballot_styles.rs @@ -1,13 +1,16 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Updates ballot style JSON attached to an election event +#![allow(missing_docs)] +#![allow(clippy::missing_docs_in_private_items)] use celery::error::TaskError; use tracing::instrument; use crate::services::ballot_styles::ballot_style; use crate::types::error::Result; +/// Update Election Event Ballot Styles. 
 #[instrument(err)]
 #[wrap_map_err::wrap_map_err(TaskError)]
 #[celery::task(max_retries = 0)]
diff --git a/packages/windmill/src/tasks/upsert_areas.rs b/packages/windmill/src/tasks/upsert_areas.rs
index cf549a5f94..d5070fbae4 100644
--- a/packages/windmill/src/tasks/upsert_areas.rs
+++ b/packages/windmill/src/tasks/upsert_areas.rs
@@ -1,7 +1,7 @@
 // SPDX-FileCopyrightText: 2025 Sequent Tech Inc
 //
 // SPDX-License-Identifier: AGPL-3.0-only
-
+//! Upserts voting areas from an imported document.
 use crate::postgres::area::{get_event_areas, insert_areas, upsert_area_parents};
 use crate::postgres::area_contest::insert_area_contests;
 use crate::postgres::contest::export_contests;
@@ -19,6 +19,11 @@ use std::io::Seek;
 use tracing::instrument;
 use uuid::Uuid;
 
+/// Merges areas from an import document with the existing event areas and updates the database.
+///
+/// # Errors
+///
+/// Returns an error if the pool, transaction, document lookup, or CSV processing fails.
 #[instrument(err)]
 pub async fn upsert_areas_task(
     tenant_id: String,
diff --git a/packages/windmill/src/types/application.rs b/packages/windmill/src/types/application.rs
index d639268637..38eff1708b 100644
--- a/packages/windmill/src/types/application.rs
+++ b/packages/windmill/src/types/application.rs
@@ -2,23 +2,31 @@
 //
 // SPDX-License-Identifier: AGPL-3.0-only
 
+//! Voter enrollment application enums (status, type, rejection reasons, and error tags).
 use serde::{Deserialize, Serialize};
 use strum_macros::{Display, EnumString, EnumVariantNames};
 
+/// Lifecycle state of a voter's enrollment application.
 #[derive(
     Display, Debug, PartialEq, Eq, Clone, EnumString, EnumVariantNames, Serialize, Deserialize,
 )]
 pub enum ApplicationStatus {
+    /// Awaiting review.
     PENDING,
+    /// Application was approved.
     ACCEPTED,
+    /// Application was denied.
     REJECTED,
 }
 
+/// Whether the application is resolved by automated rules or requires manual review.
#[derive( Display, Debug, PartialEq, Eq, Clone, EnumString, EnumVariantNames, Serialize, Deserialize, )] pub enum ApplicationType { + /// Matched or rejected by system rules. AUTOMATIC, + /// Left open for an administrator to review. MANUAL, } @@ -35,13 +43,18 @@ pub enum ApplicationType { Serialize, Deserialize, )] +/// Machine-readable reason stored when an application is rejected. pub enum ApplicationRejectReason { + /// Submitted data did not satisfy minimum requirements for matching. #[strum(to_string = "insufficient-information")] INSUFFICIENT_INFORMATION, + /// No voter record matched the supplied identity or credentials. #[strum(to_string = "no-matching-voter")] NO_VOTER, + /// A voter with the same enrollment is already approved for this election. #[strum(to_string = "voter-already-approved")] ALREADY_APPROVED, + /// Rejection reason not covered by the specific variants above. #[default] #[strum(to_string = "other")] OTHER, //mandatory comment @@ -51,7 +64,9 @@ pub enum ApplicationRejectReason { #[derive( Display, Debug, PartialEq, Eq, Clone, EnumString, EnumVariantNames, Serialize, Deserialize, )] +/// Tags application-related failures surfaced to clients. pub enum ApplicationsError { + /// The target voter already has an approved application, so the action is invalid. #[strum(serialize = "Approved_Voter")] #[strum(to_string = "Approved_Voter")] APPROVED_VOTER, diff --git a/packages/windmill/src/types/documents.rs b/packages/windmill/src/types/documents.rs index 9d85c0255c..3818e924e7 100644 --- a/packages/windmill/src/types/documents.rs +++ b/packages/windmill/src/types/documents.rs @@ -1,9 +1,11 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Export/import document types and filenames. use strum_macros::{Display, EnumString, EnumVariantNames}; +/// Document types for export/import operations. 
+#[allow(missing_docs)]
 #[derive(Display, Debug, PartialEq, Eq, Clone, EnumString, EnumVariantNames)]
 pub enum EDocuments {
     ELECTION_EVENT,
@@ -21,11 +23,13 @@ pub enum EDocuments {
     PUBLICATIONS,
     TALLY,
     IMAGES,
+    /// Election event configuration presentation for the voting portal.
     ELECTION_EVENT_CONFIG,
     CERTIFICATES,
 }
 
 impl EDocuments {
+    /// Base filename segment used when writing this document into an export archive.
     pub fn to_file_name(&self) -> &str {
         match self {
             EDocuments::ELECTION_EVENT => "export_election_event",
@@ -49,6 +53,8 @@ impl EDocuments {
     }
 }
 
+/// Document types for tally results export operations.
+#[allow(missing_docs)]
 pub enum ETallyDocuments {
     TALLY_SESSION,
     TALLY_SESSION_CONTEST,
@@ -63,6 +69,7 @@ pub enum ETallyDocuments {
 }
 
 impl ETallyDocuments {
+    /// Base filename segment used when exporting this tally document.
     pub fn to_file_name(&self) -> &str {
         match self {
             ETallyDocuments::TALLY_SESSION => "export_tally_session",
diff --git a/packages/windmill/src/types/error.rs b/packages/windmill/src/types/error.rs
index 725b717c62..25be6c68a9 100644
--- a/packages/windmill/src/types/error.rs
+++ b/packages/windmill/src/types/error.rs
@@ -1,6 +1,7 @@
 // SPDX-FileCopyrightText: 2025 Sequent Tech Inc
 //
 // SPDX-License-Identifier: AGPL-3.0-only
+//! Top-level error enum and conversions from external failures.
 
 use celery;
 use celery::prelude::TaskError;
@@ -9,28 +10,37 @@ use keycloak;
 use sequent_core::util::integrity_check::HashFileVerifyError;
 use serde_json;
 use strand::util::StrandError;
+
 quick_error! {
+    /// Error surface for tasks and helpers, convertible to Celery [`TaskError`].
     #[derive(Debug)]
     pub enum Error {
+        /// Failure propagated through an [`anyhow::Error`] context chain.
         Anyhow(err: anyhow::Error) {
             from()
         }
+        /// CSV export/import error.
         Csv(err: csv::Error) {
             from()
         }
+        /// Free-form message, including stringified errors from external crates.
String(err: String) { from() from(err: &str) -> (err.into()) } + /// PostgreSQL driver or query execution failure. Postgres(err: tokio_postgres::Error) { from() } + /// I/O error on a specific filesystem path. FileAccess(path: std::path::PathBuf, err: std::io::Error) { display("An error occurred while accessing the file at '{}': {}", path.display(), err) } + /// Integer conversion exceeded the target type range. TryFromIntError(err: std::num::TryFromIntError) { from() } + /// Ballot or artifact hash did not match the expected digest. HashFileVerifyError(err: HashFileVerifyError) { from() display("{}", err.to_string()) @@ -104,4 +114,5 @@ impl From for Error { } } +/// Result type specialized with [`Error`] as the default `E`. pub type Result = std::result::Result; diff --git a/packages/windmill/src/types/hasura_types.rs b/packages/windmill/src/types/hasura_types.rs index c475314c19..e3a9d850bf 100644 --- a/packages/windmill/src/types/hasura_types.rs +++ b/packages/windmill/src/types/hasura_types.rs @@ -1,14 +1,24 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + #![allow(non_camel_case_types)] +//! Rust shapes for GraphQL types where Hasura exposes PostgreSQL scalars. + use serde_json::Value; +/// GraphQL / Hasura `uuid` column serialized as a string in JSON payloads. pub type uuid = String; +/// GraphQL / Hasura `jsonb` column as arbitrary JSON ([`serde_json::Value`]). pub type jsonb = Value; +/// RFC 3339 timestamp string from a `timestamptz` column. pub type timestamptz = String; +/// Hex or base64-encoded binary from a `bytea` column. pub type bytea = String; +/// Unbounded text column (`text`). pub type text = String; +/// Bounded text column (`varchar`). pub type varchar = String; +/// Numeric column exposed as a floating-point value in generated types. 
pub type numeric = f64; diff --git a/packages/windmill/src/types/miru_plugin.rs b/packages/windmill/src/types/miru_plugin.rs index b1550fe703..836c8c5f3d 100644 --- a/packages/windmill/src/types/miru_plugin.rs +++ b/packages/windmill/src/types/miru_plugin.rs @@ -1,76 +1,120 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only +//! JSON payloads for Miru (SBEI) signing servers, documents, and transmission packages. + use sequent_core::types::ceremonies::Log; use serde::{Deserialize, Serialize}; use strum_macros::Display; use strum_macros::EnumString; +/// Cryptographic signature produced by a Miru instance for one outbound document. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiruSignature { + /// Miru user or installation identifier. pub sbei_miru_id: String, + /// Public key material used to verify `signature`. pub pub_key: String, + /// Signature bytes over the document hash or payload. pub signature: String, + /// Fingerprint of the X.509 certificate bound to this signature. pub certificate_fingerprint: String, } +/// Delivery outcome when a document was pushed to a Miru server. +#[allow(missing_docs)] #[derive(Display, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, EnumString)] pub enum MiruServerDocumentStatus { SUCCESS, ERROR, } +/// One Miru server's response for a document send attempt. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiruServerDocument { + /// Hostname or logical Miru node name. pub name: String, - pub sent_at: String, // date using ISO8601/rfc3339 + /// ISO8601/rfc3339 timestamp when the document was submitted. + pub sent_at: String, + /// Whether the submission succeeded on this server. pub status: MiruServerDocumentStatus, } +/// Stable identifiers Miru assigns to each document format in a transmission. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiruDocumentIds { #[serde(default)] + /// Identifier for the EML election interchange payload. 
pub eml: String, + /// Identifier for the compressed XZ bundle. pub xz: String, + /// Identifier for the multi-format archive sent to all servers. pub all_servers: String, } +/// One logical document (possibly multiple formats) and its per-server delivery trail. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiruDocument { + /// Miru-side IDs for each serialized representation. pub document_ids: MiruDocumentIds, + /// End-to-end transaction id grouping related sends. pub transaction_id: String, + /// Per-target-server send results for this document. pub servers_sent_to: Vec, + /// Creation time of this record in ISO 8601 / RFC 3339 form. pub created_at: String, + /// Signatures collected from Miru for this document set. pub signatures: Vec, } +/// Miru CCS endpoint. #[derive(Eq, PartialEq, Serialize, Deserialize, Debug, Clone)] pub struct MiruCcsServer { + /// Human-readable server label. pub name: String, + /// Deployment or version tag for the node. pub tag: String, + /// Network location of the Miru API. pub address: String, + /// PEM-encoded public key advertised by the server. pub public_key_pem: String, + /// When true, this node should receive ceremony log excerpts. pub send_logs: Option, } +/// Everything needed to build or audit one Miru transmission package for an election area. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct MiruTransmissionPackageData { + /// Election this package belongs to. pub election_id: String, + /// Area within the election. pub area_id: String, + /// Miru CCS servers for this package. pub servers: Vec, + /// Documents and receipts exchanged with Miru for this dispatch. pub documents: Vec, + /// Ceremony log lines included for external verification. pub logs: Vec, + /// Cryptographic threshold parameter for the tally ceremony. pub threshold: i64, } +/// Miru-side account. #[derive(PartialEq, Eq, Serialize, Deserialize, Debug, Clone)] pub struct MiruSbeiUser { + /// Username. 
pub username: String, + /// Miru's internal user id string. pub miru_id: String, + /// Role name as Miru models it (e.g. operator vs observer). pub miru_role: String, + /// Display name shown in Miru UIs. pub miru_name: String, + /// Election identifier Miru uses for scoping requests. pub miru_election_id: String, + /// Certificate fingerprint when the user is bound to a client cert. pub certificate_fingerprint: Option, } +/// Ordered list of per-area transmission packages loaded for a tally session. pub type MiruTallySessionData = Vec; diff --git a/packages/windmill/src/types/mod.rs b/packages/windmill/src/types/mod.rs index 345951c2f5..8a48f81ebb 100644 --- a/packages/windmill/src/types/mod.rs +++ b/packages/windmill/src/types/mod.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: AGPL-3.0-only +//! Shared error types, task payloads, and cross-crate DTOs used by workers and services. + pub mod application; pub mod documents; pub mod error; diff --git a/packages/windmill/src/types/resources.rs b/packages/windmill/src/types/resources.rs index c3d7bbe8f0..e256468003 100644 --- a/packages/windmill/src/types/resources.rs +++ b/packages/windmill/src/types/resources.rs @@ -1,34 +1,45 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only + +//! Database query result types and pagination helpers. use anyhow::anyhow; use electoral_log::assign_value; use immudb_rs::{sql_value::Value, Client, NamedParam, Row, SqlValue}; use serde::{Deserialize, Serialize}; use strum_macros::{Display, EnumString}; +/// Single-column `COUNT(*)` (or equivalent) extracted from an SQL row. #[derive(Serialize, Deserialize, Debug)] pub struct Aggregate { + /// Number of matching rows for the aggregate expression. pub count: i64, } +/// Wrapper GraphQL uses for `aggregate { count }` style totals. #[derive(Serialize, Deserialize, Debug)] pub struct TotalAggregate { + /// Nested aggregate payload. 
pub aggregate: Aggregate, } -// Enumeration for the valid order directions +/// Enumeration for the valid order directions #[derive(Debug, Deserialize, EnumString, Display, Clone)] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] pub enum OrderDirection { + /// Ascending key order. Asc, + /// Descending key order. Desc, } +/// Page of rows plus total count metadata from a list query. #[derive(Serialize, Deserialize, Debug)] pub struct DataList { + /// Rows returned for the current page. pub items: Vec, + /// Total number of rows matching the filter (not only this page). pub total: TotalAggregate, } diff --git a/packages/windmill/src/types/tasks.rs b/packages/windmill/src/types/tasks.rs index 3df608c002..80e257edd3 100644 --- a/packages/windmill/src/types/tasks.rs +++ b/packages/windmill/src/types/tasks.rs @@ -1,40 +1,68 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only - +//! Celery task identifiers and their human-readable display names. use strum_macros::{Display, EnumString, EnumVariantNames}; +/// Celery-backed long-running jobs Windmill registers by name. #[derive(Display, Debug, PartialEq, Eq, Clone, EnumString, EnumVariantNames)] pub enum ETasksExecution { + /// Export the full election event. EXPORT_ELECTION_EVENT, + /// Export tenant-level configuration. EXPORT_TENANT_CONFIG, + /// Restore tenant configuration from an export bundle. IMPORT_TENANT_CONFIG, + /// Import candidates. IMPORT_CANDIDATES, + /// Import voters. IMPORT_USERS, + /// Create a new election event. CREATE_ELECTION_EVENT, + /// Restore an election from an exported archive. IMPORT_ELECTION_EVENT, + /// Export the voter register for an event. EXPORT_VOTERS, + /// Build the transmission package for signing. CREATE_TRANSMISSION_PACKAGE, + /// Export published ballot material for auditors. EXPORT_BALLOT_PUBLICATION, + /// Generate the activity log spreadsheet or PDF report. 
EXPORT_ACTIVITY_LOGS_REPORT, + /// Produce a voter-facing ballot receipt artifact. CREATE_BALLOT_RECEIPT, + /// Run a configured report template against live data. GENERATE_REPORT, + /// Materialize a document template with current placeholders. GENERATE_TEMPLATE, + /// Emit the transmission audit report after Miru dispatch. GENERATE_TRANSMISSION_REPORT, + /// Export voter enrollment applications for an event. EXPORT_APPLICATION, + /// Import applications. IMPORT_APPLICATION, + /// Export trustee keys and ceremony configuration. EXPORT_TRUSTEES, + /// Render a document to PDF. RENDER_DOCUMENT_PDF, + /// Create a new tenant. CREATE_TENANT, + /// Export templates. EXPORT_TEMPLATES, + /// Import templates. IMPORT_TEMPLATES, + /// Delete an election event. DELETE_ELECTION_EVENT, + /// Prepare publication preview assets. PREPARE_PUBLICATION_PREVIEW, + /// Spreadsheet export of decoded tally results. EXPORT_TALLY_RESULTS_XLSX, + /// Export certificate authorities. EXPORT_CERTIFICATE_AUTHORITIES, } impl ETasksExecution { + /// Human-readable label shown in admin UIs and logs for this task. pub fn to_name(&self) -> &str { match self { ETasksExecution::EXPORT_ELECTION_EVENT => "Export Election Event",