diff --git a/packages/windmill/Cargo.toml b/packages/windmill/Cargo.toml index d784990b5c..5e557db011 100644 --- a/packages/windmill/Cargo.toml +++ b/packages/windmill/Cargo.toml @@ -145,17 +145,32 @@ dhat = "0.3" [lints.rustdoc] missing_crate_level_docs = "deny" - +broken_intra_doc_links = "deny" [lints.rust] missing_docs = "deny" - +unsafe_code = "forbid" +private_interfaces = "warn" +private_bounds = "warn" +unnameable_types = "warn" +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage,coverage_nightly)'] } [lints.clippy] missing_docs_in_private_items = "deny" missing_errors_doc = "deny" missing_panics_doc = "deny" +doc_markdown = "deny" +unwrap_used = "deny" +panic = "deny" +shadow_unrelated = "deny" +print_stdout = "deny" +print_stderr = "deny" +indexing_slicing = "deny" +missing_const_for_fn = "deny" +future_not_send = "deny" arithmetic_side_effects = "deny" +suspicious = "deny" complexity = "deny" style = "deny" -perf = "deny" \ No newline at end of file +perf = "deny" +pedantic = "deny" \ No newline at end of file diff --git a/packages/windmill/external-bin/generate_logs.rs b/packages/windmill/external-bin/generate_logs.rs index 6c7509062d..62a4fa786d 100644 --- a/packages/windmill/external-bin/generate_logs.rs +++ b/packages/windmill/external-bin/generate_logs.rs @@ -43,7 +43,7 @@ struct Cli { } #[derive(Deserialize, Debug)] -/// Configuration for the generate_logs tool. +/// Configuration for the `generate_logs` tool. struct Config { /// Immudb URL immudb_url: String, @@ -67,7 +67,7 @@ fn sanitize_filename(name: &str) -> String { .collect() } -/// Constructs the immudb board name from tenant_id and election_event_id. +/// Constructs the immudb board name from `tenant_id` and `election_event_id`. /// Replicates logic from `packages/windmill/src/services/protocol_manager.rs`. 
fn get_event_board_name(tenant_id: &str, election_event_id: &str) -> String { let tenant: String = tenant_id @@ -76,7 +76,7 @@ fn get_event_board_name(tenant_id: &str, election_event_id: &str) -> String { .filter(|&c| c != '-') .take(17) .collect(); - format!("tenant{}event{}", tenant, election_event_id) + format!("tenant{tenant}event{election_event_id}") .chars() .filter(|&c| c != '-') .collect() @@ -100,6 +100,7 @@ async fn connect_immudb(config: &Config) -> Result { } #[tokio::main] +#[allow(clippy::too_many_lines)] async fn main() -> Result<()> { // Initialize tracing subscriber // Default to `info` level for this crate if RUST_LOG is not set. @@ -155,7 +156,7 @@ async fn main() -> Result<()> { client .open_session(&board_name) .await - .with_context(|| format!("Failed to open session to board: {}", board_name))?; + .with_context(|| format!("Failed to open session to board: {board_name}"))?; info!(%board_name, "Successfully opened session to board."); let mut total_rows_fetched: i32 = 0; @@ -237,17 +238,13 @@ async fn main() -> Result<()> { Some(id) => config .elections .get(id) - .map(|s| s.as_str()) - .unwrap_or(id) - .to_string(), + .map_or(id.clone(), String::to_string), None => "general_logs".to_string(), }; let sanitized_stem = sanitize_filename(&filename_stem_key); if !csv_writers.contains_key(&sanitized_stem) { - let csv_path = cli - .output_folder_path - .join(format!("{}.csv", sanitized_stem)); + let csv_path = cli.output_folder_path.join(format!("{sanitized_stem}.csv")); info!(file_path = %csv_path.display(), election_id_key = %filename_stem_key, "Creating new CSV file."); let file = File::create(&csv_path).with_context(|| { format!("Failed to create CSV file: {}", csv_path.display()) @@ -279,10 +276,10 @@ async fn main() -> Result<()> { } info!("Finished processing all batches from Immudb stream."); - for (filename_stem, writer) in csv_writers.iter_mut() { + for (filename_stem, writer) in &mut csv_writers { writer .flush() - .with_context(|| 
format!("Failed to flush CSV writer for {}", filename_stem))?; + .with_context(|| format!("Failed to flush CSV writer for {filename_stem}"))?; info!( filename_stem, count = activity_log_written_counts.get(filename_stem).unwrap_or(&0), diff --git a/packages/windmill/src/bin/beat.rs b/packages/windmill/src/bin/beat.rs index 95746b0696..20b95a9e2f 100644 --- a/packages/windmill/src/bin/beat.rs +++ b/packages/windmill/src/bin/beat.rs @@ -1,6 +1,6 @@ #![allow(non_upper_case_globals)] #![recursion_limit = "256"] -//! Celery Beat process for Windmill: registers periodic tasks and publishes them to RabbitMQ. +//! Celery Beat process for Windmill: registers periodic tasks and publishes them to `RabbitMQ`. // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only @@ -23,18 +23,18 @@ use windmill::tasks::scheduled_reports::scheduled_reports; #[derive(Debug, Parser)] #[command(name = "beat", about = "Windmill's periodic task scheduler.")] struct CeleryOpt { - /// Interval between `review_boards` dispatches. - #[arg(short = 'r', long, default_value = "15")] - review_boards_interval: u64, - /// Interval between `scheduled_events` dispatches. - #[arg(short = 's', long, default_value = "10")] - schedule_events_interval: u64, - /// Interval between `scheduled_reports` dispatches. - #[arg(short = 'c', long, default_value = "60")] - schedule_reports_interval: u64, - /// Interval between `electoral_log_batch_dispatcher` dispatches. - #[arg(short = 'e', long, default_value = "5")] - electoral_log_interval: u64, + /// Interval between `review_boards` dispatches (seconds). + #[arg(short = 'r', long = "review-boards-interval", default_value = "15")] + review_boards: u64, + /// Interval between `scheduled_events` dispatches (seconds). + #[arg(short = 's', long = "schedule-events-interval", default_value = "10")] + schedule_events: u64, + /// Interval between `scheduled_reports` dispatches (seconds). 
+ #[arg(short = 'c', long = "schedule-reports-interval", default_value = "60")] + schedule_reports: u64, + /// Interval between `electoral_log_batch_dispatcher` dispatches (seconds). + #[arg(short = 'e', long = "electoral-log-interval", default_value = "5")] + electoral_log: u64, } /// Starts the beat scheduler: loads env, wires periodic tasks, and blocks until shutdown. @@ -50,22 +50,22 @@ async fn main() -> Result<()> { tasks = [ review_boards::NAME => { review_boards, - schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().review_boards_interval)), + schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().review_boards)), args = (), }, scheduled_events::NAME => { scheduled_events, - schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().schedule_events_interval)), - args = (CeleryOpt::parse().schedule_events_interval), + schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().schedule_events)), + args = (CeleryOpt::parse().schedule_events), }, scheduled_reports::NAME => { scheduled_reports, - schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().schedule_reports_interval)), - args = (CeleryOpt::parse().schedule_events_interval), + schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().schedule_reports)), + args = (CeleryOpt::parse().schedule_events), }, electoral_log_batch_dispatcher::NAME => { electoral_log_batch_dispatcher, - schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().electoral_log_interval)), + schedule = DeltaSchedule::new(Duration::from_secs(CeleryOpt::parse().electoral_log)), args = (), }, ], diff --git a/packages/windmill/src/bin/main.rs b/packages/windmill/src/bin/main.rs index 34de5601c8..78e90534e1 100644 --- a/packages/windmill/src/bin/main.rs +++ b/packages/windmill/src/bin/main.rs @@ -1,5 +1,6 @@ #![allow(non_upper_case_globals)] #![recursion_limit = "256"] +#![allow(clippy::non_std_lazy_statics)] //! 
Celery worker binary for Windmill: runs the Celery app as a queue consumer or in produce-only mode. // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // @@ -17,7 +18,11 @@ use sequent_core::util::init_log::init_log; use std::collections::HashMap; use tokio::runtime::Builder; use tracing::{event, Level}; -use windmill::services::celery_app::*; +use windmill::services::celery_app::{ + get_celery_app, get_worker_threads, set_acks_late, set_broker_connection_max_retries, + set_heartbeat, set_is_app_active, set_prefetch_count, set_queues, set_task_max_retries, + set_worker_threads, Queue, +}; use windmill::services::probe::{setup_probe, AppName}; use windmill::services::tasks_semaphore::init_semaphore; @@ -26,22 +31,21 @@ use windmill::services::tasks_semaphore::init_semaphore; /// # Panics /// /// Panics if `ENV_SLUG` is not set in the environment. -fn get_queue_name(queue: Queue) -> String { - let slug = std::env::var("ENV_SLUG") - .with_context(|| "missing env var ENV_SLUG") - .unwrap(); +fn get_queue_name(queue: &Queue) -> String { + let slug = + std::env::var("ENV_SLUG").expect("ENV_SLUG must be set before resolving AMQP queue names"); queue.queue_name(&slug) } lazy_static! 
{ - static ref BEAT_QUEUE_NAME: String = get_queue_name(Queue::Beat); - static ref SHORT_QUEUE_NAME: String = get_queue_name(Queue::Short); - static ref ELECTORAL_LOG_BEAT_QUEUE_NAME: String = get_queue_name(Queue::ElectoralLogBeat); - static ref COMMUNICATION_QUEUE_NAME: String = get_queue_name(Queue::Communication); - static ref TALLY_QUEUE_NAME: String = get_queue_name(Queue::Tally); - static ref REPORTS_QUEUE_NAME: String = get_queue_name(Queue::Reports); - static ref IMPORT_EXPORT_QUEUE_NAME: String = get_queue_name(Queue::ImportExport); - static ref ELECTORAL_LOG_BATCH_QUEUE_NAME: String = get_queue_name(Queue::ElectoralLogBatch); + static ref BEAT_QUEUE_NAME: String = get_queue_name(&Queue::Beat); + static ref SHORT_QUEUE_NAME: String = get_queue_name(&Queue::Short); + static ref ELECTORAL_LOG_BEAT_QUEUE_NAME: String = get_queue_name(&Queue::ElectoralLogBeat); + static ref COMMUNICATION_QUEUE_NAME: String = get_queue_name(&Queue::Communication); + static ref TALLY_QUEUE_NAME: String = get_queue_name(&Queue::Tally); + static ref REPORTS_QUEUE_NAME: String = get_queue_name(&Queue::Reports); + static ref IMPORT_EXPORT_QUEUE_NAME: String = get_queue_name(&Queue::ImportExport); + static ref ELECTORAL_LOG_BATCH_QUEUE_NAME: String = get_queue_name(&Queue::ElectoralLogBatch); } /// Celery options for the Windmill Celery worker process. @@ -76,11 +80,11 @@ enum CeleryOpt { Produce, } -/// Finds duplicates in a vector of strings. -fn find_duplicates(input: Vec<&str>) -> Vec<&str> { +/// Finds duplicates in a slice of queue name strings. +fn find_duplicates<'a>(input: &'a [&'a str]) -> Vec<&'a str> { let mut occurrences = HashMap::new(); let mut duplicates = Vec::new(); - for &item in &input { + for &item in input { let count: &mut i32 = occurrences.entry(item).or_insert(0); *count = (*count) .checked_add(1) @@ -126,6 +130,9 @@ fn main() -> Result<(), Box> { } /// Runs the Celery app. 
+/// +/// `celery`'s broker delivery stream is not `Send`; the worker runs on a single runtime thread. +#[allow(clippy::future_not_send)] async fn async_main(opt: CeleryOpt) -> Result<()> { init_log(true); setup_probe(AppName::WINDMILL).await; @@ -157,15 +164,15 @@ async fn async_main(opt: CeleryOpt) -> Result<()> { if queue_name.starts_with(&slug) { queue_name.clone() } else { - format!("{}_{}", slug, queue_name) + format!("{slug}_{queue_name}") } }) .collect(); let vec_str: Vec<&str> = queues.iter().map(AsRef::as_ref).collect(); - let duplicates = find_duplicates(vec_str.clone()); + let duplicates = find_duplicates(&vec_str); if !duplicates.is_empty() { - return Err(anyhow!("Found duplicate queues: {:?}", duplicates)); + return Err(anyhow!("Found duplicate queues: {duplicates:?}")); } set_queues(queues.clone()); set_is_app_active(true); @@ -178,6 +185,6 @@ async fn async_main(opt: CeleryOpt) -> Result<()> { event!(Level::INFO, "No new tasks to produce"); celery_app.close().await?; } - }; + } Ok(()) } diff --git a/packages/windmill/src/postgres/application.rs b/packages/windmill/src/postgres/application.rs index a7f734d998..981ac7edc8 100644 --- a/packages/windmill/src/postgres/application.rs +++ b/packages/windmill/src/postgres/application.rs @@ -3,6 +3,8 @@ // SPDX-License-Identifier: AGPL-3.0-only use std::collections::HashMap; +use std::fmt::Write as _; +use std::hash::BuildHasher; use crate::{ services::application::ApplicationAnnotations, @@ -114,20 +116,20 @@ pub async fn get_permission_label_from_post( Ok((permission_label, area_id)) } + /// Insert application into the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. 
- #[instrument(err, skip_all)] -pub async fn insert_application( +pub async fn insert_application( hasura_transaction: &Transaction<'_>, tenant_id: &str, election_event_id: &str, area_id: &Option, applicant_id: &str, - applicant_data: &HashMap, + applicant_data: &HashMap, labels: &Option, annotations: &ApplicationAnnotations, verification_type: &ApplicationType, @@ -194,7 +196,6 @@ pub async fn insert_application( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(err, skip_all)] pub async fn update_application_status( hasura_transaction: &Transaction<'_>, @@ -299,6 +300,7 @@ pub async fn update_application_status( Ok(application) } + /// Get applications for a given area, tenant and election event from the database. /// /// # Errors @@ -310,7 +312,6 @@ pub async fn update_application_status( /// /// Panics only if internal SQL placeholder arithmetic overflows, /// which is not expected in production-sized filters. 
- #[instrument(err, skip_all)] pub async fn get_applications( hasura_transaction: &Transaction<'_>, @@ -344,13 +345,13 @@ pub async fn get_applications( let status; let verification_type; if let Some(filters) = filters { - query.push_str(format!(" AND status = ${param_index}").as_str()); + write!(query, " AND status = ${param_index}").expect("writing to String"); status = filters.clone().status.to_string(); params.push(&status); param_index = param_index.checked_add(1).expect("param_index overflow"); if filters.verification_type.is_some() { - query.push_str(&format!(" AND verification_type = ${param_index}")); + write!(query, " AND verification_type = ${param_index}").expect("writing to String"); verification_type = filters .verification_type .clone() @@ -364,14 +365,14 @@ pub async fn get_applications( query.push_str(" ORDER BY created_at ASC, id ASC"); let lim: i64; if let Some(limit) = limit { - query.push_str(&format!(" LIMIT ${param_index}")); + write!(query, " LIMIT ${param_index}").expect("writing to String"); lim = limit; params.push(&lim); param_index = param_index.checked_add(1).expect("param_index overflow"); } let off: i64; if let Some(offset) = offset { - query.push_str(&format!(" OFFSET ${param_index}")); + write!(query, " OFFSET ${param_index}").expect("writing to String"); off = offset; params.push(&off); } @@ -401,13 +402,16 @@ pub async fn get_applications( Some( offset .unwrap_or(0) - .checked_add(results.len() as i64) + .checked_add( + i64::try_from(results.len()).expect("result count fits in i64 for offset"), + ) .expect("last_offset overflow"), ) }; Ok((results, last_offset)) } + /// Counts applications based on filters. /// /// # Errors @@ -419,7 +423,6 @@ pub async fn get_applications( /// /// Panics only if internal SQL placeholder arithmetic overflows, /// which is not expected in production-sized filters. 
- #[instrument(err, skip_all)] pub async fn count_applications( hasura_transaction: &Transaction<'_>, @@ -437,7 +440,7 @@ pub async fn count_applications( .expect("current_param_place overflow"); "AND area_id = $3 ".to_string() } - None => "".to_string(), + None => String::new(), }; let mut query = format!( @@ -463,8 +466,8 @@ pub async fn count_applications( optional_area_id = Some(parsed_area_id); // Store the value in the variable } - if let Some(ref area_id) = optional_area_id { - params.push(area_id); // Push the reference to the vector + if let Some(area_uuid) = optional_area_id.as_ref() { + params.push(area_uuid); } use serde_json::Value as JsonValue; @@ -477,9 +480,11 @@ pub async fn count_applications( role_json = JsonValue::Array(vec![JsonValue::String(role.to_string())]); let place = current_param_place.to_string(); // Add the dynamic role condition to the query - query.push_str(&format!( + write!( + query, " AND (annotations->>'verified_by_role')::jsonb @> ${place}::jsonb" - )); + ) + .expect("writing to String"); // Push the actual String, not a reference params.push(&role_json); // Now `role_json` is moved into `params`, not borrowed current_param_place = current_param_place @@ -492,7 +497,7 @@ pub async fn count_applications( let verification_type; if let Some(filters) = filters { let place = current_param_place.to_string(); - query.push_str(&format!(" AND status = ${place}")); + write!(query, " AND status = ${place}").expect("writing to String"); status = filters.clone().status.to_string(); params.push(&status); current_param_place = current_param_place @@ -500,8 +505,8 @@ pub async fn count_applications( .expect("current_param_place overflow"); if filters.verification_type.is_some() { - let place = current_param_place.to_string(); - query.push_str(&format!(" AND verification_type = ${place}")); + let vtype_place = current_param_place.to_string(); + write!(query, " AND verification_type = ${vtype_place}").expect("writing to String"); 
verification_type = filters .verification_type .clone() @@ -512,8 +517,7 @@ pub async fn count_applications( } let statement = hasura_transaction.prepare(&query).await.map_err(|err| { - // Print the error before returning it - eprintln!("Error in query: {err:?}"); + event!(Level::ERROR, ?err, "count_applications prepare failed"); anyhow!("Error preparing the application query: {err}") })?; @@ -521,8 +525,7 @@ pub async fn count_applications( .query_one(&statement, ¶ms) .await .map_err(|err| { - // Print the error before returning it - eprintln!("Error in row: {err:?}"); + event!(Level::ERROR, ?err, "count_applications query_one failed"); anyhow!("Error during query: {err}") })?; @@ -530,13 +533,13 @@ pub async fn count_applications( Ok(count) } + /// Get applications for a given election from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(err, skip_all)] pub async fn get_applications_by_election( hasura_transaction: &Transaction<'_>, @@ -635,8 +638,8 @@ pub async fn insert_applications( &application.applicant_data, &application.labels, &application.annotations, - &application.verification_type.to_string(), - &application.status.to_string(), + &application.verification_type, + &application.status, ], ) .await diff --git a/packages/windmill/src/postgres/area.rs b/packages/windmill/src/postgres/area.rs index 1e74008478..a3286e6833 100644 --- a/packages/windmill/src/postgres/area.rs +++ b/packages/windmill/src/postgres/area.rs @@ -40,7 +40,7 @@ impl TryFrom for AreaWrapper { } } /// Returns a vector of areas per election event, with the posibility of -/// filtering by area_id +/// filtering by `area_id` /// /// # Errors /// @@ -376,7 +376,7 @@ pub async fn upsert_area_parents( /// field is invalid, or if an `INSERT` fails at the database layer. 
#[instrument(err, skip_all)] pub async fn insert_areas(hasura_transaction: &Transaction<'_>, areas: &[Area]) -> Result<()> { - let tree_node_areas: Vec = areas.iter().map(|area| area.into()).collect(); + let tree_node_areas: Vec = areas.iter().map(std::convert::Into::into).collect(); let areas_tree = TreeNode::<()>::from_areas(tree_node_areas)?; let areas_map: HashMap = areas .iter() diff --git a/packages/windmill/src/postgres/ballot_publication.rs b/packages/windmill/src/postgres/ballot_publication.rs index e4c83b50f4..34f1944df7 100644 --- a/packages/windmill/src/postgres/ballot_publication.rs +++ b/packages/windmill/src/postgres/ballot_publication.rs @@ -101,7 +101,6 @@ pub async fn get_ballot_publication_by_id( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - pub async fn update_ballot_publication_status( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -158,7 +157,6 @@ pub async fn update_ballot_publication_status( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. 
- pub async fn update_ballot_publication( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -522,7 +520,7 @@ pub async fn soft_delete_other_ballot_publications( let election_id_str = match election_uuid { Some(_) => "AND election_id = $4".to_string(), - None => "".to_string(), + None => String::new(), }; // Publication update query diff --git a/packages/windmill/src/postgres/cast_vote.rs b/packages/windmill/src/postgres/cast_vote.rs index 265350402b..786728ec8a 100644 --- a/packages/windmill/src/postgres/cast_vote.rs +++ b/packages/windmill/src/postgres/cast_vote.rs @@ -92,16 +92,17 @@ pub async fn insert_cast_vote( .await .map_err(|err| anyhow!("Error inserting cast vote: {err}"))?; - let cast_votes: Vec = rows + let mut cast_votes: Vec = rows .into_iter() .map(|row| -> Result { row.try_into() }) .collect::>>()?; - if 1 == cast_votes.len() { - Ok(cast_votes[0].clone()) - } else { - Err(anyhow!("Unexpected rows affected {}", cast_votes.len())) + if cast_votes.len() == 1 { + return cast_votes + .pop() + .ok_or_else(|| anyhow!("expected exactly one cast vote row from insert")); } + Err(anyhow!("Unexpected rows affected {}", cast_votes.len())) } /// Get cast votes for a given tenant, election event, election id and voter id from the database. 
@@ -160,7 +161,7 @@ pub async fn get_cast_votes( ], ) .await - .map_err(|err| anyhow!("Error getting cast votes: {}", err))?; + .map_err(|err| anyhow!("Error getting cast votes: {err}"))?; let cast_votes: Vec = rows .into_iter() diff --git a/packages/windmill/src/postgres/contest.rs b/packages/windmill/src/postgres/contest.rs index 2a907e52b0..d7da525011 100644 --- a/packages/windmill/src/postgres/contest.rs +++ b/packages/windmill/src/postgres/contest.rs @@ -34,9 +34,9 @@ impl TryFrom for ContestWrapper { is_active: item.try_get("is_active")?, description: item.try_get("description")?, presentation: item.try_get("presentation")?, - min_votes: min_votes.map(|val| val as i64), - max_votes: max_votes.map(|val| val as i64), - winning_candidates_num: winning_candidates_num.map(|val| val as i64), + min_votes: min_votes.map(i64::from), + max_votes: max_votes.map(i64::from), + winning_candidates_num: winning_candidates_num.map(i64::from), voting_type: item.try_get("voting_type")?, counting_algorithm: item.try_get("counting_algorithm")?, is_encrypted: item.try_get("is_encrypted")?, @@ -62,6 +62,27 @@ pub async fn insert_contest( for contest in &data.contests { contest.validate()?; + let min_votes_sql: Option = contest + .min_votes + .map(i32::try_from) + .transpose() + .map_err(|_| anyhow!("min_votes out of i32 range for contest {}", contest.id))?; + let max_votes_sql: Option = contest + .max_votes + .map(i32::try_from) + .transpose() + .map_err(|_| anyhow!("max_votes out of i32 range for contest {}", contest.id))?; + let winning_candidates_num_sql: Option = contest + .winning_candidates_num + .map(i32::try_from) + .transpose() + .map_err(|_| { + anyhow!( + "winning_candidates_num out of i32 range for contest {}", + contest.id + ) + })?; + let statement = hasura_transaction .prepare( r" @@ -87,14 +108,14 @@ pub async fn insert_contest( &contest.is_active, &contest.description, &contest.presentation, - &contest.min_votes.map(|val| val as i32), - &contest.max_votes.map(|val| 
val as i32), + &min_votes_sql, + &max_votes_sql, &contest.voting_type, &contest.counting_algorithm, &contest.is_encrypted, &contest.tally_configuration, &contest.conditions, - &contest.winning_candidates_num.map(|val| val as i32), + &winning_candidates_num_sql, &contest.image_document_id, &contest.external_id, ], diff --git a/packages/windmill/src/postgres/document.rs b/packages/windmill/src/postgres/document.rs index 891424c2ee..6bcdba1183 100644 --- a/packages/windmill/src/postgres/document.rs +++ b/packages/windmill/src/postgres/document.rs @@ -135,7 +135,7 @@ pub async fn get_document( Ok(documents.first().cloned()) } -/// Returns a vector of tuples of the (SupportMaterial, Document)s +/// Returns a vector of tuples of the (`SupportMaterial`, `Document`)s /// associated with a given election event. /// /// # Errors @@ -226,9 +226,8 @@ pub async fn insert_document( is_public: bool, document_id: Option, ) -> Result { - let document_uuid: uuid::Uuid = document_id - .map(|id| parse_uuid_v4(&id)) - .unwrap_or(Ok(Uuid::new_v4()))?; + let document_uuid: uuid::Uuid = + document_id.map_or(Ok(Uuid::new_v4()), |id| parse_uuid_v4(&id))?; let election_event_uuid: Option = election_event_id.map(|id| parse_uuid_v4(&id)).transpose()?; diff --git a/packages/windmill/src/postgres/election.rs b/packages/windmill/src/postgres/election.rs index c72cdc2b10..2c5318845b 100644 --- a/packages/windmill/src/postgres/election.rs +++ b/packages/windmill/src/postgres/election.rs @@ -35,7 +35,7 @@ impl TryFrom for ElectionWrapper { status: item.try_get("status")?, eml: item.try_get("eml")?, external_id: item.try_get("external_id")?, - num_allowed_revotes: num_allowed_revotes.map(|val| val as i64), + num_allowed_revotes: num_allowed_revotes.map(i64::from), is_consolidated_ballot_encoding: item.try_get("is_consolidated_ballot_encoding")?, spoil_ballot_option: item.try_get("spoil_ballot_option")?, is_kiosk: item.try_get("is_kiosk")?, @@ -53,7 +53,7 @@ impl TryFrom for ElectionWrapper { } /// 
Returns a vector of areas per election event, with the posibility of -/// filtering by area_id +/// filtering by `area_id` /// /// # Errors /// @@ -98,7 +98,9 @@ pub async fn get_election_max_revotes( .map(|row| { let num_allowed_revotes: Option = row.try_get("num_allowed_revotes")?; - Ok(num_allowed_revotes.unwrap_or(1) as usize) + let n = num_allowed_revotes.unwrap_or(1); + usize::try_from(n) + .map_err(|_| anyhow!("num_allowed_revotes must be non-negative, got {n}")) }) .collect::>>()?; @@ -113,7 +115,6 @@ pub async fn get_election_max_revotes( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(skip(hasura_transaction), err)] pub async fn get_election_by_id( hasura_transaction: &Transaction<'_>, @@ -157,13 +158,13 @@ pub async fn get_election_by_id( Ok(elections.first().cloned()) } + /// Get all elections for a given tenant and election event from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(skip(hasura_transaction), err)] pub async fn get_elections( hasura_transaction: &Transaction<'_>, @@ -202,13 +203,13 @@ pub async fn get_elections( Ok(elections) } + /// Get elections by ids from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_by_ids( hasura_transaction: &Transaction<'_>, @@ -258,13 +259,13 @@ pub async fn get_elections_by_ids( Ok(elections) } + /// Get elections by keys ceremony id from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. 
- #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_by_keys_ceremony_id( hasura_transaction: &Transaction<'_>, @@ -491,13 +492,14 @@ pub async fn create_election( .cloned() .ok_or(anyhow!("Coudln't insert election")) } + /// Insert multiple elections for a given election event into the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - +#[allow(clippy::too_many_lines)] #[instrument(err, skip_all)] pub async fn insert_elections( hasura_transaction: &Transaction<'_>, @@ -510,6 +512,16 @@ pub async fn insert_elections( .clone() .map(|val| parse_uuid_v4(&val)) .transpose()?; + let num_allowed_revotes_sql: Option = election + .num_allowed_revotes + .map(i32::try_from) + .transpose() + .map_err(|_| { + anyhow!( + "num_allowed_revotes out of i32 range for election {}", + election.id + ) + })?; let statement = hasura_transaction .prepare( r" @@ -582,7 +594,7 @@ pub async fn insert_elections( &election.presentation, &election.status, &election.eml, - &election.num_allowed_revotes.map(|val| val as i32), + &num_allowed_revotes_sql, &election.is_consolidated_ballot_encoding, &election.spoil_ballot_option, &election.voting_channels, @@ -602,13 +614,13 @@ pub async fn insert_elections( Ok(()) } + /// Get all elections for a given tenant and election event from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(err, skip_all)] pub async fn export_elections( hasura_transaction: &Transaction<'_>, @@ -649,13 +661,13 @@ pub async fn export_elections( Ok(elections) } + /// Set election keys ceremony for an election and returns the updated row when applicable. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. 
- #[instrument(err, skip(hasura_transaction))] pub async fn set_election_keys_ceremony( hasura_transaction: &Transaction<'_>, @@ -712,12 +724,12 @@ pub async fn set_election_keys_ceremony( Ok(elections) } + /// Update for an election if the initialization report was generated. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(err, skip(hasura_transaction))] pub async fn set_election_initialization_report_generated( hasura_transaction: &Transaction<'_>, @@ -756,13 +768,13 @@ pub async fn set_election_initialization_report_generated( Ok(()) } + /// Updates election status and returns the updated row when applicable. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(err, skip(hasura_transaction))] pub async fn update_election_status( hasura_transaction: &Transaction<'_>, @@ -824,13 +836,13 @@ pub async fn update_election_status( Ok(results) } + /// Get all elections ids for a given tenant and election event from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - #[instrument(skip(hasura_transaction), err)] pub async fn get_elections_ids( hasura_transaction: &Transaction<'_>, @@ -869,13 +881,13 @@ pub async fn get_elections_ids( Ok(elections) } + /// Get election permission label from the database. /// /// # Errors /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. 
- #[instrument(err, skip(hasura_transaction))] pub async fn get_election_permission_label( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/postgres/election_event.rs b/packages/windmill/src/postgres/election_event.rs index 3ff28e6f14..6beb6e3009 100644 --- a/packages/windmill/src/postgres/election_event.rs +++ b/packages/windmill/src/postgres/election_event.rs @@ -243,7 +243,6 @@ pub async fn get_all_tenant_election_events( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - pub async fn update_election_event_annotations( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -285,7 +284,6 @@ pub async fn update_election_event_annotations( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails, or if row mapping is inconsistent. - pub async fn update_election_event_presentation( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/postgres/keys_ceremony.rs b/packages/windmill/src/postgres/keys_ceremony.rs index 5c54d65d90..082a0ee202 100644 --- a/packages/windmill/src/postgres/keys_ceremony.rs +++ b/packages/windmill/src/postgres/keys_ceremony.rs @@ -26,13 +26,13 @@ impl TryFrom for KeysCeremonyWrapper { trustee_ids: item .try_get::<_, Vec>("trustee_ids")? .iter() - .map(|uuid| uuid.to_string()) + .map(ToString::to_string) .collect(), status: item.try_get("status")?, execution_status: item.try_get("execution_status")?, labels: item.try_get("labels")?, annotations: item.try_get("annotations")?, - threshold: item.try_get::<_, i32>("threshold")? 
as i64, + threshold: i64::from(item.try_get::<_, i32>("threshold")?), name: item.try_get("name")?, settings: item.try_get("settings")?, is_default: item.try_get("is_default")?, diff --git a/packages/windmill/src/postgres/mod.rs b/packages/windmill/src/postgres/mod.rs index 6f271f3391..c68c5da1e8 100644 --- a/packages/windmill/src/postgres/mod.rs +++ b/packages/windmill/src/postgres/mod.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: AGPL-3.0-only -//! PostgreSQL access for Windmill against the `sequent_backend` schema exposed via Hasura. +//! `PostgreSQL` access for Windmill against the `sequent_backend` schema exposed via Hasura. /// Enrollment applications and related filters for an election event. pub mod application; diff --git a/packages/windmill/src/postgres/reports.rs b/packages/windmill/src/postgres/reports.rs index 4dcc2c5168..bbff41dd45 100644 --- a/packages/windmill/src/postgres/reports.rs +++ b/packages/windmill/src/postgres/reports.rs @@ -253,7 +253,7 @@ pub async fn get_report_by_id( Ok(reports.first().cloned()) } -/// Returns ONLY THE FIRST the template_alias which matches these arguments, +/// Returns ONLY THE FIRST the `template_alias` which matches these arguments, /// If there are multiple matches, the rest are ignored. 
/// /// # Errors @@ -319,7 +319,7 @@ pub async fn get_template_alias_for_report( // Election Id was set, but maybe we find the report if we don't set it, // at the election event level as a fallback - let statement = hasura_transaction + let fallback_statement = hasura_transaction .prepare( r#" SELECT @@ -336,16 +336,16 @@ pub async fn get_template_alias_for_report( .await .map_err(|err| anyhow!("Error preparing query: {err}"))?; - let rows = hasura_transaction + let fallback_rows = hasura_transaction .query( - &statement, + &fallback_statement, &[&tenant_uuid, &election_event_uuid, &report_type.to_string()], ) .await .map_err(|err| anyhow!("Error executing query: {err}"))?; // If found, return - if let Some(row) = rows.first() { + if let Some(row) = fallback_rows.first() { let template_alias: Option = row.get("template_alias"); return Ok(template_alias); } diff --git a/packages/windmill/src/postgres/results_area_contest.rs b/packages/windmill/src/postgres/results_area_contest.rs index 8463c98dc5..5c0435e9a1 100644 --- a/packages/windmill/src/postgres/results_area_contest.rs +++ b/packages/windmill/src/postgres/results_area_contest.rs @@ -9,9 +9,10 @@ use rust_decimal::prelude::ToPrimitive; use rust_decimal::Decimal; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsAreaContest}; use serde::{Deserialize, Serialize}; use serde_json::Value; +use std::cmp::Ordering; use tokio_postgres::row::Row; use tokio_postgres::types::ToSql; use tracing::{info, instrument}; @@ -37,19 +38,19 @@ impl TryFrom for ResultsAreaContestWrapper { results_event_id: item.try_get::<_, Uuid>("results_event_id")?.to_string(), elegible_census: item .try_get::<_, Option>("elegible_census")? - .map(|val| val as i64), + .map(i64::from), total_valid_votes: item .try_get::<_, Option>("total_valid_votes")? 
- .map(|val| val as i64), + .map(i64::from), explicit_invalid_votes: item .try_get::<_, Option>("explicit_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), implicit_invalid_votes: item .try_get::<_, Option>("implicit_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), blank_votes: item .try_get::<_, Option>("blank_votes")? - .map(|val| val as i64), + .map(i64::from), created_at: item.get("created_at"), last_updated_at: item.get("last_updated_at"), labels: item.try_get("labels")?, @@ -61,7 +62,7 @@ impl TryFrom for ResultsAreaContestWrapper { .transpose()?, total_invalid_votes: item .try_get::<_, Option>("total_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), total_invalid_votes_percent: item .try_get::<_, Decimal>("total_invalid_votes_percent")? .to_f64() @@ -84,7 +85,7 @@ impl TryFrom for ResultsAreaContestWrapper { .transpose()?, total_votes: item .try_get::<_, Option>("total_votes")? - .map(|val| val as i64), + .map(i64::from), total_votes_percent: item .try_get::<_, Decimal>("total_votes_percent")? .to_f64() @@ -93,7 +94,7 @@ impl TryFrom for ResultsAreaContestWrapper { documents, total_auditable_votes: item .try_get::<_, Option>("total_auditable_votes")? - .map(|val| val as i64), + .map(i64::from), total_auditable_votes_percent: item .try_get::<_, Decimal>("total_auditable_votes_percent")? 
.to_f64() @@ -169,15 +170,13 @@ pub async fn update_results_area_contest_documents( .await .map_err(|err| anyhow!("Error running the areas query: {err}"))?; - if 1 == rows.len() { - Ok(()) - } else if rows.len() > 1 { - Err(anyhow!( - "Too many affected rows in table results_area_contest: {}", - rows.len() - )) - } else { - Err(anyhow!("Rows not found in table results_area_contest")) + let row_count = rows.len(); + match row_count.cmp(&1) { + Ordering::Equal => Ok(()), + Ordering::Greater => Err(anyhow!( + "Too many affected rows in table results_area_contest: {row_count}" + )), + Ordering::Less => Err(anyhow!("Rows not found in table results_area_contest")), } } /// Get results area contest from the database. @@ -263,7 +262,7 @@ pub async fn get_results_area_contest( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - +#[allow(clippy::too_many_lines)] #[instrument(err, skip(hasura_transaction, area_contests))] pub async fn insert_results_area_contests( hasura_transaction: &Transaction<'_>, @@ -320,27 +319,27 @@ pub async fn insert_results_area_contests( results_event_id: results_event_uuid, elegible_census: area_contest.elegible_census, total_votes: area_contest.total_votes, - total_votes_percent: area_contest.total_votes_percent.map(|n| n.into()), + total_votes_percent: area_contest.total_votes_percent.map(Into::into), total_auditable_votes: area_contest.total_auditable_votes, total_auditable_votes_percent: area_contest .total_auditable_votes_percent - .map(|n| n.into()), + .map(Into::into), total_valid_votes: area_contest.total_valid_votes, - total_valid_votes_percent: area_contest.total_valid_votes_percent.map(|n| n.into()), + total_valid_votes_percent: area_contest.total_valid_votes_percent.map(Into::into), total_invalid_votes: area_contest.total_invalid_votes, total_invalid_votes_percent: area_contest .total_invalid_votes_percent - .map(|n| n.into()), + .map(Into::into), explicit_invalid_votes: 
area_contest.explicit_invalid_votes, explicit_invalid_votes_percent: area_contest .explicit_invalid_votes_percent - .map(|n| n.into()), + .map(Into::into), implicit_invalid_votes: area_contest.implicit_invalid_votes, implicit_invalid_votes_percent: area_contest .implicit_invalid_votes_percent - .map(|n| n.into()), + .map(Into::into), blank_votes: area_contest.blank_votes, - blank_votes_percent: area_contest.blank_votes_percent.map(|n| n.into()), + blank_votes_percent: area_contest.blank_votes_percent.map(Into::into), annotations: area_contest.annotations.clone(), }) }) @@ -527,7 +526,7 @@ struct InsertableResultsAreaContest { /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - +#[allow(clippy::too_many_lines)] #[instrument(err, skip(hasura_transaction, records))] pub async fn insert_many_results_area_contests( hasura_transaction: &Transaction<'_>, @@ -559,23 +558,23 @@ pub async fn insert_many_results_area_contests( last_updated_at: r.last_updated_at, labels: r.labels.clone(), annotations: r.annotations.clone(), - total_valid_votes_percent: r.total_valid_votes_percent.map(|v| v.into_inner()), + total_valid_votes_percent: r.total_valid_votes_percent.map(NotNan::into_inner), total_invalid_votes: r.total_invalid_votes, - total_invalid_votes_percent: r.total_invalid_votes_percent.map(|v| v.into_inner()), + total_invalid_votes_percent: r.total_invalid_votes_percent.map(NotNan::into_inner), explicit_invalid_votes_percent: r .explicit_invalid_votes_percent - .map(|v| v.into_inner()), - blank_votes_percent: r.blank_votes_percent.map(|v| v.into_inner()), + .map(NotNan::into_inner), + blank_votes_percent: r.blank_votes_percent.map(NotNan::into_inner), implicit_invalid_votes_percent: r .implicit_invalid_votes_percent - .map(|v| v.into_inner()), + .map(NotNan::into_inner), total_votes: r.total_votes, - total_votes_percent: r.total_votes_percent.map(|v| v.into_inner()), + total_votes_percent: 
r.total_votes_percent.map(NotNan::into_inner), documents: documents_json, total_auditable_votes: r.total_auditable_votes, total_auditable_votes_percent: r .total_auditable_votes_percent - .map(|v| v.into_inner()), + .map(NotNan::into_inner), }) }) .collect::>()?; diff --git a/packages/windmill/src/postgres/results_area_contest_candidate.rs b/packages/windmill/src/postgres/results_area_contest_candidate.rs index db7dda0c3c..8821b4b0ed 100644 --- a/packages/windmill/src/postgres/results_area_contest_candidate.rs +++ b/packages/windmill/src/postgres/results_area_contest_candidate.rs @@ -9,7 +9,7 @@ use rust_decimal::prelude::ToPrimitive; use rust_decimal::Decimal; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsAreaContestCandidate}; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio_postgres::row::Row; @@ -38,15 +38,11 @@ impl TryFrom for ResultsAreaContestCandidateWrapper { area_id: item.try_get::<_, Uuid>("area_id")?.to_string(), candidate_id: item.try_get::<_, Uuid>("candidate_id")?.to_string(), results_event_id: item.try_get::<_, Uuid>("results_event_id")?.to_string(), - cast_votes: item - .try_get::<_, Option>("cast_votes")? - .map(|val| val as i64), + cast_votes: item.try_get::<_, Option>("cast_votes")?.map(i64::from), winning_position: item .try_get::<_, Option>("winning_position")? - .map(|val| val as i64), - points: item - .try_get::<_, Option>("points")? 
- .map(|val| val as i64), + .map(i64::from), + points: item.try_get::<_, Option>("points")?.map(i64::from), created_at: item.get("created_at"), last_updated_at: item.get("last_updated_at"), labels: item.try_get("labels")?, @@ -184,7 +180,7 @@ pub async fn insert_results_area_contest_candidates( cast_votes: contest_candidate.cast_votes, winning_position: contest_candidate.winning_position, points: contest_candidate.points, - cast_votes_percent: contest_candidate.cast_votes_percent.map(|n| n.into()), + cast_votes_percent: contest_candidate.cast_votes_percent.map(Into::into), }) }) .collect::>>()?; @@ -362,7 +358,7 @@ pub async fn insert_many_results_area_contest_candidates( last_updated_at: c.last_updated_at, labels: c.labels.clone(), annotations: c.annotations.clone(), - cast_votes_percent: c.cast_votes_percent.map(|v| v.into_inner()), + cast_votes_percent: c.cast_votes_percent.map(NotNan::into_inner), documents: documents_json, }) }) diff --git a/packages/windmill/src/postgres/results_contest.rs b/packages/windmill/src/postgres/results_contest.rs index 0a9e9ec065..9c688bfac6 100644 --- a/packages/windmill/src/postgres/results_contest.rs +++ b/packages/windmill/src/postgres/results_contest.rs @@ -9,9 +9,10 @@ use rust_decimal::prelude::ToPrimitive; use rust_decimal::Decimal; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsContest}; use serde::{Deserialize, Serialize}; use serde_json::Value; +use std::cmp::Ordering; use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; @@ -36,10 +37,10 @@ impl TryFrom for ResultsContestWrapper { results_event_id: item.try_get::<_, Uuid>("results_event_id")?.to_string(), elegible_census: item .try_get::<_, Option>("elegible_census")? 
- .map(|val| val as i64), + .map(i64::from), total_valid_votes: item .try_get::<_, Option>("total_valid_votes")? - .map(|val| val as i64), + .map(i64::from), total_auditable_votes_percent: item .try_get::<_, Decimal>("total_auditable_votes_percent")? .to_f64() @@ -47,13 +48,13 @@ impl TryFrom for ResultsContestWrapper { .transpose()?, explicit_invalid_votes: item .try_get::<_, Option>("explicit_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), implicit_invalid_votes: item .try_get::<_, Option>("implicit_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), blank_votes: item .try_get::<_, Option>("blank_votes")? - .map(|val| val as i64), + .map(i64::from), voting_type: item.try_get("voting_type")?, counting_algorithm: item.try_get("counting_algorithm")?, name: item.try_get("name")?, @@ -63,7 +64,7 @@ impl TryFrom for ResultsContestWrapper { annotations: item.try_get("annotations")?, total_invalid_votes: item .try_get::<_, Option>("total_invalid_votes")? - .map(|val| val as i64), + .map(i64::from), total_invalid_votes_percent: item .try_get::<&str, Decimal>("total_invalid_votes_percent")? .to_f64() @@ -91,7 +92,7 @@ impl TryFrom for ResultsContestWrapper { .transpose()?, total_votes: item .try_get::<_, Option>("total_votes")? - .map(|val| val as i64), + .map(i64::from), total_votes_percent: item .try_get::<&str, Decimal>("total_votes_percent")? .to_f64() @@ -100,7 +101,7 @@ impl TryFrom for ResultsContestWrapper { documents, total_auditable_votes: item .try_get::<_, Option>("total_auditable_votes")? 
- .map(|val| val as i64), + .map(i64::from), })) } } @@ -165,15 +166,13 @@ pub async fn update_results_contest_documents( .await .map_err(|err| anyhow!("Error running the areas query: {err}"))?; - if 1 == rows.len() { - Ok(()) - } else if rows.len() > 1 { - Err(anyhow!( - "Too many affected rows in table results_contest: {}", - rows.len() - )) - } else { - Err(anyhow!("Rows not found in table results_contest")) + let row_count = rows.len(); + match row_count.cmp(&1) { + Ordering::Equal => Ok(()), + Ordering::Greater => Err(anyhow!( + "Too many affected rows in table results_contest: {row_count}" + )), + Ordering::Less => Err(anyhow!("Rows not found in table results_contest")), } } /// Get results contest from the database. @@ -246,7 +245,7 @@ pub async fn get_results_contest( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - +#[allow(clippy::too_many_lines)] #[instrument(err, skip(hasura_transaction, contests))] pub async fn insert_results_contests( hasura_transaction: &Transaction<'_>, @@ -304,25 +303,25 @@ pub async fn insert_results_contests( results_event_id: results_event_uuid, elegible_census: contest.elegible_census, total_votes: contest.total_votes, - total_votes_percent: contest.total_votes_percent.map(|n| n.into()), + total_votes_percent: contest.total_votes_percent.map(Into::into), total_auditable_votes: contest.total_auditable_votes, total_auditable_votes_percent: contest .total_auditable_votes_percent - .map(|n| n.into()), + .map(Into::into), total_valid_votes: contest.total_valid_votes, - total_valid_votes_percent: contest.total_valid_votes_percent.map(|n| n.into()), + total_valid_votes_percent: contest.total_valid_votes_percent.map(Into::into), total_invalid_votes: contest.total_invalid_votes, - total_invalid_votes_percent: contest.total_invalid_votes_percent.map(|n| n.into()), + total_invalid_votes_percent: contest.total_invalid_votes_percent.map(Into::into), explicit_invalid_votes: 
contest.explicit_invalid_votes, explicit_invalid_votes_percent: contest .explicit_invalid_votes_percent - .map(|n| n.into()), + .map(Into::into), implicit_invalid_votes: contest.implicit_invalid_votes, implicit_invalid_votes_percent: contest .implicit_invalid_votes_percent - .map(|n| n.into()), + .map(Into::into), blank_votes: contest.blank_votes, - blank_votes_percent: contest.blank_votes_percent.map(|n| n.into()), + blank_votes_percent: contest.blank_votes_percent.map(Into::into), voting_type: contest.voting_type.clone(), counting_algorithm: contest.counting_algorithm.clone(), name: contest.name.clone(), @@ -520,7 +519,7 @@ struct InsertableResultsContest { /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - +#[allow(clippy::too_many_lines)] #[instrument(err, skip(hasura_transaction, results_contests))] pub async fn insert_many_results_contests( hasura_transaction: &Transaction<'_>, @@ -555,22 +554,22 @@ pub async fn insert_many_results_contests( labels: c.labels.clone(), annotations: c.annotations.clone(), total_invalid_votes: c.total_invalid_votes, - total_invalid_votes_percent: c.total_invalid_votes_percent.map(|v| v.into_inner()), - total_valid_votes_percent: c.total_valid_votes_percent.map(|v| v.into_inner()), + total_invalid_votes_percent: c.total_invalid_votes_percent.map(NotNan::into_inner), + total_valid_votes_percent: c.total_valid_votes_percent.map(NotNan::into_inner), explicit_invalid_votes_percent: c .explicit_invalid_votes_percent - .map(|v| v.into_inner()), + .map(NotNan::into_inner), implicit_invalid_votes_percent: c .implicit_invalid_votes_percent - .map(|v| v.into_inner()), - blank_votes_percent: c.blank_votes_percent.map(|v| v.into_inner()), + .map(NotNan::into_inner), + blank_votes_percent: c.blank_votes_percent.map(NotNan::into_inner), total_votes: c.total_votes, - total_votes_percent: c.total_votes_percent.map(|v| v.into_inner()), + total_votes_percent: 
c.total_votes_percent.map(NotNan::into_inner), documents: documents_json, total_auditable_votes: c.total_auditable_votes, total_auditable_votes_percent: c .total_auditable_votes_percent - .map(|v| v.into_inner()), + .map(NotNan::into_inner), }) }) .collect::>()?; diff --git a/packages/windmill/src/postgres/results_contest_candidate.rs b/packages/windmill/src/postgres/results_contest_candidate.rs index 468e4db379..b422847696 100644 --- a/packages/windmill/src/postgres/results_contest_candidate.rs +++ b/packages/windmill/src/postgres/results_contest_candidate.rs @@ -9,7 +9,7 @@ use rust_decimal::prelude::ToPrimitive; use rust_decimal::Decimal; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsContestCandidate}; use serde::{Deserialize, Serialize}; use serde_json::Value; use tokio_postgres::row::Row; @@ -35,15 +35,11 @@ impl TryFrom for ResultsContestCandidateWrapper { contest_id: item.try_get::<_, Uuid>("contest_id")?.to_string(), candidate_id: item.try_get::<_, Uuid>("candidate_id")?.to_string(), results_event_id: item.try_get::<_, Uuid>("results_event_id")?.to_string(), - cast_votes: item - .try_get::<_, Option>("cast_votes")? - .map(|val| val as i64), + cast_votes: item.try_get::<_, Option>("cast_votes")?.map(i64::from), winning_position: item .try_get::<_, Option>("winning_position")? - .map(|val| val as i64), - points: item - .try_get::<_, Option>("points")? 
- .map(|val| val as i64), + .map(i64::from), + points: item.try_get::<_, Option>("points")?.map(i64::from), created_at: item.get("created_at"), last_updated_at: item.get("last_updated_at"), labels: item.try_get("labels")?, @@ -108,7 +104,7 @@ pub async fn insert_results_contest_candidates( cast_votes: contest_candidate.cast_votes, winning_position: contest_candidate.winning_position, points: contest_candidate.points, - cast_votes_percent: contest_candidate.cast_votes_percent.map(|n| n.into()), + cast_votes_percent: contest_candidate.cast_votes_percent.map(Into::into), }) }) .collect::>>()?; @@ -280,7 +276,7 @@ pub async fn insert_many_results_contest_candidates( last_updated_at: c.last_updated_at, labels: c.labels.clone(), annotations: c.annotations.clone(), - cast_votes_percent: c.cast_votes_percent.map(|v| v.into_inner()), + cast_votes_percent: c.cast_votes_percent.map(NotNan::into_inner), documents: documents_json, }) }) diff --git a/packages/windmill/src/postgres/results_election.rs b/packages/windmill/src/postgres/results_election.rs index b30968d2ff..7c4a17782d 100644 --- a/packages/windmill/src/postgres/results_election.rs +++ b/packages/windmill/src/postgres/results_election.rs @@ -9,10 +9,10 @@ use rust_decimal::prelude::ToPrimitive; use rust_decimal::Decimal; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::ResultDocuments; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsElection}; use serde::{Deserialize, Serialize}; use serde_json::Value; +use std::cmp::Ordering; use tokio_postgres::row::Row; use tracing::{info, instrument}; use uuid::Uuid; @@ -37,10 +37,10 @@ impl TryFrom for ResultsElectionWrapper { name: item.try_get("name")?, elegible_census: item .try_get::<_, Option>("elegible_census")? 
- .map(|v| v as i64), + .map(i64::from), total_voters: item .try_get::<_, Option>("total_voters")? - .map(|v| v as i64), + .map(i64::from), created_at: item.get("created_at"), last_updated_at: item.get("last_updated_at"), labels: item.try_get("labels")?, @@ -118,15 +118,13 @@ pub async fn update_results_election_documents( .await .map_err(|err| anyhow!("Error running the results_election query: {err}"))?; - if 1 == rows.len() { - Ok(()) - } else if rows.len() > 1 { - Err(anyhow!( - "Too many affected rows in table results_election: {}", - rows.len() - )) - } else { - Err(anyhow!("Rows not found in table results_election")) + let row_count = rows.len(); + match row_count.cmp(&1) { + Ordering::Equal => Ok(()), + Ordering::Greater => Err(anyhow!( + "Too many affected rows in table results_election: {row_count}" + )), + Ordering::Less => Err(anyhow!("Rows not found in table results_election")), } } /// Insert results elections into the database. @@ -176,7 +174,7 @@ pub async fn insert_results_elections( name: election.name.clone(), elegible_census: election.elegible_census, total_voters: election.total_voters, - total_voters_percent: election.total_voters_percent.map(|n| n.into()), + total_voters_percent: election.total_voters_percent.map(Into::into), }) }) .collect::>>()?; @@ -436,7 +434,7 @@ pub async fn insert_many_results_elections( name: r.name.clone(), elegible_census: r.elegible_census, total_voters: r.total_voters, - total_voters_percent: r.total_voters_percent.map(|n| n.into_inner()), + total_voters_percent: r.total_voters_percent.map(NotNan::into_inner), created_at: r.created_at, last_updated_at: r.last_updated_at, labels: r.labels.clone(), diff --git a/packages/windmill/src/postgres/results_election_area.rs b/packages/windmill/src/postgres/results_election_area.rs index 73f99872e4..5f2e2b98b4 100644 --- a/packages/windmill/src/postgres/results_election_area.rs +++ b/packages/windmill/src/postgres/results_election_area.rs @@ -100,7 +100,7 @@ pub async fn 
insert_results_election_area_documents( ], ) .await - .map_err(|err| anyhow!("Error at inser into results_election_area {} ", err))?; + .map_err(|err| anyhow!("Error inserting into results_election_area: {err}"))?; Ok(()) } /// Get event results election area from the database. diff --git a/packages/windmill/src/postgres/results_event.rs b/packages/windmill/src/postgres/results_event.rs index aefb8e7b81..40a79b3d4b 100644 --- a/packages/windmill/src/postgres/results_event.rs +++ b/packages/windmill/src/postgres/results_event.rs @@ -6,9 +6,10 @@ use chrono::{DateTime, Local}; use deadpool_postgres::Transaction; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::results::*; +use sequent_core::types::results::{ResultDocuments, ResultsEvent}; use serde::Serialize; use serde_json::Value; +use std::cmp::Ordering; use tokio_postgres::row::Row; use tracing::instrument; use uuid::Uuid; @@ -88,15 +89,13 @@ pub async fn update_results_event_documents( .await .map_err(|err| anyhow!("Error running the areas query: {err}"))?; - if 1 == rows.len() { - Ok(()) - } else if rows.len() > 1 { - Err(anyhow!( - "Too many affected rows in table results_event: {}", - rows.len() - )) - } else { - Err(anyhow!("Rows not found in table results_event")) + let row_count = rows.len(); + match row_count.cmp(&1) { + Ordering::Equal => Ok(()), + Ordering::Greater => Err(anyhow!( + "Too many affected rows in table results_event: {row_count}" + )), + Ordering::Less => Err(anyhow!("Rows not found in table results_event")), } } /// Get results event by id from the database.
diff --git a/packages/windmill/src/postgres/scheduled_event.rs b/packages/windmill/src/postgres/scheduled_event.rs index f322b44fda..ae7f9297ce 100644 --- a/packages/windmill/src/postgres/scheduled_event.rs +++ b/packages/windmill/src/postgres/scheduled_event.rs @@ -5,10 +5,9 @@ use anyhow::{anyhow, Context, Result}; use chrono::{DateTime, Utc}; use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; -use sequent_core::types::scheduled_event::*; use sequent_core::{ serialization::deserialize_with_path::deserialize_value, - types::scheduled_event::{EventProcessors, ScheduledEvent}, + types::scheduled_event::{CronConfig, EventProcessors, ScheduledEvent}, }; use serde_json::Value; use std::str::FromStr; @@ -413,7 +412,7 @@ pub async fn insert_scheduled_event( .await .map_err(|err| anyhow!("Error inserting scheduled event: {err}"))?; - let rows: Vec = rows + let mut events: Vec = rows .into_iter() .map(|row| -> Result { row.try_into() @@ -422,11 +421,12 @@ pub async fn insert_scheduled_event( .collect::>>() .map_err(|err| anyhow!("Error deserializing scheduled event: {err}"))?; - if 1 == rows.len() { - Ok(rows[0].clone()) - } else { - Err(anyhow!("Unexpected rows affected {}", rows.len())) + if events.len() == 1 { + return events + .pop() + .ok_or_else(|| anyhow!("expected exactly one scheduled event row from insert")); } + Err(anyhow!("Unexpected rows affected {}", events.len())) } /// Get scheduled event by election event id from the database. 
/// @@ -620,14 +620,16 @@ pub async fn insert_new_scheduled_event( .map(|res: ScheduledEventWrapper| -> ScheduledEvent { res.0 }) }) .collect::>>() - .map_err(|err| anyhow!("Error deserializing new scheduled event: {}", err))?; + .map_err(|err| anyhow!("Error deserializing new scheduled event: {err}"))?; if rows.len() == 1 { - Ok(rows[0].clone()) - } else { - Err(anyhow!( - "Unexpected number of rows affected: {}", - rows.len() - )) + let mut rows = rows; + return rows + .pop() + .ok_or_else(|| anyhow!("expected exactly one scheduled event row from insert")); } + Err(anyhow!( + "Unexpected number of rows affected: {}", + rows.len() + )) } diff --git a/packages/windmill/src/postgres/secret.rs b/packages/windmill/src/postgres/secret.rs index 367201146b..1603cb4565 100644 --- a/packages/windmill/src/postgres/secret.rs +++ b/packages/windmill/src/postgres/secret.rs @@ -102,12 +102,16 @@ pub async fn get_secret_by_key( .collect::>>() .with_context(|| "Error converting rows into Secrets")?; - if secrets.is_empty() { - return Ok(None); - } else if secrets.len() > 1 { - return Err(anyhow!("Found too many secrets: {}", secrets.len())); + match secrets.len() { + 0 => Ok(None), + 1 => Ok(Some( + secrets + .into_iter() + .next() + .ok_or_else(|| anyhow!("expected one secret row"))?, + )), + n => Err(anyhow!("Found too many secrets: {n}")), } - Ok(Some(secrets[0].clone())) } /// Insert secret into the database. 
/// @@ -125,11 +129,11 @@ pub async fn insert_secret( encrypted_bytes: &Vec, ) -> Result { let tenant_uuid = parse_uuid_v4(tenant_id) - .map_err(|err| anyhow!("Error parsing tenant_id as UUID: {}", err))?; + .map_err(|err| anyhow!("Error parsing tenant_id as UUID: {err}"))?; let election_event_uuid = election_event_id .map(|id| { parse_uuid_v4(id) - .map_err(|err| anyhow!("Error parsing election_event_id as UUID: {}", err)) + .map_err(|err| anyhow!("Error parsing election_event_id as UUID: {err}")) }) .transpose()?; let statement = hasura_transaction @@ -160,9 +164,11 @@ pub async fn insert_secret( .collect::>>() .map_err(|err| anyhow!("Error deserializing secret: {err}"))?; - if 1 == rows.len() { - Ok(rows[0].clone()) - } else { - Err(anyhow!("Unexpected rows affected {}", rows.len())) + if rows.len() == 1 { + let mut rows = rows; + return rows + .pop() + .ok_or_else(|| anyhow!("expected exactly one secret row from insert")); } + Err(anyhow!("Unexpected rows affected {}", rows.len())) } diff --git a/packages/windmill/src/postgres/tally_session.rs b/packages/windmill/src/postgres/tally_session.rs index cb541c0762..5294950b2a 100644 --- a/packages/windmill/src/postgres/tally_session.rs +++ b/packages/windmill/src/postgres/tally_session.rs @@ -53,7 +53,7 @@ impl TryFrom for TallySessionWrapper { is_execution_completed: item.try_get("is_execution_completed")?, keys_ceremony_id: item.try_get::<_, Uuid>("keys_ceremony_id")?.to_string(), execution_status: item.try_get("execution_status")?, - threshold: item.try_get::<_, i32>("threshold")? as i64, + threshold: i64::from(item.try_get::<_, i32>("threshold")?), configuration: item .try_get::<_, Option>("configuration")? 
.map(deserialize_value) @@ -624,7 +624,12 @@ pub async fn insert_many_tally_sessions( election_ids, area_ids, execution_status: session.execution_status.clone(), - threshold: session.threshold as i32, + threshold: i32::try_from(session.threshold).map_err(|_| { + anyhow!( + "tally session threshold out of i32 range for session {}", + session.id + ) + })?, configuration: configuration_json, tally_type: session.tally_type.clone(), annotations: session.annotations.clone(), diff --git a/packages/windmill/src/postgres/tally_session_contest.rs b/packages/windmill/src/postgres/tally_session_contest.rs index a1495e89c4..ef0a5e70f7 100644 --- a/packages/windmill/src/postgres/tally_session_contest.rs +++ b/packages/windmill/src/postgres/tally_session_contest.rs @@ -44,7 +44,6 @@ impl TryFrom for TallySessionContestWrapper { /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - pub async fn update_tally_session_contests_annotations( hasura_transaction: &Transaction<'_>, contests: &[TallySessionContest], @@ -106,6 +105,8 @@ pub async fn insert_tally_session_contest( election_id: &str, ) -> Result { let contest_uuid = contest_id.map(|val| parse_uuid_v4(&val)).transpose()?; + let session_id_i32 = i32::try_from(session_id) + .map_err(|_| anyhow!("session_id does not fit in i32 for tally_session_contest insert"))?; let statement = hasura_transaction .prepare( @@ -135,13 +136,13 @@ pub async fn insert_tally_session_contest( &parse_uuid_v4(election_event_id)?, &parse_uuid_v4(area_id)?, &contest_uuid, - &(session_id as i32), + &session_id_i32, &parse_uuid_v4(tally_session_id)?, &parse_uuid_v4(election_id)?, ], ) .await - .map_err(|err| anyhow!("Error inserting row: {}", err))?; + .map_err(|err| anyhow!("Error inserting row: {err}"))?; let values: Vec = rows .into_iter() @@ -203,7 +204,8 @@ pub async fn get_tally_session_highest_batch( .into_iter() .map(|row| -> Result { let session_id: i32 = row.try_get("session_id")?; - Ok(session_id as 
BatchNumber) + usize::try_from(session_id) + .map_err(|_| anyhow!("session_id must be non-negative for tally batch")) }) .collect::>>()?; diff --git a/packages/windmill/src/postgres/tally_session_execution.rs b/packages/windmill/src/postgres/tally_session_execution.rs index b8886d4faa..cfdb8182eb 100644 --- a/packages/windmill/src/postgres/tally_session_execution.rs +++ b/packages/windmill/src/postgres/tally_session_execution.rs @@ -234,7 +234,6 @@ pub async fn get_last_tally_session_execution( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. - pub async fn get_event_tally_session_executions( hasura_transaction: &Transaction<'_>, tenant_id: &str, diff --git a/packages/windmill/src/postgres/tasks_execution.rs b/packages/windmill/src/postgres/tasks_execution.rs index a5aff65fac..1025a1b31f 100644 --- a/packages/windmill/src/postgres/tasks_execution.rs +++ b/packages/windmill/src/postgres/tasks_execution.rs @@ -19,7 +19,7 @@ use uuid::Uuid; /// Tasks execution wrapper pub struct TasksExecutionWrapper(pub TasksExecution); -/// Implements a conversion from a database row to that TasksExecutionWrapper structure +/// Implements a conversion from a database row to that `TasksExecutionWrapper` structure impl TryFrom for TasksExecutionWrapper { type Error = anyhow::Error; @@ -30,16 +30,16 @@ impl TryFrom for TasksExecutionWrapper { election_event_id: item .try_get::<_, Option>("election_event_id")? 
.map(|uuid| uuid.to_string()), - name: item.try_get::<_, String>("name")?.to_string(), - task_type: item.try_get::<_, String>("type")?.to_string(), - execution_status: item.try_get::<_, String>("execution_status")?.to_string(), + name: item.try_get::<_, String>("name")?.clone(), + task_type: item.try_get::<_, String>("type")?.clone(), + execution_status: item.try_get::<_, String>("execution_status")?.clone(), created_at: item.get("created_at"), start_at: item.get("start_at"), end_at: item.get("end_at"), annotations: item.try_get("annotations")?, labels: item.try_get("labels")?, logs: item.try_get("logs")?, - executed_by_user: item.try_get::<_, String>("executed_by_user")?.to_string(), + executed_by_user: item.try_get::<_, String>("executed_by_user")?.clone(), })) } } @@ -72,13 +72,13 @@ pub async fn insert_tasks_execution( parse_uuid_v4(tenant_id).map_err(|err| anyhow!("Error parsing tenant UUID: {err}"))?; let election_event_uuid = if let Some(event_id) = election_event_id { - if !event_id.is_empty() { + if event_id.is_empty() { + None + } else { Some( parse_uuid_v4(event_id) .map_err(|err| anyhow!("Error parsing election event UUID: {err}"))?, ) - } else { - None } } else { None @@ -131,7 +131,6 @@ pub async fn insert_tasks_execution( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. 
- pub async fn update_task_execution_status( tenant_id: &str, task_execution_id: &str, diff --git a/packages/windmill/src/postgres/tenant.rs b/packages/windmill/src/postgres/tenant.rs index 98610cf787..7842dd6d81 100644 --- a/packages/windmill/src/postgres/tenant.rs +++ b/packages/windmill/src/postgres/tenant.rs @@ -135,7 +135,7 @@ pub async fn update_tenant( if rows == 0 { return Err(anyhow!("No tenant found with the given tenant_id and id")); } else if rows > 2 { - return Err(anyhow!("Too many affected rows in table tenant: {}", rows)); + return Err(anyhow!("Too many affected rows in table tenant: {rows}")); } Ok(()) diff --git a/packages/windmill/src/services/application.rs b/packages/windmill/src/services/application.rs index 55fe60bbae..155188ddec 100644 --- a/packages/windmill/src/services/application.rs +++ b/packages/windmill/src/services/application.rs @@ -23,7 +23,7 @@ use anyhow::{anyhow, Context, Result}; use deadpool_postgres::Transaction; use keycloak::types::CredentialRepresentation; use sequent_core::ballot::{ElectionEventPresentation, I18nContent}; -use sequent_core::serialization::deserialize_with_path::*; +use sequent_core::serialization::deserialize_with_path::{deserialize_str, deserialize_value}; use sequent_core::services::keycloak::get_event_realm; use sequent_core::services::keycloak::KeycloakAdminClient; @@ -39,7 +39,7 @@ use tracing::{debug, event, info, instrument, warn, Level}; use uuid::Uuid; use sequent_core::types::templates::AudienceSelection::SELECTED; -use sequent_core::types::templates::TemplateMethod::{EMAIL, SMS}; +use sequent_core::types::templates::TemplateMethod::{DOCUMENT, EMAIL, SMS}; use unicode_normalization::char::decompose_canonical; #[allow(non_camel_case_types)] @@ -86,6 +86,7 @@ struct ApplicationCommunicationChannels { /// Returns an error if one of the operations fails. 
#[instrument(skip_all, err)] +#[allow(clippy::implicit_hasher)] pub async fn verify_application( hasura_transaction: &Transaction<'_>, keycloak_transaction: &Transaction<'_>, @@ -226,27 +227,27 @@ fn get_filter_from_applicant_data( let mut email = None; let mut attributes_map = HashMap::new(); - for attribute in search_attributes.split(",") { + for attribute in search_attributes.split(',') { match attribute { "firstName" => { first_name = applicant_data .get("firstName") - .map(|value| FilterOption::IsEqualNormalized(value.to_string())); + .map(|value| FilterOption::IsEqualNormalized(value.clone())); } "lastName" => { last_name = applicant_data .get("lastName") - .map(|value| FilterOption::IsEqualNormalized(value.to_string())); + .map(|value| FilterOption::IsEqualNormalized(value.clone())); } "username" => { username = applicant_data .get("username") - .map(|value| FilterOption::IsEqualNormalized(value.to_string())); + .map(|value| FilterOption::IsEqualNormalized(value.clone())); } "email" => { email = applicant_data .get("email") - .map(|value| FilterOption::IsEqualNormalized(value.to_string())); + .map(|value| FilterOption::IsEqualNormalized(value.clone())); } "embassy" => { // Ignore embassy to speed up user lookup @@ -256,7 +257,7 @@ fn get_filter_from_applicant_data( .get(attribute) .cloned() // Return an empty string if a value is missing from the applicant data. - .unwrap_or("".to_string()); + .unwrap_or_else(String::new); attributes_map.insert(attribute.to_string(), value); } @@ -294,7 +295,7 @@ fn get_filter_from_applicant_data( /// Build manual verify reason. #[instrument(skip_all)] -fn build_manual_verify_reason(fields_match: HashMap) -> String { +fn build_manual_verify_reason(fields_match: &HashMap) -> String { let mismatch_fields = fields_match .iter() .filter(|(_, &value)| !value) @@ -350,6 +351,7 @@ pub struct ApplicationVerificationResult { } /// Automatic verification workflow. 
+#[allow(clippy::too_many_lines)] #[instrument(err, skip_all)] fn automatic_verification( users: Vec, @@ -367,7 +369,7 @@ fn automatic_verification( // Set fields match all to false for default response let fields_match: HashMap = search_attributes - .split(",") + .split(',') .map(|field| (field.trim().to_string(), false)) .collect(); @@ -383,7 +385,7 @@ fn automatic_verification( let mut mismatch_reason = None; for user in users { - let (mismatches, mismatches_unset, fields_match, attributes_unset) = check_mismatches( + let (mismatches, mismatches_unset, user_fields_match, attributes_unset) = check_mismatches( &user, applicant_data, search_attributes.clone(), @@ -392,7 +394,7 @@ fn automatic_verification( let username = user.username.clone().unwrap_or_default(); if mismatches > 0 { - mismatch_reason = Some(build_manual_verify_reason(fields_match.clone())); + mismatch_reason = Some(build_manual_verify_reason(&user_fields_match)); } // If there are no mismatches.. @@ -405,7 +407,7 @@ fn automatic_verification( matched_status = ApplicationStatus::REJECTED; matched_type = ApplicationType::AUTOMATIC; verification_mismatches = Some(mismatches); - verification_fields_match = Some(fields_match); + verification_fields_match = Some(user_fields_match); verification_attributes_unset = Some(attributes_unset); rejection_reason = Some(ApplicationRejectReason::ALREADY_APPROVED); rejection_message = None; @@ -416,7 +418,7 @@ fn automatic_verification( application_status: ApplicationStatus::ACCEPTED, application_type: ApplicationType::AUTOMATIC, mismatches: Some(mismatches), - fields_match: Some(fields_match), + fields_match: Some(user_fields_match), attributes_unset: Some(attributes_unset), rejection_reason: None, rejection_message: None, @@ -433,19 +435,19 @@ fn automatic_verification( matched_status = ApplicationStatus::REJECTED; matched_type = ApplicationType::AUTOMATIC; verification_mismatches = Some(mismatches); - verification_fields_match = Some(fields_match); + 
verification_fields_match = Some(user_fields_match); verification_attributes_unset = Some(attributes_unset); rejection_reason = Some(ApplicationRejectReason::ALREADY_APPROVED); rejection_message = None; } else { - if !fields_match.get("embassy").unwrap_or(&false) { + if !user_fields_match.get("embassy").unwrap_or(&false) { return Ok(ApplicationVerificationResult { user_id: user.id, username, application_status: ApplicationStatus::ACCEPTED, application_type: ApplicationType::AUTOMATIC, mismatches: Some(mismatches), - fields_match: Some(fields_match), + fields_match: Some(user_fields_match), attributes_unset: Some(attributes_unset), rejection_reason: None, rejection_message: None, @@ -456,21 +458,21 @@ fn automatic_verification( matched_status = ApplicationStatus::PENDING; matched_type = ApplicationType::MANUAL; verification_mismatches = Some(mismatches); - verification_fields_match = Some(fields_match); + verification_fields_match = Some(user_fields_match); verification_attributes_unset = Some(attributes_unset); rejection_reason = Some(ApplicationRejectReason::NO_VOTER); rejection_message = None; } } else if mismatches == 2 - && (!fields_match.get("embassy").unwrap_or(&false) - || (!fields_match.get("middleName").unwrap_or(&false) - && !fields_match.get("lastName").unwrap_or(&false))) + && (!user_fields_match.get("embassy").unwrap_or(&false) + || (!user_fields_match.get("middleName").unwrap_or(&false) + && !user_fields_match.get("lastName").unwrap_or(&false))) { matched_user = None; matched_status = ApplicationStatus::PENDING; matched_type = ApplicationType::MANUAL; verification_mismatches = Some(mismatches); - verification_fields_match = Some(fields_match); + verification_fields_match = Some(user_fields_match); verification_attributes_unset = Some(attributes_unset); rejection_reason = Some(ApplicationRejectReason::NO_VOTER); rejection_message = None; @@ -479,7 +481,7 @@ fn automatic_verification( matched_status = ApplicationStatus::REJECTED; matched_type = 
ApplicationType::AUTOMATIC; verification_mismatches = Some(mismatches); - verification_fields_match = Some(fields_match); + verification_fields_match = Some(user_fields_match); verification_attributes_unset = Some(attributes_unset); rejection_reason = Some(ApplicationRejectReason::NO_VOTER); rejection_message = None; @@ -512,7 +514,7 @@ fn automatic_verification( /// Type alias `VerificationMismatchSummary` to keep signatures readable. type VerificationMismatchSummary = (usize, usize, HashMap, HashMap); /// Check mismatches workflow. - +#[allow(clippy::too_many_lines)] #[instrument(err)] fn check_mismatches( user: &User, @@ -538,7 +540,7 @@ fn check_mismatches( let card_type_flag = card_type == ECardType::SEAMANS_BOOK.to_string() || card_type == ECardType::DRIVER_LICENSE.to_string(); - for field_to_check in fields_to_check.split(",") { + for field_to_check in fields_to_check.split(',') { let field = field_to_check.trim(); // Special handling for firstName when card_type_flag is true @@ -549,10 +551,10 @@ fn check_mismatches( // Extract first and middle names from applicant_data let applicant_first_name = applicant_data .get("firstName") - .map(|value| value.to_string().to_lowercase()); + .map(|value| value.clone().to_lowercase()); let applicant_middle_name = applicant_data .get("middleName") - .map(|value| value.to_string().to_lowercase()); + .map(|value| value.clone().to_lowercase()); let applicant_combined = match (applicant_first_name, applicant_middle_name) { (Some(first), Some(middle)) => { Some(format!("{first} {middle}").trim().to_string()) @@ -582,7 +584,8 @@ fn check_mismatches( _ => None, }; - let is_match = is_fuzzy_match(applicant_combined, user_combined); + let is_match = + is_fuzzy_match(applicant_combined.as_deref(), user_combined.as_deref()); match_result.insert("firstName.middleName".to_string(), is_match); if !is_match { @@ -595,7 +598,7 @@ fn check_mismatches( // Extract field from applicant_data let applicant_field_value = applicant_data 
.get(field) - .map(|value| value.to_string().to_lowercase()); + .map(|value| value.clone().to_lowercase()); // Extract field from user let user_field_value = match field { @@ -608,11 +611,14 @@ fn check_mismatches( .as_ref() .and_then(|attributes| attributes.get(field_to_check)) .and_then(|values| values.first()) - .map(|value| value.to_string()), + .cloned(), }; let user_field_value = user_field_value.clone().map(|value| value.to_lowercase()); - let is_match = is_fuzzy_match(applicant_field_value, user_field_value); + let is_match = is_fuzzy_match( + applicant_field_value.as_deref(), + user_field_value.as_deref(), + ); // Check match match_result.insert(field_to_check.to_string(), is_match); @@ -624,8 +630,8 @@ fn check_mismatches( let mut unset_mismatches: usize = 0; - for fields_to_check_unset in fields_to_check_unset.split(",") { - let field_unset = fields_to_check_unset.trim(); + for unset_field_name in fields_to_check_unset.split(',') { + let field_unset = unset_field_name.trim(); // Extract field from user let user_field_value = match field_unset { @@ -638,7 +644,7 @@ fn check_mismatches( .as_ref() .and_then(|attributes| attributes.get(field_unset)) .and_then(|values| values.first()) - .map(|value| value.to_string()), + .cloned(), }; let user_field_value = user_field_value.clone().map(|value| value.to_lowercase()); @@ -701,7 +707,7 @@ async fn get_i18n_default_application_communication( match app_status { ApplicationStatus::ACCEPTED => Ok(application.accepted), ApplicationStatus::REJECTED => Ok(application.rejected), - _ => Err(anyhow::anyhow!("Not a valid application status")), + ApplicationStatus::PENDING => Err(anyhow::anyhow!("Not a valid application status")), } } @@ -730,7 +736,7 @@ pub async fn get_i18n_application_communication( .flatten() { application_channels.sms.message = sms_message; - }; + } if let Some(email_subject) = localization_map .get(&format!("{key_prefix}.email.subject")) @@ -738,7 +744,7 @@ pub async fn 
get_i18n_application_communication( .flatten() { application_channels.email.subject = email_subject; - }; + } if let Some(plaintext_body) = localization_map .get(&format!("{key_prefix}.email.plaintext_body")) @@ -746,7 +752,7 @@ pub async fn get_i18n_application_communication( .flatten() { application_channels.email.plaintext_body = plaintext_body; - }; + } if let Some(html_body) = localization_map .get(&format!("{key_prefix}.email.html_body")) @@ -754,7 +760,7 @@ pub async fn get_i18n_application_communication( .flatten() { application_channels.email.html_body = Some(html_body); - }; + } Ok(application_channels) } @@ -793,7 +799,7 @@ pub async fn get_application_response_communication( match communication_method { EMAIL => Ok((Some(appl_comm.email), None)), SMS => Ok((None, Some(appl_comm.sms))), - _ => Ok((None, None)), + DOCUMENT => Ok((None, None)), } } /// Confirm application workflow. @@ -802,7 +808,7 @@ pub async fn get_application_response_communication( /// /// Returns an error if SQL preparation or execution fails, /// if UUID or other parsing fails. 
- +#[allow(clippy::too_many_lines)] #[instrument(skip_all, err)] pub async fn confirm_application( hasura_transaction: &Transaction<'_>, @@ -883,7 +889,7 @@ pub async fn confirm_application( key.to_owned(), value .to_string() - .split(";") + .split(';') .map(|value| value.trim_matches('"').to_string()) .collect(), ) @@ -925,11 +931,11 @@ pub async fn confirm_application( info!("update_attributes={attributes:?}, attributes_to_store={attributes_to_store:?}"); - let client = KeycloakAdminClient::new() + let keycloak_client = KeycloakAdminClient::new() .await .map_err(|err| anyhow!("Error obtaining keycloak admin client: {err}"))?; - let user = client + let updated_user = keycloak_client .edit_user_with_credentials( &realm, user_id, @@ -951,13 +957,13 @@ pub async fn confirm_application( election_event_id, user_id, admin_id, - &user, + &updated_user, ApplicationStatus::ACCEPTED, ) .await .map_err(|err| anyhow!("Error sending communication response: {err}"))?; - Ok((application, user)) + Ok((application, updated_user)) } /// Reject application workflow. /// @@ -1069,7 +1075,7 @@ pub async fn send_application_communication_response( .as_ref() .and_then(|attributes| attributes.get(MOBILE_PHONE_ATTR_NAME)) .and_then(|values| values.first()) - .map(|value| value.to_string()) + .cloned() .is_some() { Some(SMS) @@ -1083,8 +1089,7 @@ pub async fn send_application_communication_response( .await .with_context(|| "Error obtaining election event")? 
.presentation - .map(deserialize_value) - .unwrap_or(Ok(ElectionEventPresentation::default()))?; + .map_or(Ok(ElectionEventPresentation::default()), deserialize_value)?; let (email_config, sms_config) = get_application_response_communication( communication_method.clone(), @@ -1143,7 +1148,7 @@ pub async fn send_application_communication_response( .await .map_err(|err| anyhow!("Error sending email or sms: {err}"))?; } - _ => {} + ApplicationStatus::PENDING => {} } Ok(()) @@ -1153,7 +1158,6 @@ pub async fn send_application_communication_response( /// # Errors /// /// Returns an error if one of the operations fails. - #[instrument(err, skip_all)] pub async fn get_group_names(realm: &str, user_id: &str) -> Result> { let client = KeycloakAdminClient::new() @@ -1161,13 +1165,13 @@ pub async fn get_group_names(realm: &str, user_id: &str) -> Result> .map_err(|err| anyhow!("Error create keycloak admin client: {err}"))?; // Fetch user groups from Keycloak - let _groups = client + let groups = client .get_user_groups(realm, user_id) .await .map_err(|err| anyhow!("Error fetch group names: {err}"))?; // Extract group names - let group_names: Vec = _groups + let group_names: Vec = groups .into_iter() .map(|group| group.group_name) // Assuming `group_name` is a String .collect(); @@ -1178,7 +1182,7 @@ pub async fn get_group_names(realm: &str, user_id: &str) -> Result> /// Convert string to unaccented. #[instrument(skip_all)] -fn string_to_unaccented(word: String) -> String { +fn string_to_unaccented(word: &str) -> String { let mut unaccented_word = String::new(); for l in word.chars() { let mut base_char = None; @@ -1194,24 +1198,23 @@ fn string_to_unaccented(word: String) -> String { /// Convert to unaccented without hyphen. 
#[instrument(skip_all)] -fn to_unaccented_without_hyphen(word: Option) -> Option { +fn to_unaccented_without_hyphen(word: Option<&str>) -> Option { let word = match word { - Some(word) => word.replace("-", " ").replace(".", ""), + Some(word) => word.replace('-', " ").replace('.', ""), None => return None, }; - let unaccented_word = string_to_unaccented(word); + let unaccented_word = string_to_unaccented(&word); Some(unaccented_word) } /// Assumes that the inputs are already lowercase #[instrument] -fn is_fuzzy_match(applicant_value: Option, user_value: Option) -> bool { - let applicant_value_s = applicant_value.clone().unwrap_or_default(); - let user_value_s = user_value.clone().unwrap_or_default(); +fn is_fuzzy_match(applicant_value: Option<&str>, user_value: Option<&str>) -> bool { + let applicant_value_s = applicant_value.unwrap_or("").to_string(); + let user_value_s = user_value.unwrap_or("").to_string(); let unaccented_applicant_value = - to_unaccented_without_hyphen(applicant_value.clone()).unwrap_or_default(); - let unaccented_user_value = - to_unaccented_without_hyphen(user_value.clone()).unwrap_or_default(); + to_unaccented_without_hyphen(applicant_value).unwrap_or_default(); + let unaccented_user_value = to_unaccented_without_hyphen(user_value).unwrap_or_default(); match ( applicant_value_s.trim() == user_value_s.trim(), applicant_value_s.trim() == unaccented_user_value.trim(), @@ -1231,7 +1234,7 @@ mod tests { fn test_accent_mark() { let applicant_value: Option = Some("manuel".to_string()); let user_value: Option = Some("mánuel".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, @@ -1244,7 +1247,7 @@ mod tests { fn test_grave_accent() { let applicant_value: Option = Some("pierre".to_string()); let user_value: Option = Some("pièrre".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), 
user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1256,7 +1259,7 @@ mod tests { fn test_circumflex() { let applicant_value: Option = Some("paulo".to_string()); let user_value: Option = Some("paulô".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1268,7 +1271,7 @@ mod tests { fn test_tilde() { let applicant_value: Option = Some("manuel".to_string()); let user_value: Option = Some("mañuel".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1280,7 +1283,7 @@ mod tests { fn test_umlaut() { let applicant_value: Option = Some("muller".to_string()); let user_value: Option = Some("müller".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1293,7 +1296,7 @@ mod tests { // German umlaut will not match with its 2 characters equivalents let applicant_value: Option = Some("Mueller".to_string()); let user_value: Option = Some("Müller".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( !is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1305,7 +1308,7 @@ mod tests { fn test_hyphen_equals_space() { let applicant_value: Option = Some("von-der-leyen".to_string()); let user_value: 
Option = Some("von der leyen".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, "applicant_value ({:?}) does not match user_value ({:?})", @@ -1317,7 +1320,7 @@ mod tests { fn test_hyphen_equals_space_reverse() { let applicant_value: Option = Some("von der leyen".to_string()); let user_value: Option = Some("von-der-leyen".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, @@ -1330,7 +1333,7 @@ mod tests { fn test_none_vs_empty_string() { let applicant_value: Option = None; let user_value: Option = Some(" ".to_string()); - let is_match = is_fuzzy_match(applicant_value.clone(), user_value.clone()); + let is_match = is_fuzzy_match(applicant_value.as_deref(), user_value.as_deref()); assert!( is_match, diff --git a/packages/windmill/src/services/ballot_styles/ballot_publication.rs b/packages/windmill/src/services/ballot_styles/ballot_publication.rs index 3e061f8d97..5fae7b22a0 100644 --- a/packages/windmill/src/services/ballot_styles/ballot_publication.rs +++ b/packages/windmill/src/services/ballot_styles/ballot_publication.rs @@ -12,13 +12,13 @@ use crate::postgres::election_event::{get_election_event_by_id, update_election_ use crate::services::celery_app::get_celery_app; use crate::services::election_event_board::get_election_event_board; use crate::services::election_event_status::get_election_event_status; -use crate::services::electoral_log::*; +use crate::services::electoral_log::ElectoralLog; use crate::tasks::update_election_event_ballot_styles::update_election_event_ballot_styles; use anyhow::{anyhow, Context, Result}; use chrono::Utc; use deadpool_postgres::Transaction; use sequent_core::ballot::ElectionEventStatus; -use sequent_core::serialization::deserialize_with_path::*; 
+use sequent_core::serialization::deserialize_with_path::deserialize_str; use sequent_core::services::connection; use sequent_core::services::date::ISO8601; use serde::{Deserialize, Serialize}; @@ -256,9 +256,8 @@ pub async fn get_publication_json( .into_iter() .filter(|ballot_style| { election_id - .clone() - .map(|id| ballot_style.election_id == id) - .unwrap_or(true) + .as_ref() + .is_none_or(|id| ballot_style.election_id == *id) }) .map(|style| style.ballot_eml.clone()) .collect(); @@ -266,7 +265,7 @@ pub async fn get_publication_json( let val_arr: Vec = ballot_style_strings .iter() .map(|el| el.clone().and_then(|val| deserialize_str(&val).ok())) - .filter(|el| el.is_some()) + .filter(std::option::Option::is_some) .map(|el| el.ok_or(anyhow!("Empty ballot style!"))) .collect::>>()?; diff --git a/packages/windmill/src/services/ballot_styles/ballot_style.rs b/packages/windmill/src/services/ballot_styles/ballot_style.rs index 78c5b4887d..2bc4e45c6c 100644 --- a/packages/windmill/src/services/ballot_styles/ballot_style.rs +++ b/packages/windmill/src/services/ballot_styles/ballot_style.rs @@ -57,7 +57,7 @@ pub struct ElectionEventConfig { pub election_event_presentation: ElectionEventPresentation, } -/// Returns a HashMap> with all +/// Returns a `HashMap` from `election_id` to a set of `contest_id` with all /// the election ids and contest ids related to an area, /// including contests linked via parent areas in `areas_tree`. /// @@ -65,6 +65,7 @@ pub struct ElectionEventConfig { /// /// - `Err` when the publication lists no election ids. /// - `Err` when `area` is not present in `areas_tree`. 
+#[allow(clippy::implicit_hasher)] pub fn get_elections_contests_map_for_area( area: &Area, areas_tree: &TreeNode, @@ -90,7 +91,7 @@ pub fn get_elections_contests_map_for_area( // election_id, set let mut election_contest_map: HashMap> = HashMap::new(); - for area_contest in area_contests.iter() { + for area_contest in &area_contests { let Some(contest) = contests_map.get(&area_contest.contest_id) else { event!( Level::INFO, @@ -124,6 +125,7 @@ pub fn get_elections_contests_map_for_area( /// - Errors from [`get_elections_contests_map_for_area`] when the area cannot be resolved. /// - Missing election or contest rows, ballot construction errors, JSON serialization failures, /// or Postgres insert errors from [`crate::postgres::ballot_style::insert_ballot_style`]. +#[allow(clippy::implicit_hasher)] pub async fn create_ballot_style_postgres( transaction: &Transaction<'_>, area: &Area, @@ -146,7 +148,7 @@ pub async fn create_ballot_style_postgres( area_contests_map, )?; - for (election_id, contest_ids) in election_contest_map.into_iter() { + for (election_id, contest_ids) in election_contest_map { let election = elections_map .get(&election_id) .ok_or(anyhow!("election id not found {election_id}"))?; @@ -277,6 +279,7 @@ pub async fn create_public_election_event_config_file( /// - Pool, transaction, or query failures while loading Hasura-backed rows. /// - Missing ballot publication, ballot style generation errors, commit failures, or lock /// acquisition/release problems (surfaced as anyhow contexts). 
+#[allow(clippy::too_many_lines)] #[instrument(err)] pub async fn update_election_event_ballot_styles( tenant_id: &str, @@ -358,7 +361,7 @@ pub async fn update_election_event_ballot_styles( .map(|keys_ceremony: KeysCeremony| (keys_ceremony.id.clone(), keys_ceremony.clone())) .collect(); - let basic_areas = areas.iter().map(|area| area.into()).collect(); + let basic_areas = areas.iter().map(Into::into).collect(); let areas_tree = TreeNode::from_areas(basic_areas)?; for area in &areas { diff --git a/packages/windmill/src/services/cast_votes.rs b/packages/windmill/src/services/cast_votes.rs index bc46cac6e7..9185acb7fd 100644 --- a/packages/windmill/src/services/cast_votes.rs +++ b/packages/windmill/src/services/cast_votes.rs @@ -197,7 +197,7 @@ pub async fn count_cast_votes_election( let test_elections_clause = match is_test_election { Some(true) => "AND el.name ILIKE '%Test%'".to_string(), Some(false) => "AND el.name NOT ILIKE '%Test%'".to_string(), - None => "".to_string(), + None => String::new(), }; let statement_str = format!( @@ -322,7 +322,7 @@ pub async fn get_count_votes_per_day( /// # Errors /// /// Returns an error if one of the operations fails. 
- +#[allow(clippy::too_many_lines)] #[instrument(skip(hasura_transaction, users), err)] pub async fn get_users_with_vote_info( hasura_transaction: &Transaction<'_>, @@ -406,7 +406,7 @@ pub async fn get_users_with_vote_info( let voter_id_string: String = row .try_get("voter_id_string") .with_context(|| "Error getting voter_id_string from row")?; - let election_id: Uuid = row + let vote_election_id: Uuid = row .try_get("election_id") .with_context(|| "Error getting election_id from row")?; let num_votes: i64 = row @@ -420,8 +420,9 @@ pub async fn get_users_with_vote_info( .entry(voter_id_string) .or_default() .push(VotesInfo { - election_id: election_id.to_string(), - num_votes: num_votes as usize, + election_id: vote_election_id.to_string(), + num_votes: usize::try_from(num_votes) + .map_err(|_| anyhow!("num_votes from database does not fit in usize"))?, last_voted_at: last_voted_at.to_string(), }); } @@ -442,9 +443,9 @@ pub async fn get_users_with_vote_info( if let Some(attributes) = &user.attributes { if voted_via_not_internet_channel(attributes) { votes_info = vec![VotesInfo { - election_id: "".to_string(), // Not used for datafix + election_id: String::new(), // Not used for datafix num_votes: 1, - last_voted_at: "".to_string(), // Not used for datafix + last_voted_at: String::new(), // Not used for datafix }]; } } @@ -721,7 +722,7 @@ pub async fn count_cast_votes_election_event( let test_elections_clause = match is_test_election { Some(true) => "AND el.name ILIKE '%Test%'".to_string(), Some(false) => "AND el.name NOT ILIKE '%Test%'".to_string(), - None => "".to_string(), + None => String::new(), }; let statement_str = format!( diff --git a/packages/windmill/src/services/celery_app.rs b/packages/windmill/src/services/celery_app.rs index 165f8b7649..d5c7c88daa 100644 --- a/packages/windmill/src/services/celery_app.rs +++ b/packages/windmill/src/services/celery_app.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: AGPL-3.0-only //! 
Celery app construction, broker tuning, and per-deployment queue management. - +#![allow(clippy::non_std_lazy_statics)] use anyhow::{anyhow, Context, Result}; use async_once::AsyncOnce; use celery::prelude::Task; @@ -10,9 +10,10 @@ use celery::Celery; use lapin::{Connection, ConnectionProperties}; use std; use std::convert::AsRef; -use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU16, AtomicU32, AtomicUsize, Ordering}; +use std::sync::{Arc, LazyLock, Mutex}; use strum_macros::AsRefStr; -use tokio::sync::{Mutex, RwLock}; +use tokio::sync::RwLock; use tracing::{event, info, instrument, Level}; use crate::services::plugins_manager::plugin_manager::init_plugin_manager; @@ -92,107 +93,102 @@ pub enum Queue { impl Queue { /// Get the queue name for the Celery app. + #[must_use] pub fn queue_name(&self, slug: &str) -> String { format!("{}_{}", slug, self.as_ref()) } } /// AMQP prefetch count used when building the Celery app. -static mut PREFETCH_COUNT_S: u16 = 100; -/// Whether tasks are ACKed only after successful execution. -static mut ACKS_LATE_S: bool = true; +static PREFETCH_COUNT_S: AtomicU16 = AtomicU16::new(100); +/// Whether tasks are `ACKed` only after successful execution. +static ACKS_LATE_S: AtomicBool = AtomicBool::new(true); /// Maximum number of retries configured for Celery tasks. -static mut TASK_MAX_RETRIES: u32 = 4; +static TASK_MAX_RETRIES: AtomicU32 = AtomicU32::new(4); /// Global switch used to pause/resume the app workers. -static mut IS_APP_ACTIVE: bool = true; +static IS_APP_ACTIVE: AtomicBool = AtomicBool::new(true); /// Max retries while establishing the broker connection. -static mut BROKER_CONNECTION_MAX_RETRIES: u32 = 5; +static BROKER_CONNECTION_MAX_RETRIES: AtomicU32 = AtomicU32::new(5); /// AMQP heartbeat interval in seconds. -static mut HEARTBEAT_SECS: u16 = 10; +static HEARTBEAT_SECS: AtomicU16 = AtomicU16::new(10); /// Number of worker threads used by the Celery runtime. 
-static mut WORKER_THREADS: usize = 1; +static WORKER_THREADS: AtomicUsize = AtomicUsize::new(1); /// Explicit queue names to consume from, when configured. -static mut QUEUES: Vec = vec![]; +static QUEUES: Mutex> = Mutex::new(Vec::new()); /// Set the prefetch count for the Celery app. pub fn set_prefetch_count(new_val: u16) { - unsafe { - PREFETCH_COUNT_S = new_val; - } + PREFETCH_COUNT_S.store(new_val, Ordering::SeqCst); } /// Set the number of worker threads for the Celery app. pub fn set_worker_threads(new_val: usize) { - unsafe { - WORKER_THREADS = new_val; - } + WORKER_THREADS.store(new_val, Ordering::SeqCst); } /// Get the number of worker threads for the Celery app. +#[must_use] pub fn get_worker_threads() -> usize { - unsafe { WORKER_THREADS } + WORKER_THREADS.load(Ordering::SeqCst) } -/// Set whether tasks are ACKed late for the Celery app. +/// Set whether tasks are `ACKed` late for the Celery app. pub fn set_acks_late(new_val: bool) { - unsafe { - ACKS_LATE_S = new_val; - } + ACKS_LATE_S.store(new_val, Ordering::SeqCst); } /// Set the maximum number of retries for tasks for the Celery app. pub fn set_task_max_retries(new_val: u32) { - unsafe { - TASK_MAX_RETRIES = new_val; - } + TASK_MAX_RETRIES.store(new_val, Ordering::SeqCst); } /// Set the queues for the Celery app. +/// +/// # Panics +/// Panics if the CELERY queues mutex is poisoned. pub fn set_queues(new_val: Vec) { - unsafe { - QUEUES = new_val; - } + *QUEUES.lock().expect("CELERY queues mutex poisoned") = new_val; } #[instrument] /// Set whether the Celery app is active. pub fn set_is_app_active(new_val: bool) { - unsafe { - IS_APP_ACTIVE = new_val; - } + IS_APP_ACTIVE.store(new_val, Ordering::SeqCst); } /// Set the maximum number of retries for broker connections for the Celery app. 
pub fn set_broker_connection_max_retries(new_val: u32) { - unsafe { - BROKER_CONNECTION_MAX_RETRIES = new_val; - } + BROKER_CONNECTION_MAX_RETRIES.store(new_val, Ordering::SeqCst); } /// Set the heartbeat interval for the Celery app. pub fn set_heartbeat(new_val: u16) { - unsafe { - HEARTBEAT_SECS = new_val; - } + HEARTBEAT_SECS.store(new_val, Ordering::SeqCst); } /// Get whether the Celery app is active. +#[must_use] pub fn get_is_app_active() -> bool { - unsafe { IS_APP_ACTIVE } + IS_APP_ACTIVE.load(Ordering::SeqCst) } /// Get the queues for the Celery app. +/// +/// # Panics +/// Panics if the CELERY queues mutex is poisoned. +#[must_use] pub fn get_queues() -> Vec { - unsafe { QUEUES.clone() } + QUEUES.lock().expect("CELERY queues mutex poisoned").clone() } -lazy_static! { +lazy_static::lazy_static! { /// CELERY_APP holds the high-level Celery application. Note: The Celery app is /// built separately from the Broker because it handles task routing/scheduling. static ref CELERY_APP: AsyncOnce> = AsyncOnce::new(async { generate_celery_app().await.unwrap_or_else(|err| { - tracing::error!("{:#}", err); - panic!("{:#}", err); + tracing::error!("{err:#}"); + #[allow(clippy::panic)] + std::process::exit(1); }) }); } @@ -220,7 +216,7 @@ async fn create_connection() -> Result<(Arc, String)> { for amqp_url in amqp_urls { match Connection::connect(&amqp_url, ConnectionProperties::default()) .await - .with_context(|| format!("Failed to connect to any AMQP server {}", amqp_url)) + .with_context(|| format!("Failed to connect to any AMQP server {amqp_url}")) { Ok(connection) => { let arc_conn = Arc::new(connection); @@ -248,19 +244,13 @@ async fn create_connection() -> Result<(Arc, String)> { /// Returns an error if required environment variables are missing, broker connection fails, /// or task/plugin initialization fails. 
#[instrument] +#[allow(clippy::too_many_lines)] pub async fn generate_celery_app() -> Result> { - let prefetch_count: u16; - let acks_late: bool; - let task_max_retries: u32; - let broker_connection_max_retries: u32; - let heartbeat: u16; - unsafe { - prefetch_count = PREFETCH_COUNT_S; - acks_late = ACKS_LATE_S; - task_max_retries = TASK_MAX_RETRIES; - broker_connection_max_retries = BROKER_CONNECTION_MAX_RETRIES; - heartbeat = HEARTBEAT_SECS; - } + let prefetch_count = PREFETCH_COUNT_S.load(Ordering::SeqCst); + let acks_late = ACKS_LATE_S.load(Ordering::SeqCst); + let task_max_retries = TASK_MAX_RETRIES.load(Ordering::SeqCst); + let broker_connection_max_retries = BROKER_CONNECTION_MAX_RETRIES.load(Ordering::SeqCst); + let heartbeat = HEARTBEAT_SECS.load(Ordering::SeqCst); event!( Level::INFO, "prefetch_count: {}, acks_late: {}", @@ -386,7 +376,7 @@ pub async fn generate_celery_app() -> Result> { broker_connection_max_retries = broker_connection_max_retries, ) .await - .map_err(|err| anyhow!("{:?}", err)) + .map_err(|err| anyhow!("{err:?}")) } /// Cached AMQP connection used by the worker process. 
diff --git a/packages/windmill/src/services/ceremonies/encrypter.rs b/packages/windmill/src/services/ceremonies/encrypter.rs index 4640b964e7..4df056d020 100644 --- a/packages/windmill/src/services/ceremonies/encrypter.rs +++ b/packages/windmill/src/services/ceremonies/encrypter.rs @@ -69,9 +69,11 @@ pub async fn traversal_find_secrets_for_files( return Err(anyhow!("The provided path is not a directory")); } - let entries = WalkDir::new(folder_path).into_iter().filter_map(|e| e.ok()); - let election_id_regex = - Regex::new(r"election__[a-zA-Z0-9\s\-\_]*__([0-9a-fA-F\-]{36})").unwrap(); + let entries = WalkDir::new(folder_path) + .into_iter() + .filter_map(std::result::Result::ok); + let election_id_regex = Regex::new(r"election__[a-zA-Z0-9\s\-\_]*__([0-9a-fA-F\-]{36})") + .expect("static election id regex"); for entry in entries { let path = entry.path(); @@ -145,6 +147,7 @@ pub async fn traversal_find_secrets_for_files( /// /// - `Err` when `folder_path` is not a directory. /// - Encryption failures bubbled up from [`encrypt_directory_contents`]. +#[allow(clippy::implicit_hasher)] #[instrument(err, skip_all)] pub async fn traversal_encrypt_files( report_secrets_map: HashMap, @@ -155,9 +158,11 @@ pub async fn traversal_encrypt_files( return Err(anyhow!("The provided path is not a directory")); } - let entries = WalkDir::new(folder_path).into_iter().filter_map(|e| e.ok()); - let election_id_regex = - Regex::new(r"election__[a-zA-Z0-9\s\-\_]*__([0-9a-fA-F\-]{36})").unwrap(); + let entries = WalkDir::new(folder_path) + .into_iter() + .filter_map(std::result::Result::ok); + let election_id_regex = Regex::new(r"election__[a-zA-Z0-9\s\-\_]*__([0-9a-fA-F\-]{36})") + .expect("static election id regex"); for entry in entries { let path = entry.path(); @@ -265,6 +270,7 @@ pub async fn encrypt_directory_contents_sql( /// /// - Missing password entry for a matched report, encryption failures, or filesystem errors when /// removing the plaintext source file. 
+#[allow(clippy::implicit_hasher)] #[instrument(err, skip(report_secrets_map, election_ids, all_reports, old_path))] pub async fn encrypt_directory_contents( report_secrets_map: &HashMap, diff --git a/packages/windmill/src/services/ceremonies/insert_ballots.rs b/packages/windmill/src/services/ceremonies/insert_ballots.rs index b4c3c492d2..d0f37a8f16 100644 --- a/packages/windmill/src/services/ceremonies/insert_ballots.rs +++ b/packages/windmill/src/services/ceremonies/insert_ballots.rs @@ -10,7 +10,10 @@ use crate::services::celery_app::get_worker_threads; use crate::services::database::{get_hasura_pool, get_keycloak_pool, PgConfig}; use crate::services::election::get_election_event_elections; use crate::services::join::merge_join_csv; -use crate::services::protocol_manager::*; +use crate::services::protocol_manager::{ + add_ballots_to_board, generate_trustee_set, get_b3_pgsql_client, get_board_messages, + get_configuration, get_protocol_manager, get_public_key_hash, +}; use crate::services::public_keys::deserialize_public_key; use crate::services::users::list_keycloak_enabled_users_by_area_id_and_authorized_elections; use anyhow::{anyhow, Context, Result}; @@ -54,6 +57,7 @@ use std::sync::Arc; // Add this import /// /// Trustee lookup/deserialization failures, protocol manager errors, CSV or crypto errors, pool /// acquisition failures, or any `?` bubbled from board/network helpers inside the parallel tasks. 
+#[allow(clippy::too_many_lines)] #[instrument(skip_all, err)] pub async fn insert_ballots_messages( hasura_transaction: &Transaction<'_>, @@ -272,14 +276,14 @@ pub async fn insert_ballots_messages( let hashable_multi_ballot_contests = hashable_multi_ballot .deserialize_contests() - .map_err(|err| anyhow!("{:?}", err))?; + .map_err(|err| anyhow!("{err:?}"))?; Some(hashable_multi_ballot_contests.ciphertext) } else { let hashable_ballot: HashableBallot = deserialize_str(&ballot_str)?; let contests = hashable_ballot .deserialize_contests() - .map_err(|err| anyhow!("{:?}", err))?; + .map_err(|err| anyhow!("{err:?}"))?; contests .iter() .find(|contest| { @@ -300,7 +304,10 @@ pub async fn insert_ballots_messages( ); let mut board = get_b3_pgsql_client().await?; - let batch = tally_session_contest.session_id as BatchNumber; + let batch = + usize::try_from(tally_session_contest.session_id).map_err(|_| { + anyhow!("session_id must be non-negative for tally batch") + })?; add_ballots_to_board( &protocol_manager_arc_clone, // Use the Arc clone here &mut board, @@ -359,7 +366,7 @@ pub async fn get_elections_end_dates( .map(deserialize_value) .transpose() .map_err(|err| anyhow!("Error parsing election presentation {err:?}"))? 
- .unwrap_or(Default::default()); + .unwrap_or(ElectionPresentation::default()); let current_dates = election_presentation.dates.clone().unwrap_or_default(); let end_date = current_dates .end_date diff --git a/packages/windmill/src/services/ceremonies/keys_ceremony.rs b/packages/windmill/src/services/ceremonies/keys_ceremony.rs index 3b2d1af9d7..4355843828 100644 --- a/packages/windmill/src/services/ceremonies/keys_ceremony.rs +++ b/packages/windmill/src/services/ceremonies/keys_ceremony.rs @@ -9,7 +9,9 @@ use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::keys_ceremony; use crate::postgres::trustee; use crate::services::celery_app::get_celery_app; -use crate::services::ceremonies::serialize_logs::*; +use crate::services::ceremonies::serialize_logs::{ + append_keys_trustee_check_log, append_keys_trustee_download_log, generate_keys_initial_log, +}; use crate::services::election_event_board::get_election_event_board; use crate::services::election_event_status::get_election_event_status; use crate::services::electoral_log::ElectoralLog; @@ -343,6 +345,7 @@ pub async fn check_private_key( /// Trustee lookup mismatches, invalid thresholds, duplicate default ceremonies, conflicting /// per-election ceremonies, serialization failures, Postgres insert failures, or electoral log /// write errors. 
+#[allow(clippy::too_many_lines)] #[instrument(err)] pub async fn create_keys_ceremony( transaction: &Transaction<'_>, @@ -387,7 +390,7 @@ pub async fn create_keys_ceremony( let default_ceremony = keys_ceremonies .clone() .into_iter() - .find(|keys_ceremony| keys_ceremony.is_default()); + .find(KeysCeremony::is_default); if default_ceremony.is_some() { return Err(anyhow!( @@ -412,7 +415,7 @@ pub async fn create_keys_ceremony( if !keys_ceremonies.is_empty() { return Err(anyhow!("Can't create an election event keys ceremony when there are already existing keys ceremonies.")); } - }; + } // generate default values let keys_ceremony_id: String = Uuid::new_v4().to_string(); @@ -533,9 +536,8 @@ pub async fn validate_permission_labels( .await .map_err(|e| anyhow::anyhow!("Error getting election permissionlabel {e:?}"))?; - let user_permission_labels = match user_permission_labels { - Some(perms) => perms, - None => return Err(anyhow!("user dont have permission labels")), + let Some(user_permission_labels) = user_permission_labels else { + return Err(anyhow!("user dont have permission labels")); }; let user_permission_labels_json = user_permission_labels diff --git a/packages/windmill/src/services/ceremonies/renamer.rs b/packages/windmill/src/services/ceremonies/renamer.rs index 5a19676495..105f49df4b 100644 --- a/packages/windmill/src/services/ceremonies/renamer.rs +++ b/packages/windmill/src/services/ceremonies/renamer.rs @@ -21,12 +21,13 @@ pub const FOLDER_MAX_CHARS: usize = 200; /// # Errors /// /// Propagates `std::io::Error` from `rename` when a target path cannot be created. 
+#[allow(clippy::implicit_hasher)] #[instrument(skip_all, err)] pub fn rename_folders(replacements: &HashMap, folder_path: &PathBuf) -> Result<()> { // Collect directories and sort by depth in descending order let mut directories: Vec = WalkDir::new(folder_path) .into_iter() - .filter_map(|e| e.ok()) + .filter_map(std::result::Result::ok) .filter(|e| e.file_type().is_dir()) .collect(); @@ -52,6 +53,7 @@ pub fn rename_folders(replacements: &HashMap, folder_path: &Path } /// Returns up to the last `n` Unicode characters of `s`. +#[must_use] pub fn take_last_n_chars(s: &str, n: usize) -> String { s.chars() .rev() @@ -63,6 +65,7 @@ pub fn take_last_n_chars(s: &str, n: usize) -> String { } /// Returns up to the first `n` Unicode characters of `s`. +#[must_use] pub fn take_first_n_chars(s: &str, n: usize) -> String { s.chars().take(n).collect() } @@ -71,15 +74,7 @@ pub fn take_first_n_chars(s: &str, n: usize) -> String { /// and trimming trailing dots/spaces. fn sanitize_filename(filename: &str) -> String { let sanitized = filename - .replace("/", "_") // Linux and macOS directory separator - .replace("\\", "_") // Windows directory separator - .replace(":", "_") // Windows and classic macOS - .replace("*", "_") - .replace("?", "_") - .replace("\"", "_") - .replace("<", "_") - .replace(">", "_") - .replace("|", "_") + .replace(['/', '\\', ':', '*', '?', '"', '<', '>', '|'], "_") .trim_end_matches(&[' ', '.'][..]) // Trim trailing spaces and dots (Windows) .to_string(); diff --git a/packages/windmill/src/services/ceremonies/result_documents.rs b/packages/windmill/src/services/ceremonies/result_documents.rs index 4e179181b0..4bc615e563 100644 --- a/packages/windmill/src/services/ceremonies/result_documents.rs +++ b/packages/windmill/src/services/ceremonies/result_documents.rs @@ -5,12 +5,11 @@ use super::encrypter::{ encrypt_directory_contents, encrypt_directory_contents_sql, get_file_report_type, traversal_encrypt_files, traversal_find_secrets_for_files, }; -use 
super::renamer::rename_folders; +use super::renamer::{rename_folders, take_first_n_chars, FOLDER_MAX_CHARS}; use crate::postgres::document::get_document; use crate::postgres::reports::Report; use crate::postgres::reports::{get_reports_by_election_event_id, ReportType}; use crate::postgres::results_election_area::insert_results_election_area_documents; -use crate::services::ceremonies::renamer::*; use crate::{ postgres::{ results_area_contest::update_results_area_contest_documents, @@ -75,7 +74,7 @@ async fn generic_save_documents( hasura_transaction: &Transaction<'_>, tally_type_enum: TallyType, ) -> Result { - let mut documents: ResultDocuments = Default::default(); + let mut documents: ResultDocuments = ResultDocuments::default(); // Retrieve reports let all_reports = @@ -210,7 +209,7 @@ pub trait GenerateResultDocuments { /// Resolves filesystem paths for each artifact type, optionally scoped to `area_id`. fn get_document_paths(&self, area_id: Option, base_path: &Path) -> ResultDocumentPaths; /// Uploads artifacts described by `document_paths`, updates results tables, and optionally mirrors - /// ids into SQLite. + /// ids into `SQLite`. /// /// # Errors /// @@ -245,12 +244,14 @@ impl GenerateResultDocuments for Vec { } } - /// Create event related documents and update the results_event table. + /// Create event related documents and update the `results_event` table. /// /// # Errors /// /// Tar creation, encryption traversal, secret discovery, uploads, folder rename operations, or /// Postgres/SQLite updates performed inside this implementation. + #[allow(clippy::too_many_lines, clippy::future_not_send)] + // clippy::future_not_send: sqlite transaction is not send when passing as Option. 
#[instrument( skip(self, rename_map), err, @@ -293,8 +294,13 @@ impl GenerateResultDocuments for Vec { let (_original_tarfile_temp_path, original_tarfile_path, original_tarfile_size) = original_result; - let report_tenant_id = &self[0].reports[0].tenant_id; - let report_election_event_id = &self[0].reports[0].election_event_id; + let first_election = self.first().context("empty election report batch")?; + let first_report = first_election + .reports + .first() + .context("missing report row for tarball export")?; + let report_tenant_id = &first_report.tenant_id; + let report_election_event_id = &first_report.election_event_id; let all_reports = get_reports_by_election_event_id(hasura_transaction, tenant_id, election_event_id) @@ -322,7 +328,7 @@ impl GenerateResultDocuments for Vec { original_tarfile_size, "application/gzip", report_tenant_id, - Some(report_election_event_id.to_string()), + Some(report_election_event_id.clone()), "tally.tar.gz", None, false, @@ -366,10 +372,10 @@ impl GenerateResultDocuments for Vec { let (_tarfile_temp_path, tarfile_path, tarfile_size) = result; - let mut upload_path = tarfile_path.clone(); + let mut renamed_tar_upload_path = tarfile_path.clone(); // Encrypt the tar.gz folder if necessary before uploading - upload_path = encrypt_directory_contents_sql( + renamed_tar_upload_path = encrypt_directory_contents_sql( hasura_transaction, tenant_id, election_event_id, @@ -384,11 +390,11 @@ impl GenerateResultDocuments for Vec { // upload binary data into a document (s3 and hasura) let document = upload_and_return_document( hasura_transaction, - &upload_path, + &renamed_tar_upload_path, tarfile_size, "application/gzip", report_tenant_id, - Some(report_election_event_id.to_string()), + Some(report_election_event_id.clone()), "tally.tar.gz", None, false, @@ -499,6 +505,7 @@ impl GenerateResultDocuments for ElectionReportDataComputed { /// /// Missing report metadata, filesystem/hash errors, [`generic_save_documents`] failures, or /// 
Postgres/SQLite update errors. + #[allow(clippy::future_not_send)] #[instrument( err, skip(self, hasura_transaction), @@ -515,14 +522,14 @@ impl GenerateResultDocuments for ElectionReportDataComputed { tally_type_enum: TallyType, sqlite_transaction_opt: Option<&SqliteTransaction<'_>>, ) -> Result { - let tenant_id = self + let doc_tenant_id = self .reports .first() .context("Missing reports")? .tenant_id .clone(); - let election_event_id = self + let doc_election_event_id = self .reports .first() .context("Missing reports")? @@ -547,8 +554,8 @@ impl GenerateResultDocuments for ElectionReportDataComputed { // Save election results documents to S3 and Hasura let documents = generic_save_documents( document_paths, - &tenant_id, - &election_event_id, + &doc_tenant_id, + &doc_election_event_id, hasura_transaction, tally_type_enum, ) @@ -556,9 +563,9 @@ impl GenerateResultDocuments for ElectionReportDataComputed { update_results_election_documents( hasura_transaction, - &tenant_id, + &doc_tenant_id, results_event_id, - &election_event_id, + &doc_election_event_id, &election_id, &documents, &json_hash, @@ -568,9 +575,9 @@ impl GenerateResultDocuments for ElectionReportDataComputed { if let Some(sqlite_transaction) = sqlite_transaction_opt { update_results_election_documents_sqlite( sqlite_transaction, - &tenant_id, + &doc_tenant_id, results_event_id, - &election_event_id, + &doc_election_event_id, &election_id, &documents, &json_hash, @@ -636,7 +643,8 @@ impl GenerateResultDocuments for ReportDataComputed { /// # Errors /// /// Hashing or IO errors when reading JSON proofs, [`generic_save_documents`] failures, or database - /// updates for contest/area-contest rows (Hasura and optional SQLite). + /// updates for contest/area-contest rows (Hasura and optional `SQLite`). 
+ #[allow(clippy::future_not_send)] #[instrument(err, skip(self), name = "ReportDataComputed::save_documents")] async fn save_documents( &self, @@ -651,8 +659,8 @@ impl GenerateResultDocuments for ReportDataComputed { ) -> Result { let documents = generic_save_documents( document_paths, - &self.tenant_id.to_string(), - &self.election_event_id.to_string(), + &self.tenant_id.clone(), + &self.election_event_id.clone(), hasura_transaction, tally_type_enum, ) @@ -778,6 +786,7 @@ pub fn generate_ids_map( /// /// Failures from [`generate_ids_map`], missing report data during saves, encryption/upload errors, or /// any database update returned by [`GenerateResultDocuments::save_documents`]. +#[allow(clippy::future_not_send)] #[instrument(skip(hasura_transaction, results, areas), err)] pub async fn save_result_documents( hasura_transaction: &Transaction<'_>, @@ -847,24 +856,24 @@ pub async fn save_result_documents( ) .await?; } - let areas: Vec = election_areas.values().cloned().collect(); + let report_areas: Vec = election_areas.values().cloned().collect(); - let report_election_event_id = election_report.reports[0].election_event_id.clone(); - let report_tenant_id = election_report.reports[0].tenant_id.clone(); - let report_election_id = election_report.reports[0].election_id.clone(); + let first_report = election_report + .reports + .first() + .context("missing report in election_report")?; + let report_election_event_id = first_report.election_event_id.clone(); + let report_tenant_id = first_report.tenant_id.clone(); + let report_election_id = first_report.election_id.as_ref(); - for area in areas { - let documents = get_area_document_paths( - area.id.clone(), - report_election_id.to_string(), - base_tally_path, - ); + for area in report_areas { + let documents = get_area_document_paths(&area.id, report_election_id, base_tally_path); save_area_documents( hasura_transaction, &report_tenant_id, &report_election_event_id, - &report_election_id, + report_election_id, 
&documents, results_event_id, None, @@ -880,8 +889,8 @@ pub async fn save_result_documents( /// Builds [`ResultDocumentPaths`] for a single area’s Velvet `generate-reports` subdirectory. fn get_area_document_paths( - area_id: String, - election_id: String, + area_id: &str, + election_id: &str, base_path: &Path, ) -> ResultDocumentPaths { let folder_path = base_path.join(format!( @@ -920,7 +929,8 @@ fn get_area_document_paths( /// /// # Errors /// -/// Propagates failures from [`generic_save_documents`], Hasura inserts, or SQLite mirror updates. +/// Propagates failures from [`generic_save_documents`], Hasura inserts, or `SQLite` mirror updates. +#[allow(clippy::future_not_send)] #[instrument(err, skip(hasura_transaction))] async fn save_area_documents( hasura_transaction: &Transaction<'_>, diff --git a/packages/windmill/src/services/ceremonies/results.rs b/packages/windmill/src/services/ceremonies/results.rs index ec29a1fd2f..35b4945bca 100644 --- a/packages/windmill/src/services/ceremonies/results.rs +++ b/packages/windmill/src/services/ceremonies/results.rs @@ -24,9 +24,12 @@ use sequent_core::sqlite::results_event::find_results_event_sqlite; use sequent_core::types::ceremonies::{TallySessionDocuments, TallyType}; use sequent_core::types::hasura::core::TallySessionExecution; use sequent_core::types::hasura::core::{Area, TallySession}; -use sequent_core::types::results::*; +use sequent_core::types::results::{ + ResultsAreaContest, ResultsAreaContestCandidate, ResultsContest, ResultsContestCandidate, + ResultsElection, EXTENDED_METRICS, PROCESS_RESULTS, +}; use sequent_core::util::temp_path::get_file_size; -use serde_json::json; +use serde_json::{json, Map, Value}; use std::cmp; use std::path::PathBuf; use tempfile::{NamedTempFile, TempPath}; @@ -38,6 +41,20 @@ use velvet::pipes::generate_db::DATABASE_FILENAME; use velvet::pipes::generate_reports::ElectionReportDataComputed; use velvet::pipes::pipe_name::PipeNameOutputDir; +/// Converts `u64` counts to `f64` for 
percentage math (same rounding as `as f64`). +#[allow(clippy::cast_precision_loss)] +#[inline] +const fn u64_to_f64(n: u64) -> f64 { + n as f64 +} + +/// Converts `usize` ranks to `i64` for persistence (same wrapping as `as i64` on overflow). +#[allow(clippy::cast_possible_wrap)] +#[inline] +const fn usize_to_i64(value: usize) -> i64 { + value as i64 +} + /// Inserts contest, area-contest, election, and candidate result rows for `results_event_id` from /// Velvet’s computed [`ElectionReportDataComputed`] vector. /// @@ -49,6 +66,7 @@ use velvet::pipes::pipe_name::PipeNameOutputDir; /// /// Percent-to-fraction conversions that fail `try_into`, or any Postgres insert error from the /// `insert_results_*` helpers. +#[allow(clippy::too_many_lines)] #[instrument(skip_all)] pub async fn save_results( hasura_transaction: &Transaction<'_>, @@ -64,7 +82,7 @@ pub async fn save_results( let mut results_area_contest_candidates: Vec = Vec::new(); for election in &results { let total_voters_percent: f64 = - (election.total_votes as f64) / (cmp::max(election.census, 1) as f64); + u64_to_f64(election.total_votes) / u64_to_f64(cmp::max(election.census, 1)); results_elections.push(ResultsElection { id: Uuid::new_v4().into(), tenant_id: tenant_id.into(), @@ -72,8 +90,8 @@ pub async fn save_results( election_id: election.election_id.clone(), results_event_id: results_event_id.into(), name: None, - elegible_census: Some(election.census as i64), - total_voters: Some(election.total_votes as i64), + elegible_census: Some(election.census.cast_signed()), + total_voters: Some(election.total_votes.cast_signed()), created_at: None, last_updated_at: None, labels: None, @@ -83,11 +101,12 @@ pub async fn save_results( }); for contest in &election.reports { - if contest.contest_result.is_none() || contest.contest.is_none() { + let Some(contest_result) = contest.contest_result.clone() else { continue; - } - let contest_result = contest.contest_result.clone().unwrap(); - let current_contest = 
contest.contest.clone().unwrap(); + }; + let Some(current_contest) = contest.contest.clone() else { + continue; + }; let contest_total_votes_percent: f64 = contest_result.percentage_total_votes / 100.0; let auditable_votes_percent: f64 = contest_result.percentage_auditable_votes / 100.0; @@ -105,12 +124,13 @@ pub async fn save_results( let contest_result_ext_metrics = contest_result.extended_metrics.unwrap_or_default(); let extended_metrics_value = serde_json::to_value(contest_result_ext_metrics) .expect("Failed to convert to JSON"); - let votes_base: f64 = cmp::max(contest_result_ext_metrics.total_weight, 1) as f64; - let mut annotations = json!({}); - annotations[EXTENDED_METRICS] = extended_metrics_value; + let votes_base: f64 = u64_to_f64(cmp::max(contest_result_ext_metrics.total_weight, 1)); + let mut annotation_map = Map::new(); + annotation_map.insert(EXTENDED_METRICS.to_string(), extended_metrics_value); if let Some(process_results) = contest_result.process_results.clone() { - annotations[PROCESS_RESULTS] = process_results; + annotation_map.insert(PROCESS_RESULTS.to_string(), process_results); } + let annotations = Value::Object(annotation_map); if let Some(area) = &contest.area { results_area_contests.push(ResultsAreaContest { @@ -121,32 +141,36 @@ pub async fn save_results( contest_id: current_contest.id.clone(), area_id: area.id.clone(), results_event_id: results_event_id.into(), - elegible_census: Some(contest_result.census as i64), - total_votes: Some(contest_result.total_votes as i64), + elegible_census: Some(contest_result.census.cast_signed()), + total_votes: Some(contest_result.total_votes.cast_signed()), total_votes_percent: Some( contest_total_votes_percent.clamp(0.0, 1.0).try_into()?, ), - total_auditable_votes: Some(contest_result.auditable_votes as i64), + total_auditable_votes: Some(contest_result.auditable_votes.cast_signed()), total_auditable_votes_percent: Some( auditable_votes_percent.clamp(0.0, 1.0).try_into()?, ), - total_valid_votes: 
Some(contest_result.total_valid_votes as i64), + total_valid_votes: Some(contest_result.total_valid_votes.cast_signed()), total_valid_votes_percent: Some( total_valid_votes_percent.clamp(0.0, 1.0).try_into()?, ), - total_invalid_votes: Some(contest_result.total_invalid_votes as i64), + total_invalid_votes: Some(contest_result.total_invalid_votes.cast_signed()), total_invalid_votes_percent: Some( total_invalid_votes_percent.clamp(0.0, 1.0).try_into()?, ), - explicit_invalid_votes: Some(contest_result.invalid_votes.explicit as i64), + explicit_invalid_votes: Some( + contest_result.invalid_votes.explicit.cast_signed(), + ), explicit_invalid_votes_percent: Some( explicit_invalid_votes_percent.clamp(0.0, 1.0).try_into()?, ), - implicit_invalid_votes: Some(contest_result.invalid_votes.implicit as i64), + implicit_invalid_votes: Some( + contest_result.invalid_votes.implicit.cast_signed(), + ), implicit_invalid_votes_percent: Some( implicit_invalid_votes_percent.clamp(0.0, 1.0).try_into()?, ), - blank_votes: Some(contest_result.total_blank_votes as i64), + blank_votes: Some(contest_result.total_blank_votes.cast_signed()), blank_votes_percent: Some( total_blank_votes_percent.clamp(0.0, 1.0).try_into()?, ), @@ -158,7 +182,7 @@ pub async fn save_results( }); for candidate in &contest.candidate_result { - let cast_votes_percent: f64 = (candidate.total_count as f64) / votes_base; + let cast_votes_percent: f64 = u64_to_f64(candidate.total_count) / votes_base; results_area_contest_candidates.push(ResultsAreaContestCandidate { id: Uuid::new_v4().into(), tenant_id: tenant_id.into(), @@ -168,9 +192,9 @@ pub async fn save_results( candidate_id: candidate.candidate.id.clone(), results_event_id: results_event_id.into(), area_id: area.id.clone(), - cast_votes: Some(candidate.total_count as i64), + cast_votes: Some(candidate.total_count.cast_signed()), cast_votes_percent: Some(cast_votes_percent.clamp(0.0, 1.0).try_into()?), - winning_position: candidate.winning_position.map(|val| val as 
i64), + winning_position: candidate.winning_position.map(usize_to_i64), points: None, created_at: None, last_updated_at: None, @@ -187,11 +211,15 @@ pub async fn save_results( election_id: election.election_id.clone(), contest_id: current_contest.id.clone(), results_event_id: results_event_id.into(), - elegible_census: Some(contest_result.census as i64), - total_valid_votes: Some(contest_result.total_valid_votes as i64), - explicit_invalid_votes: Some(contest_result.invalid_votes.explicit as i64), - implicit_invalid_votes: Some(contest_result.invalid_votes.implicit as i64), - blank_votes: Some(contest_result.total_blank_votes as i64), + elegible_census: Some(contest_result.census.cast_signed()), + total_valid_votes: Some(contest_result.total_valid_votes.cast_signed()), + explicit_invalid_votes: Some( + contest_result.invalid_votes.explicit.cast_signed(), + ), + implicit_invalid_votes: Some( + contest_result.invalid_votes.implicit.cast_signed(), + ), + blank_votes: Some(contest_result.total_blank_votes.cast_signed()), voting_type: current_contest.voting_type.clone(), counting_algorithm: current_contest .counting_algorithm @@ -201,7 +229,7 @@ pub async fn save_results( last_updated_at: None, labels: None, annotations: Some(annotations), - total_invalid_votes: Some(contest_result.total_invalid_votes as i64), + total_invalid_votes: Some(contest_result.total_invalid_votes.cast_signed()), total_invalid_votes_percent: Some( total_invalid_votes_percent.clamp(0.0, 1.0).try_into()?, ), @@ -217,19 +245,19 @@ pub async fn save_results( blank_votes_percent: Some( total_blank_votes_percent.clamp(0.0, 1.0).try_into()?, ), - total_votes: Some(contest_result.total_votes as i64), + total_votes: Some(contest_result.total_votes.cast_signed()), total_votes_percent: Some( contest_total_votes_percent.clamp(0.0, 1.0).try_into()?, ), documents: None, - total_auditable_votes: Some(contest_result.auditable_votes as i64), + total_auditable_votes: 
Some(contest_result.auditable_votes.cast_signed()), total_auditable_votes_percent: Some( auditable_votes_percent.clamp(0.0, 1.0).try_into()?, ), }); for candidate in &contest.candidate_result { - let cast_votes_percent: f64 = (candidate.total_count as f64) / votes_base; + let cast_votes_percent: f64 = u64_to_f64(candidate.total_count) / votes_base; results_contest_candidates.push(ResultsContestCandidate { id: Uuid::new_v4().into(), tenant_id: tenant_id.into(), @@ -238,8 +266,8 @@ pub async fn save_results( contest_id: current_contest.id.clone(), candidate_id: candidate.candidate.id.clone(), results_event_id: results_event_id.into(), - cast_votes: Some(candidate.total_count as i64), - winning_position: candidate.winning_position.map(|val| val as i64), + cast_votes: Some(candidate.total_count.cast_signed()), + winning_position: candidate.winning_position.map(usize_to_i64), points: None, created_at: None, last_updated_at: None, @@ -301,11 +329,12 @@ pub async fn save_results( } /// When `force_new_id` is set or the tally gained new session batches, inserts a new `results_event` -/// row (sourced from SQLite when a transaction is supplied) so later writes target a fresh id. +/// row (sourced from `SQLite` when a transaction is supplied) so later writes target a fresh id. /// /// # Errors /// -/// SQLite lookup failures, missing results-event metadata, or Postgres insert failures. +/// `SQLite` lookup failures, missing results-event metadata, or Postgres insert failures. +#[allow(clippy::future_not_send)] #[instrument(skip_all)] pub async fn generate_results_id_if_necessary( hasura_transaction: &Transaction<'_>, @@ -350,6 +379,7 @@ pub async fn generate_results_id_if_necessary( /// /// # Errors /// Should never return an error. 
+#[allow(clippy::future_not_send, clippy::large_futures)] #[instrument(skip_all)] pub async fn process_results_tables( hasura_transaction: &Transaction<'_>, @@ -409,13 +439,14 @@ pub async fn process_results_tables( } } -/// Updates the SQLite results database tables and uploads the artifact to object storage, +/// Updates the `SQLite` results database tables and uploads the artifact to object storage, /// and returns the active `results_event_id` and document handles. /// /// # Errors /// -/// SQLite open/transaction failures, async errors propagated through `block_in_place`, document +/// `SQLite` open/transaction failures, async errors propagated through `block_in_place`, document /// upload failures, or missing filesystem paths when preparing uploads. +#[allow(clippy::large_futures)] #[instrument(skip(hasura_transaction, state_opt, previous_execution, areas))] pub async fn populate_results_tables( hasura_transaction: &Transaction<'_>, @@ -437,12 +468,9 @@ pub async fn populate_results_tables( let database_path = base_database_path.join(DATABASE_FILENAME); let document_id = Uuid::new_v4().to_string(); - let results_event_id_opt = if !is_empty { + let results_event_id_opt = if is_empty { let results_event_id_opt = tokio::task::block_in_place(|| -> anyhow::Result> { - let mut sqlite_connection = Connection::open(&database_path)?; - let sqlite_transaction = sqlite_connection.transaction()?; - let process_result = tokio::runtime::Handle::current().block_on(async { process_results_tables( hasura_transaction, @@ -456,18 +484,20 @@ pub async fn populate_results_tables( areas, default_language, tally_type_enum, - Some(&sqlite_transaction), + None, force_new_id, ) .await })?; - sqlite_transaction.commit()?; Ok(process_result) })?; results_event_id_opt } else { let results_event_id_opt = tokio::task::block_in_place(|| -> anyhow::Result> { + let mut sqlite_connection = Connection::open(&database_path)?; + let sqlite_transaction = sqlite_connection.transaction()?; + let 
process_result = tokio::runtime::Handle::current().block_on(async { process_results_tables( hasura_transaction, @@ -481,11 +511,12 @@ pub async fn populate_results_tables( areas, default_language, tally_type_enum, - None, + Some(&sqlite_transaction), force_new_id, ) .await })?; + sqlite_transaction.commit()?; Ok(process_result) })?; results_event_id_opt @@ -504,13 +535,13 @@ pub async fn populate_results_tables( tenant_id, Some(election_event_id.to_string()), &file_name, - Some(document_id.to_string()), + Some(document_id.clone()), false, ) .await?; let documents = TallySessionDocuments { - sqlite: Some(document_id.to_string()), + sqlite: Some(document_id.clone()), xlsx: None, }; diff --git a/packages/windmill/src/services/ceremonies/serialize_logs.rs b/packages/windmill/src/services/ceremonies/serialize_logs.rs index fdb95e8e14..f9069c336b 100644 --- a/packages/windmill/src/services/ceremonies/serialize_logs.rs +++ b/packages/windmill/src/services/ceremonies/serialize_logs.rs @@ -13,6 +13,7 @@ use tracing::{event, instrument, Level}; /// /// Panics if converting the on-board timestamp to milliseconds would overflow `u64` multiplication /// by 1000 (`expect("timestamp millis overflow")`). 
+#[must_use] pub fn message_to_log(message: &Message) -> Log { let batch_number = message.statement.get_batch_number(); let timestamp = message @@ -20,7 +21,7 @@ pub fn message_to_log(message: &Message) -> Log { .get_timestamp() .checked_mul(1000) .expect("timestamp millis overflow"); - let datetime = ISO8601::timestamp_ms_utc_to_date(timestamp as i64); + let datetime = ISO8601::timestamp_ms_utc_to_date(timestamp.cast_signed()); Log { created_date: ISO8601::to_string(&datetime), @@ -44,7 +45,7 @@ pub fn print_messages(messages: &[Message], board_name: &str) -> Result<()> { let sorted_logs = sort_logs(&logs); event!(Level::INFO, "printing messages for board {}", board_name); - for log in sorted_logs.iter() { + for log in &sorted_logs { event!(Level::INFO, "{}: {}", log.created_date, log.log_text); } @@ -67,7 +68,8 @@ pub fn generate_logs( .iter() .filter(|message| { message.statement.get_timestamp() >= next_timestamp - && batch_ids.contains(&(message.statement.get_batch_number() as i64)) + && i64::try_from(message.statement.get_batch_number()) + .is_ok_and(|n| batch_ids.contains(&n)) }) .collect(); let logs: Vec = relevant_messages diff --git a/packages/windmill/src/services/ceremonies/tally_ceremony.rs b/packages/windmill/src/services/ceremonies/tally_ceremony.rs index f2f25d8c08..ace6b6b55f 100644 --- a/packages/windmill/src/services/ceremonies/tally_ceremony.rs +++ b/packages/windmill/src/services/ceremonies/tally_ceremony.rs @@ -31,11 +31,14 @@ use b3::messages::newtypes::BatchNumber; use deadpool_postgres::Transaction; use futures::try_join; use sequent_core::ballot::{AllowTallyStatus, ContestEncryptionPolicy}; -use sequent_core::serialization::deserialize_with_path::*; +use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::services::area_tree::ContestsData; use sequent_core::services::area_tree::TreeNode; use sequent_core::services::jwt::JwtClaims; -use sequent_core::types::ceremonies::*; +use 
sequent_core::types::ceremonies::{ + CeremoniesPolicy, KeysCeremonyExecutionStatus, KeysCeremonyStatus, TallyCeremonyStatus, + TallyElection, TallyElectionStatus, TallyExecutionStatus, TallyTrustee, TallyTrusteeStatus, +}; use sequent_core::types::hasura::core::KeysCeremony; use sequent_core::types::hasura::core::{AreaContest, TallySessionConfiguration}; use sequent_core::types::hasura::core::{ @@ -50,6 +53,13 @@ use std::str::FromStr; use tracing::{event, instrument, Level}; use uuid::Uuid; +/// Converts a `usize` to an `i64`. +#[allow(clippy::cast_possible_wrap)] +#[inline] +const fn usize_count_to_i64(n: usize) -> i64 { + n as i64 +} + /// Tuple containing the latest execution, owning session, contests, and ballot styles for the requested elections. type LastTallySessionExecutionBundle = ( TallySessionExecution, @@ -83,11 +93,8 @@ pub async fn find_last_tally_session_execution_and_all_related_data( ) .await?; - let tally_session_execution = match tally_session_execution { - Some(tally_session_execution) => tally_session_execution, - None => { - return Ok(None); - } + let Some(tally_session_execution) = tally_session_execution else { + return Ok(None); }; let tally_session = get_tally_session_by_id( @@ -164,7 +171,8 @@ pub async fn find_keys_ceremony( return Err(anyhow!("Elections have different keys ceremonies")); } - let Some(keys_ceremony_id) = elections[0].keys_ceremony_id.clone() else { + let first_election = elections.first().ok_or_else(|| anyhow!("No elections"))?; + let Some(keys_ceremony_id) = first_election.keys_ceremony_id.clone() else { return Err(anyhow!("Election has no keys ceremony")); }; @@ -223,6 +231,7 @@ fn generate_initial_tally_status( /// /// Postgres failures from `get_tally_session_highest_batch` / `insert_tally_session_contest`, or /// `Err` when a referenced contest id is missing from `contests_map`. 
+#[allow(clippy::implicit_hasher)] #[instrument(err, skip(hasura_transaction))] pub async fn insert_tally_session_contests( hasura_transaction: &Transaction<'_>, @@ -309,6 +318,7 @@ fn get_area_contests_for_election_ids( /// /// Permission/publish validation failures, area tree construction errors, keys ceremony issues, /// Postgres insert failures, missing bulletin boards, or electoral log write errors. +#[allow(clippy::too_many_lines)] #[instrument(err, skip(transaction))] pub async fn create_tally_ceremony( transaction: &Transaction<'_>, @@ -394,7 +404,7 @@ pub async fn create_tally_ceremony( .map(|contest| (contest.id.clone(), contest.clone())) .collect(); - let basic_areas = areas.iter().map(|area| area.into()).collect(); + let basic_areas = areas.iter().map(Into::into).collect(); let areas_tree = TreeNode::<()>::from_areas(basic_areas)?; event!(Level::INFO, "areas_tree {:?}", area_contests); @@ -432,9 +442,16 @@ pub async fn create_tally_ceremony( let tally_execution_status = match keys_ceremony_policy { CeremoniesPolicy::AUTOMATED_CEREMONIES => TallyExecutionStatus::IN_PROGRESS, - _ => TallyExecutionStatus::STARTED, + CeremoniesPolicy::MANUAL_CEREMONIES => TallyExecutionStatus::STARTED, }; + let threshold_i32 = i32::try_from(keys_ceremony.threshold).map_err(|_| { + anyhow!( + "Keys ceremony threshold {} does not fit in i32", + keys_ceremony.threshold + ) + })?; + let _tally_session = insert_tally_session( transaction, &tenant_id, @@ -444,7 +461,7 @@ pub async fn create_tally_ceremony( &tally_session_id, &keys_ceremony_id, tally_execution_status, - keys_ceremony.threshold as i32, + threshold_i32, Some(final_configuration.clone()), &tally_type, annotations, @@ -476,10 +493,6 @@ pub async fn create_tally_ceremony( ) .await?; - // get the election event - let election_event = - get_election_event_by_id(transaction, &tenant_id, &election_event_id).await?; - // Save this in the electoral log let board_name = 
get_election_event_board(election_event.bulletin_board_reference.clone()) .with_context(|| "missing bulletin board")?; @@ -528,24 +541,19 @@ pub async fn update_tally_ceremony( ) -> Result<()> { let current_status = tally_session .execution_status - .map(|value| { + .map_or(TallyExecutionStatus::STARTED, |value| { TallyExecutionStatus::from_str(&value).unwrap_or(TallyExecutionStatus::STARTED) - }) - .unwrap_or(TallyExecutionStatus::STARTED); + }); let expected_status: Vec = match current_status { - TallyExecutionStatus::STARTED => vec![TallyExecutionStatus::CANCELLED], - TallyExecutionStatus::CONNECTED => vec![ - TallyExecutionStatus::IN_PROGRESS, - TallyExecutionStatus::CANCELLED, - ], - TallyExecutionStatus::IN_PROGRESS => vec![TallyExecutionStatus::CANCELLED], - TallyExecutionStatus::AWAITING_INPUT => vec![ + TallyExecutionStatus::STARTED | TallyExecutionStatus::IN_PROGRESS => { + vec![TallyExecutionStatus::CANCELLED] + } + TallyExecutionStatus::CONNECTED | TallyExecutionStatus::AWAITING_INPUT => vec![ TallyExecutionStatus::IN_PROGRESS, TallyExecutionStatus::CANCELLED, ], - TallyExecutionStatus::SUCCESS => vec![], - TallyExecutionStatus::CANCELLED => vec![], + TallyExecutionStatus::SUCCESS | TallyExecutionStatus::CANCELLED => vec![], }; if !expected_status.contains(&new_execution_status) { @@ -574,7 +582,7 @@ pub async fn update_tally_ceremony( .collect::>() .len(); - if tally_session.threshold > num_connected_trustees as i64 + if tally_session.threshold > usize_count_to_i64(num_connected_trustees) && new_execution_status != TallyExecutionStatus::CANCELLED { return Err(anyhow!( @@ -622,7 +630,7 @@ pub async fn update_tally_ceremony( electoral_log .post_tally_open( - election_event_id.to_string(), + election_event_id.clone(), tally_elections_ids.clone(), Some(user_id), Some(username), @@ -641,6 +649,7 @@ pub async fn update_tally_ceremony( /// /// Missing sessions/executions, invalid execution states, unknown trustees, mismatched ciphertext /// (returns 
`Ok(false)`), Postgres failures, missing bulletin boards, or electoral log errors. +#[allow(clippy::too_many_lines)] #[instrument(err, skip(transaction))] pub async fn set_private_key( transaction: &Transaction<'_>, @@ -677,10 +686,9 @@ pub async fn set_private_key( let current_status = tally_session .execution_status - .map(|value| { + .map_or(TallyExecutionStatus::STARTED, |value| { TallyExecutionStatus::from_str(&value).unwrap_or(TallyExecutionStatus::STARTED) - }) - .unwrap_or(TallyExecutionStatus::STARTED); + }); if TallyExecutionStatus::STARTED != current_status && TallyExecutionStatus::CONNECTED != current_status @@ -767,7 +775,7 @@ pub async fn set_private_key( .collect::>(); // enough trustees connected, so change tally execution status to connected - if connected_trustees.len() as i64 >= keys_ceremony.threshold { + if usize_count_to_i64(connected_trustees.len()) >= keys_ceremony.threshold { update_tally_session_status( transaction.clone(), tenant_id, @@ -778,7 +786,6 @@ pub async fn set_private_key( ) .await?; } - println!("after update status"); // get the election event let election_event = get_election_event_by_id(transaction, tenant_id, election_event_id).await?; @@ -808,7 +815,7 @@ pub async fn set_private_key( .post_key_insertion( election_event_id.to_string(), found_trustee.name.clone(), - Some(user_id.to_string()), + Some(user_id.clone()), username.clone(), tally_elections_ids, ) @@ -862,10 +869,10 @@ pub async fn set_tally_session_completed( let username = annotations .get("executer_username") - .and_then(|val| val.as_str().map(|s| s.to_string())); + .and_then(|val| val.as_str().map(std::string::ToString::to_string)); let user_id = annotations .get("executer_user_id") - .and_then(|val| val.as_str().map(|s| s.to_string())); + .and_then(|val| val.as_str().map(std::string::ToString::to_string)); let board_name = get_election_event_board(election_event.bulletin_board_reference.clone()) .with_context(|| "missing bulletin board")?; @@ -881,7 +888,7 @@ 
pub async fn set_tally_session_completed( electoral_log .post_tally_close( - election_event_id.to_string(), + election_event_id.clone(), tally_elections_ids, user_id, username, diff --git a/packages/windmill/src/services/ceremonies/tally_progress.rs b/packages/windmill/src/services/ceremonies/tally_progress.rs index 621422599c..6e4f83b81f 100644 --- a/packages/windmill/src/services/ceremonies/tally_progress.rs +++ b/packages/windmill/src/services/ceremonies/tally_progress.rs @@ -10,14 +10,21 @@ use sequent_core::types::{ use std::collections::{HashMap, HashSet}; use tracing::{event, instrument, Level}; +/// Converts contest counts to `f64` for progress scaling (same as `as f64`). +#[allow(clippy::cast_precision_loss)] +#[inline] +const fn usize_to_f64(n: usize) -> f64 { + n as f64 +} + /// Collects distinct batch numbers from `messages` whose statement kind matches `kind`. #[instrument(skip_all)] -fn get_session_ids_by_type(messages: &[Message], kind: StatementType) -> Vec { +fn get_session_ids_by_type(messages: &[Message], kind: &StatementType) -> Vec { let mut plaintext_batch_ids: Vec = messages .iter() .map(|message| { - if kind == message.statement.get_kind() { - message.statement.get_batch_number() as i64 + if message.statement.get_kind().eq(kind) { + i64::try_from(message.statement.get_batch_number()).unwrap_or(-1) } else { -1i64 } @@ -60,17 +67,18 @@ pub async fn generate_tally_progress( .get(&contest.election_id) .cloned() .unwrap_or(vec![]); - batch_ids.push(contest.session_id as i64); + batch_ids.push(i64::from(contest.session_id)); complete_map.insert(contest.election_id.clone(), batch_ids.clone()); } - let finished_batch_ids: Vec = get_session_ids_by_type(messages, StatementType::Plaintexts); + let finished_batch_ids: Vec = + get_session_ids_by_type(messages, &StatementType::Plaintexts); let mut decrypting_batch_ids: Vec = - get_session_ids_by_type(messages, StatementType::DecryptionFactors); + get_session_ids_by_type(messages, 
&StatementType::DecryptionFactors); decrypting_batch_ids.retain(|value| !finished_batch_ids.contains(value)); - let mut mixing_batch_ids: Vec = get_session_ids_by_type(messages, StatementType::Mix); + let mut mixing_batch_ids: Vec = get_session_ids_by_type(messages, &StatementType::Mix); mixing_batch_ids.retain(|value| { !finished_batch_ids.contains(value) && !decrypting_batch_ids.contains(value) @@ -95,10 +103,10 @@ pub async fn generate_tally_progress( .len(); let total = election_batch_ids.len(); let mut progress: f64 = 100.0 - * (0.2 * (num_mixing_contests as f64) - + 0.4 * (num_decrypting_contests as f64) - + (num_finished_contests as f64)) - / (total as f64); + * (0.2 * usize_to_f64(num_mixing_contests) + + 0.4 * usize_to_f64(num_decrypting_contests) + + usize_to_f64(num_finished_contests)) + / usize_to_f64(total); // clamp values to 0-100 progress = progress.clamp(0.0, 100.0); let new_status = if num_finished_contests >= total { diff --git a/packages/windmill/src/services/ceremonies/tally_resolution.rs b/packages/windmill/src/services/ceremonies/tally_resolution.rs index 308ed78400..d3a200d0b1 100644 --- a/packages/windmill/src/services/ceremonies/tally_resolution.rs +++ b/packages/windmill/src/services/ceremonies/tally_resolution.rs @@ -23,6 +23,7 @@ use crate::services::electoral_log::ElectoralLog; /// Groups resolved IRV tie-break rows into a per-contest map keyed by the /// actual contest UUID. +#[must_use] pub fn build_tie_resolutions_map( resolutions: &[TallySessionResolution], ) -> HashMap> { @@ -52,6 +53,7 @@ pub fn build_tie_resolutions_map( /// Using `(contest_id, round_number)` as the key — rather than `contest_id` alone — /// allows area-level `ProcessBallotsAll` runs to produce independent resolutions for /// different rounds of the same contest without silently dropping any of them. 
+#[must_use] pub fn pending_resolution_exists( existing: &[TallySessionResolution], contest_id: &str, @@ -237,6 +239,7 @@ pub async fn handle_pending_irv_resolutions( /// Missing tally session or execution status, validation failures from [`validate_resolution_allowed`], /// missing resolutions, invalid candidate selections, Postgres update failures, or electoral log /// errors while recording tie outcomes. +#[allow(clippy::too_many_lines)] pub async fn submit_tally_resolution( hasura_transaction: &Transaction<'_>, tenant_id: &str, @@ -350,9 +353,13 @@ pub async fn submit_tally_resolution( let resolution_id = latest_resolution.id.clone(); let resubmission = is_resubmission(latest_resolution); - if !resubmission { - // First submission: resolve the existing pending record - submit_resolution( + if resubmission { + // Re-submission: admin changed their mind — update the existing record + info!( + "Re-submission detected for contest {} - updating existing resolution", + tie_resolution.contest_id + ); + update_resolution( hasura_transaction, tenant_id, election_event_id, @@ -362,7 +369,7 @@ pub async fn submit_tally_resolution( ) .await?; electoral_log - .post_tally_tie_resolved( + .post_tally_tie_resolution_updated( election_event_id.to_string(), tally_session.election_ids.clone(), tie_resolution.contest_id.clone(), @@ -371,14 +378,10 @@ pub async fn submit_tally_resolution( username.clone(), ) .await - .with_context(|| "error posting tally tie resolved to electoral log")?; + .with_context(|| "error posting tally tie resolution updated to electoral log")?; } else { - // Re-submission: admin changed their mind — update the existing record - info!( - "Re-submission detected for contest {} - updating existing resolution", - tie_resolution.contest_id - ); - update_resolution( + // First submission: resolve the existing pending record + submit_resolution( hasura_transaction, tenant_id, election_event_id, @@ -388,7 +391,7 @@ pub async fn submit_tally_resolution( ) .await?; 
electoral_log - .post_tally_tie_resolution_updated( + .post_tally_tie_resolved( election_event_id.to_string(), tally_session.election_ids.clone(), tie_resolution.contest_id.clone(), @@ -397,7 +400,7 @@ pub async fn submit_tally_resolution( username.clone(), ) .await - .with_context(|| "error posting tally tie resolution updated to electoral log")?; + .with_context(|| "error posting tally tie resolved to electoral log")?; } resolved_count = resolved_count @@ -484,6 +487,7 @@ pub fn extract_tied_candidate_ids( /// Returns `true` when the resolution already has a decision recorded (i.e. this is /// an admin changing their mind rather than the first submission). +#[must_use] pub fn is_resubmission(resolution: &TallySessionResolution) -> bool { resolution.status != TallySessionResolutionStatus::Pending } diff --git a/packages/windmill/src/services/ceremonies/velvet_tally.rs b/packages/windmill/src/services/ceremonies/velvet_tally.rs index c64e3a1b39..3150739d7d 100644 --- a/packages/windmill/src/services/ceremonies/velvet_tally.rs +++ b/packages/windmill/src/services/ceremonies/velvet_tally.rs @@ -19,7 +19,9 @@ use crate::services::reports::template_renderer::{ ReportOriginatedFrom, ReportOrigins, TemplateRenderer, }; use crate::services::tally_sheets::tally::create_tally_sheets_map; -use crate::services::temp_path::*; +use crate::services::temp_path::{ + PUBLIC_ASSETS_LOGO_IMG, PUBLIC_ASSETS_QRCODE_LIB, VELVET_BALLOT_IMAGES_TEMPLATE_TITLE, +}; use anyhow::{anyhow, Context, Result}; use deadpool_postgres::{Client as DbClient, Transaction}; use rusqlite::Connection; @@ -129,6 +131,7 @@ fn decode_plaintexts_to_biguints( /// /// Filesystem errors, JSON serialization failures, UUID parse errors in config builders, or CSV I/O /// issues while appending multi-contest ballots. 
+#[allow(clippy::implicit_hasher)] #[instrument(skip_all, err)] pub fn prepare_tally_for_area_contest( base_tempdir: PathBuf, @@ -200,7 +203,7 @@ pub fn prepare_tally_for_area_contest( let area_config = AreaConfig { id: parse_uuid_v4(&area_id)?, - name: area_contest.area.name.clone().unwrap_or("".into()), + name: area_contest.area.name.clone().unwrap_or_default(), tenant_id: parse_uuid_v4(&area_contest.contest.tenant_id)?, election_event_id: parse_uuid_v4(&area_contest.contest.election_event_id)?, election_id: parse_uuid_v4(&election_id)?, @@ -262,6 +265,7 @@ pub fn prepare_tally_for_area_contest( /// # Errors /// /// UUID parsing, date computation failures, or filesystem/serialization errors while writing JSON. +#[allow(clippy::implicit_hasher)] #[instrument(skip_all, err)] pub fn create_election_configs_blocking( base_tempdir: PathBuf, @@ -278,8 +282,9 @@ pub fn create_election_configs_blocking( let election_event_annotations: HashMap = election_event .annotations .clone() - .map(|annotations| deserialize_value(annotations).unwrap_or(Default::default())) + .and_then(|annotations| deserialize_value(annotations).ok()) .unwrap_or_default(); + for area_contest in area_contests { let election_id = area_contest.contest.election_id.clone(); let election_event_id = area_contest.contest.election_event_id.clone(); @@ -291,18 +296,13 @@ pub fn create_election_configs_blocking( let election_name_opt = election_opt.map(|election| election.get_name(&default_lang)); let election_alias_otp = election_opt.and_then(|e| e.get_alias(&default_lang)); - let election_description = election_opt - .map(|election| election.description.clone().unwrap_or("".to_string())) - .unwrap_or("".to_string()); + let election_description = election_opt.map_or(String::new(), |election| { + election.description.clone().unwrap_or_default() + }); let election_annotations: HashMap = election_opt - .map(|election| { - election - .annotations - .clone() - .map(|annotations| 
deserialize_value(annotations).unwrap_or(Default::default())) - .unwrap_or(Default::default()) - }) + .and_then(|election| election.annotations.clone()) + .and_then(|annotations| deserialize_value(annotations).ok()) .unwrap_or_default(); let election_presentation = @@ -325,20 +325,17 @@ pub fn create_election_configs_blocking( Some(election) => election.clone(), None => ElectionConfig { id: parse_uuid_v4(&election_id)?, - name: election_name_opt.unwrap_or("".to_string()), - alias: election_alias_otp.unwrap_or("".to_string()), + name: election_name_opt.unwrap_or_default(), + alias: election_alias_otp.unwrap_or_default(), description: election_description, annotations: election_annotations.clone(), election_event_annotations: election_event_annotations.clone(), dates: election_dates, tenant_id: parse_uuid_v4(&area_contest.contest.tenant_id)?, election_event_id: parse_uuid_v4(&area_contest.contest.election_event_id)?, - census: election_cast_votes_count - .map(|data| data.census as u64) - .unwrap_or(0), + census: election_cast_votes_count.map_or(0, |data| data.census.cast_unsigned()), total_votes: election_cast_votes_count - .map(|data| data.cast_votes as u64) - .unwrap_or(0), + .map_or(0, |data| data.cast_votes.cast_unsigned()), ballot_styles: vec![], areas: areas.clone(), presentation: election_presentation.clone(), @@ -538,7 +535,7 @@ pub async fn call_velvet(base_tally_path: PathBuf, pipe_id: &str) -> Result>, ) -> Result { - let basic_areas: Vec = areas.iter().map(|area| area.into()).collect(); + let basic_areas: Vec = areas.iter().map(Into::into).collect(); // map<(area_id,contest_id), tally_sheet> let tally_sheet_map = create_tally_sheets_map(tally_sheets); for area_contest in area_contests { diff --git a/packages/windmill/src/services/certificate_authority.rs b/packages/windmill/src/services/certificate_authority.rs index 8aab606868..a50426d8b7 100644 --- a/packages/windmill/src/services/certificate_authority.rs +++ 
b/packages/windmill/src/services/certificate_authority.rs @@ -31,6 +31,7 @@ pub struct ParsedCertificate { /// Splits a PEM bundle (potentially containing multiple certificates) into /// individual PEM strings, one per certificate. +#[must_use] pub fn split_pem_bundle(pem_content: &str) -> Vec { let mut certs = Vec::new(); let mut current = String::new(); @@ -154,6 +155,7 @@ fn parse_openssl_x509_output(output: &str, pem: &str) -> Result Option { for part in rdns.split(',') { let part = part.trim(); diff --git a/packages/windmill/src/services/cloudflare.rs b/packages/windmill/src/services/cloudflare.rs index 065df88e77..ce55086988 100644 --- a/packages/windmill/src/services/cloudflare.rs +++ b/packages/windmill/src/services/cloudflare.rs @@ -93,6 +93,7 @@ pub struct CloudflareError { impl CloudflareError { /// Create a new Cloudflare error. + #[must_use] pub fn new(msg: &str) -> CloudflareError { CloudflareError { details: msg.to_string(), @@ -214,7 +215,7 @@ pub async fn get_ruleset_by_phase( .into_iter() .find(|ruleset| ruleset.phase == ruleset_phase && ruleset.kind == "zone") { - Some(ruleset) => Some(ruleset.id.to_string()), + Some(ruleset) => Some(ruleset.id.clone()), None => None, }; diff --git a/packages/windmill/src/services/compress.rs b/packages/windmill/src/services/compress.rs index ca10d65100..63126d1343 100644 --- a/packages/windmill/src/services/compress.rs +++ b/packages/windmill/src/services/compress.rs @@ -31,7 +31,7 @@ pub fn create_archive_from_folder( ) -> Result<(TempPath, String, u64)> { let extension = if compress { ".tar.gz" } else { ".tar" }; let tar_temp_file = generate_temp_file("tally-", extension) - .with_context(|| format!("Error generating temporary {} file", extension))?; + .with_context(|| format!("Error generating temporary {extension} file"))?; // Reopen the temp file for writing. This handle will be used by the archiver/compressor. 
let file_write_handle = tar_temp_file diff --git a/packages/windmill/src/services/consolidation/acm_json.rs b/packages/windmill/src/services/consolidation/acm_json.rs index b5c5600819..c9d74984bd 100644 --- a/packages/windmill/src/services/consolidation/acm_json.rs +++ b/packages/windmill/src/services/consolidation/acm_json.rs @@ -40,23 +40,27 @@ const DEFAULT_MIRU_IP_ADDRESS: &str = "192.168.1.67"; const DEFAULT_MIRU_MAC_ADDRESS: &str = "3C:7E:5A:89:4D:2F"; /// Reads `MIRU_DEVICE_ID` or falls back to [`DEFAULT_MIRU_DEVICE_ID`]. +#[must_use] pub fn get_miru_device_id() -> String { - env::var("MIRU_DEVICE_ID").unwrap_or(DEFAULT_MIRU_DEVICE_ID.to_string()) + env::var("MIRU_DEVICE_ID").unwrap_or_else(|_| DEFAULT_MIRU_DEVICE_ID.to_string()) } /// Reads `MIRU_SERIAL_NUMBER` or falls back to [`DEFAULT_MIRU_SERIAL_NUMBER`]. +#[must_use] pub fn get_miru_serial_number() -> String { - env::var("MIRU_SERIAL_NUMBER").unwrap_or(DEFAULT_MIRU_SERIAL_NUMBER.to_string()) + env::var("MIRU_SERIAL_NUMBER").unwrap_or_else(|_| DEFAULT_MIRU_SERIAL_NUMBER.to_string()) } /// Reads `MIRU_IP_ADDRESS` or falls back to [`DEFAULT_MIRU_IP_ADDRESS`]. +#[must_use] pub fn get_miru_ip_address() -> String { - env::var("MIRU_IP_ADDRESS").unwrap_or(DEFAULT_MIRU_IP_ADDRESS.to_string()) + env::var("MIRU_IP_ADDRESS").unwrap_or_else(|_| DEFAULT_MIRU_IP_ADDRESS.to_string()) } /// Reads `_MIRU_MAC_ADDRESS` or falls back to [`DEFAULT_MIRU_MAC_ADDRESS`]. +#[must_use] pub fn get_miru_mac_address() -> String { - env::var("_MIRU_MAC_ADDRESS").unwrap_or(DEFAULT_MIRU_MAC_ADDRESS.to_string()) + env::var("_MIRU_MAC_ADDRESS").unwrap_or_else(|_| DEFAULT_MIRU_MAC_ADDRESS.to_string()) } /// Loads the tenant/event ECIES key pair from the vault, or generates and stores a new one. 
@@ -126,7 +130,7 @@ pub fn generate_acm_json( Ok(ACMJson { device_id: get_miru_device_id(), serial_number: get_miru_serial_number(), - station_id: area_annotations.station_id.to_string(), + station_id: area_annotations.station_id.clone(), station_name: area_annotations.station_name.clone(), event_id: election_event_annotations.event_id.clone(), event_name: election_event_annotations.event_name.clone(), diff --git a/packages/windmill/src/services/consolidation/acm_transaction.rs b/packages/windmill/src/services/consolidation/acm_transaction.rs index c62b3fa0d1..d368e1d4c9 100644 --- a/packages/windmill/src/services/consolidation/acm_transaction.rs +++ b/packages/windmill/src/services/consolidation/acm_transaction.rs @@ -17,16 +17,20 @@ const RANDOM_PART: u64 = 13212; /// /// Panics only if arithmetic around year/hour/second components or the final product overflows /// (should not occur for real wall-clock times). +#[must_use] pub fn generate_transaction_id() -> u64 { let now = Utc::now(); - let year = (now.year() as u64) - .checked_sub(2023u64) - .expect("year component underflow"); // Last two digits of the year (offset base) - let day = now.ordinal() as u64; // Day of the year (1 to 366) - let hour = (now.hour() as u64) + let year = u64::try_from( + now.year() + .checked_sub(2023) + .expect("year component underflow"), + ) + .expect("year offset fits u64"); // Last two digits of the year (offset base) + let day = u64::from(now.ordinal()); // Day of the year (1 to 366) + let hour = u64::from(now.hour()) .checked_add(1) .expect("hour component overflow"); - let second = (now.second() as u64) + let second = u64::from(now.second()) .checked_add(1) .expect("second component overflow"); diff --git a/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs b/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs index ea9dd8bf8c..c882536bd6 100644 --- a/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs +++ 
b/packages/windmill/src/services/consolidation/aes_256_cbc_encrypt.rs @@ -40,7 +40,7 @@ pub fn encrypt_file_aes_256_cbc( // Check if the command was successful if !output.status.success() { - return Err(anyhow::anyhow!("Command failed: {:?}", output)); + return Err(anyhow::anyhow!("Command failed: {output:?}")); } Ok(()) @@ -74,7 +74,7 @@ pub fn decrypt_file_aes_256_cbc( // Check if the command was successful if !output.status.success() { - return Err(anyhow::anyhow!("Command failed: {:?}", output)); + return Err(anyhow::anyhow!("Command failed: {output:?}")); } Ok(()) diff --git a/packages/windmill/src/services/consolidation/create_transmission_package_service.rs b/packages/windmill/src/services/consolidation/create_transmission_package_service.rs index 720af2ef3e..9be69d7a17 100644 --- a/packages/windmill/src/services/consolidation/create_transmission_package_service.rs +++ b/packages/windmill/src/services/consolidation/create_transmission_package_service.rs @@ -50,7 +50,9 @@ use sequent_core::types::date_time::TimeZone; use sequent_core::types::hasura::core::Document; use sequent_core::types::results::{ResultDocumentType, ResultDocuments}; use sequent_core::util::date_time::PHILIPPINO_TIMEZONE; -use sequent_core::util::temp_path::*; +use sequent_core::util::temp_path::{ + generate_temp_file, get_file_size, write_into_named_temp_file, +}; use tempfile::{tempdir, NamedTempFile}; use tracing::{info, instrument}; use uuid::Uuid; @@ -187,8 +189,8 @@ pub async fn generate_all_servers_document( for ccs_server in ccs_servers { let server_path = temp_dir_path.join(&ccs_server.tag); std::fs::create_dir(server_path.clone()) - .with_context(|| format!("Error generating directory {server_path:?}"))?; - let zip_file_path = server_path.join(format!("er_{}.zip", area_annotations.station_id)); + .with_context(|| format!("Error generating directory {}", server_path.display()))?; + let er_zip_file_path = server_path.join(format!("er_{}.zip", area_annotations.station_id)); 
create_transmission_package( eml_hash, eml, @@ -199,14 +201,15 @@ pub async fn generate_all_servers_document( &acm_key_pair, &ccs_server.public_key_pem, area_annotations, - &zip_file_path, + &er_zip_file_path, &server_signatures, election_annotations, ) .await?; let with_logs = ccs_server.send_logs.unwrap_or_default(); if with_logs { - let zip_file_path = server_path.join(format!("al_{}.zip", area_annotations.station_id)); + let al_zip_file_path = + server_path.join(format!("al_{}.zip", area_annotations.station_id)); create_logs_package( time_zone.clone(), now_utc, @@ -215,7 +218,7 @@ pub async fn generate_all_servers_document( &acm_key_pair, &ccs_server.public_key_pem, area_annotations, - &zip_file_path, + &al_zip_file_path, &server_signatures, logs, ) @@ -253,6 +256,7 @@ pub async fn generate_all_servers_document( /// # Errors /// /// Any failure in annotation validation, Velvet/tally prep, document upload, or persistence. +#[allow(clippy::too_many_lines)] #[instrument(err)] pub async fn create_transmission_package_service( tenant_id: &str, @@ -369,7 +373,7 @@ pub async fn create_transmission_package_service( }; basic_area.id == area_id }) - .map(|report_computed| report_computed.into()) + .map(Into::into) .collect(); let (base_compressed_xml, eml, eml_hash) = generate_base_compressed_xml( tally_id, @@ -384,16 +388,16 @@ pub async fn create_transmission_package_service( .await?; // upload .xz - let xz_name = format!("er_{}.xz", transaction_id); - let (temp_path, temp_path_string, file_size) = + let xz_name = format!("er_{transaction_id}.xz"); + let (xz_temp_path, xz_temp_path_string, xz_file_size) = write_into_named_temp_file(&base_compressed_xml, &xz_name, ".xz")?; let xz_document = upload_and_return_document( &hasura_transaction, - &temp_path_string, - file_size, + &xz_temp_path_string, + xz_file_size, "applization/xml", tenant_id, - Some(election_event.id.to_string()), + Some(election_event.id.clone()), &xz_name, None, false, @@ -402,22 +406,22 @@ pub async fn 
create_transmission_package_service( // upload eml let eml_name = format!("er_{transaction_id}.xml"); - let (temp_path, temp_path_string, file_size) = + let (_eml_temp_path, eml_temp_path_string, eml_file_size) = write_into_named_temp_file(&eml.as_bytes().to_vec(), &eml_name, ".eml")?; let eml_document = upload_and_return_document( &hasura_transaction, - &temp_path_string, - file_size, + &eml_temp_path_string, + eml_file_size, "applization/xml", tenant_id, - Some(election_event.id.to_string()), + Some(election_event.id.clone()), &eml_name, None, false, ) .await?; - let area_name = area.name.clone().unwrap_or("".into()); + let area_name = area.name.clone().unwrap_or_default(); let mut logs = if let Some(package) = found_package { package.logs.clone() } else { diff --git a/packages/windmill/src/services/consolidation/eml_generator.rs b/packages/windmill/src/services/consolidation/eml_generator.rs index 81e87c2335..c070e09135 100644 --- a/packages/windmill/src/services/consolidation/eml_generator.rs +++ b/packages/windmill/src/services/consolidation/eml_generator.rs @@ -4,15 +4,18 @@ //! Maps Velvet report rows and Miru-prefixed Hasura annotations into EML-shaped JSON. 
-use super::eml_types::*; -use crate::types::miru_plugin::*; +use super::eml_types::{ + EMLAffiliation, EMLCandidate, EMLContest, EMLCount, EMLCountMetric, EMLElection, EMLFile, + EMLHeader, EMLIdentifier, EMLOfficialStatusDetail, EMLSelection, EMLStatusItem, EMLTotalVotes, +}; +use crate::types::miru_plugin::{MiruCcsServer, MiruSbeiUser, MiruTallySessionData}; use anyhow::{anyhow, Context, Result}; use chrono::{DateTime, Utc}; use sequent_core::{ - ballot::*, + ballot::{Annotations, Candidate, Contest}, serialization::deserialize_with_path::{deserialize_str, deserialize_value}, types::{ - date_time::*, + date_time::{DateFormat, TimeZone}, hasura::core::{self, ElectionEvent, Trustee}, }, util::date_time::generate_timestamp, @@ -119,17 +122,17 @@ impl GetMetrics for ContestResult { EMLCountMetric { kind: "Total Number of Over Votes".into(), id: "OV".into(), - datum: extended_metrics.over_votes as i64, + datum: extended_metrics.over_votes.cast_signed(), }, EMLCountMetric { kind: "Total Number of Under Votes".into(), id: "UV".into(), - datum: extended_metrics.under_votes as i64, + datum: extended_metrics.under_votes.cast_signed(), }, EMLCountMetric { kind: "Total Number of Votes Actually".into(), id: "VV".into(), - datum: extended_metrics.votes_actually as i64, + datum: extended_metrics.votes_actually.cast_signed(), }, EMLCountMetric { kind: "Total Number of Registered Voters".into(), @@ -139,7 +142,7 @@ impl GetMetrics for ContestResult { EMLCountMetric { kind: "Total Number of Expected Votes".into(), id: "EV".into(), - datum: extended_metrics.expected_votes as i64, + datum: extended_metrics.expected_votes.cast_signed(), }, EMLCountMetric { kind: "Number of Zero Outs Executed".into(), @@ -154,7 +157,7 @@ impl GetMetrics for ContestResult { EMLCountMetric { kind: "Total Number of Valid Ballots".into(), id: "VB".into(), - datum: self.total_valid_votes as i64, + datum: self.total_valid_votes.cast_signed(), }, EMLCountMetric { kind: "Total Number of Stamped 
Ballots".into(), @@ -164,17 +167,17 @@ impl GetMetrics for ContestResult { EMLCountMetric { kind: "Total Number of Ballots In Ballot Box".into(), id: "BB".into(), - datum: self.total_votes as i64, + datum: self.total_votes.cast_signed(), }, EMLCountMetric { kind: "Abstentions".into(), id: "AB".into(), - datum: self.total_blank_votes as i64, + datum: self.total_blank_votes.cast_signed(), }, EMLCountMetric { kind: "Total Number of Invalid Ballots".into(), id: "IB".into(), - datum: self.total_invalid_votes as i64, + datum: self.total_invalid_votes.cast_signed(), }, EMLCountMetric { kind: "Total Number of Misread Ballots".into(), @@ -345,7 +348,7 @@ impl ValidateAnnotations for ElectionEvent { let annotations_js = self .annotations .clone() - .unwrap_or_else(|| Value::Object(Default::default())); + .unwrap_or_else(|| Value::Object(serde_json::Map::default())); let annotations: Annotations = deserialize_value(annotations_js).unwrap_or_default(); @@ -475,7 +478,7 @@ impl ValidateAnnotations for core::Election { let annotations_js = self .annotations .clone() - .unwrap_or_else(|| Value::Object(Default::default())); + .unwrap_or_else(|| Value::Object(serde_json::Map::default())); let annotations: Annotations = deserialize_value(annotations_js)?; @@ -581,7 +584,7 @@ impl ValidateAnnotations for core::Area { })?; let ccs_servers: Vec = - deserialize_str(&ccs_servers_js).map_err(|err| anyhow!("{}", err))?; + deserialize_str(&ccs_servers_js).map_err(|err| anyhow!("{err}"))?; let sbei_usernames_js = find_miru_annotation(MIRU_AREA_TRUSTEE_USERS, &annotations) .with_context(|| { @@ -591,7 +594,7 @@ impl ValidateAnnotations for core::Area { })?; let sbei_usernames: Vec = - deserialize_str(&sbei_usernames_js).map_err(|err| anyhow!("{}", err))?; + deserialize_str(&sbei_usernames_js).map_err(|err| anyhow!("{err}"))?; let country = find_miru_annotation(MIRU_AREA_COUNTRY, &annotations).with_context(|| { format!("Missing area annotation: '{MIRU_PLUGIN_PREPEND}:{MIRU_AREA_COUNTRY}'") @@ 
-626,7 +629,7 @@ impl ValidateAnnotations for core::Area { let annotations_js = self .annotations .clone() - .unwrap_or_else(|| Value::Object(Default::default())); + .unwrap_or_else(|| Value::Object(serde_json::Map::default())); let annotations: Annotations = deserialize_value(annotations_js).unwrap_or_default(); @@ -695,7 +698,7 @@ impl ValidateAnnotations for core::TallySession { })?; let tally_session_data: MiruTallySessionData = - deserialize_str(&tally_session_data_js).map_err(|err| anyhow!("{}", err))?; + deserialize_str(&tally_session_data_js).map_err(|err| anyhow!("{err}"))?; Ok(tally_session_data) } @@ -708,7 +711,7 @@ impl ValidateAnnotations for core::TallySession { let annotations_js = self .annotations .clone() - .unwrap_or_else(|| Value::Object(Default::default())); + .unwrap_or_else(|| Value::Object(serde_json::Map::default())); let annotations: Annotations = deserialize_value(annotations_js).unwrap_or_default(); let tally_session_data_js = find_miru_annotation_opt(MIRU_TALLY_SESSION_DATA, &annotations)?.unwrap_or_default(); @@ -950,7 +953,7 @@ pub fn render_eml_contest( }; Ok(EMLSelection { candidates: vec![candidate.clone()], - valid_votes: candidate_result.total_count as i64, + valid_votes: candidate_result.total_count.cast_signed(), }) }) .collect::, _>>()?; diff --git a/packages/windmill/src/services/consolidation/logs.rs b/packages/windmill/src/services/consolidation/logs.rs index 3476885941..38964a3031 100644 --- a/packages/windmill/src/services/consolidation/logs.rs +++ b/packages/windmill/src/services/consolidation/logs.rs @@ -21,13 +21,12 @@ pub fn create_transmission_package_log( Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Created transmission package xml for election '{}' ({}) and area '{}' ({})", - election_id, election_name, area_id, area_name + "Created transmission package xml for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name})", ), } } -/// Log line when audit logs are POSTed 
to a CCS server. +/// Log line when audit logs are `POSTed` to a CCS server. #[instrument(skip_all)] pub fn send_logs_to_ccs_log( datetime: &DateTime, @@ -41,8 +40,7 @@ pub fn send_logs_to_ccs_log( Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Sent logs for election '{}' ({}) and area '{}' ({}) to server '{}' ({}).", - election_id, election_name, area_id, area_name, server_name, server_address, + "Sent logs for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name}) to server '{server_name}' ({server_address}).", ), } } @@ -57,13 +55,12 @@ pub fn send_transmission_package_to_ccs_log( area_name: &str, server_name: &str, server_address: &str, - trustees: Vec, + trustees: &[String], ) -> Log { Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Sent transmission package xml for election '{}' ({}) and area '{}' ({}) to server '{}' ({}), signed by [{}].", - election_id, election_name, area_id, area_name, server_name, server_address, + "Sent transmission package xml for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name}) to server '{server_name}' ({server_address}), signed by [{}].", trustees.join(", ") ), } @@ -84,9 +81,7 @@ pub fn error_sending_logs_to_ccs_log( Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Error sending logs for election '{}' ({}) and area '{}' ({}) to server '{}' ({}): Error '{}'", - election_id, election_name, area_id, area_name, server_name, server_address, - error + "Error sending logs for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name}) to server '{server_name}' ({server_address}): Error '{error}'", ), } } @@ -101,15 +96,14 @@ pub fn error_sending_transmission_package_to_ccs_log( area_name: &str, server_name: &str, server_address: &str, - trustees: Vec, + trustees: &[String], error: &str, ) -> Log { Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Error sending transmission 
package xml for election '{}' ({}) and area '{}' ({}) to server '{}' ({}), signed by {}: Error '{}'", - election_id, election_name, area_id, area_name, server_name, server_address, - trustees.join(", "), error + "Error sending transmission package xml for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name}) to server '{server_name}' ({server_address}), signed by {}: Error '{error}'", + trustees.join(", "), ), } } @@ -127,8 +121,7 @@ pub fn sign_transmission_package_log( Log { created_date: ISO8601::to_string(datetime), log_text: format!( - "Signed transmission package xml for election '{}' ({}) and area '{}' ({}) by sbei '{}'", - election_id, election_name, area_id, area_name, sbei_id + "Signed transmission package xml for election '{election_id}' ({election_name}) and area '{area_id}' ({area_name}) by sbei '{sbei_id}'", ), } } diff --git a/packages/windmill/src/services/consolidation/rsa.rs b/packages/windmill/src/services/consolidation/rsa.rs index 489d9f916e..2213f9a0e7 100644 --- a/packages/windmill/src/services/consolidation/rsa.rs +++ b/packages/windmill/src/services/consolidation/rsa.rs @@ -4,7 +4,7 @@ //! RSA key generation and PKCS#12-driven signing via the bundled ECIES Java helper. 
-use anyhow::{Context, Result}; +use anyhow::{anyhow, Context, Result}; use openssl::rsa::{Padding, Rsa}; use sequent_core::signatures::ecies_encrypt::ECIES_TOOL_PATH; use sequent_core::signatures::shell::run_shell_command; @@ -49,7 +49,9 @@ pub fn encrypt_with_rsa_private_key(private_key_pem: &str, data: &[u8]) -> Resul .context("Failed to parse private key from PEM format")?; // Create a buffer to hold the encrypted data - let mut encrypted_data = vec![0; rsa.size() as usize]; + let key_size = + usize::try_from(rsa.size()).map_err(|_| anyhow!("RSA key size does not fit usize"))?; + let mut encrypted_data = vec![0; key_size]; // Encrypt the data using the RSA private key let encrypted_len = rsa @@ -68,10 +70,8 @@ pub fn encrypt_with_rsa_private_key(private_key_pem: &str, data: &[u8]) -> Resul /// /// Shell command failures from [`run_shell_command`]. pub fn derive_public_key_from_p12(pk12_file_path_string: &str, password: &str) -> Result { - let command = format!( - "java -jar {} public-key {} {}", - ECIES_TOOL_PATH, pk12_file_path_string, password - ); + let command = + format!("java -jar {ECIES_TOOL_PATH} public-key {pk12_file_path_string} {password}"); let public_pem = run_shell_command(&command)?.replace("\n\n", "\n"); @@ -92,11 +92,10 @@ pub fn rsa_sign_data( data_path: &str, ) -> Result { let command = format!( - "java -jar {} sign-rsa {} {} {}", - ECIES_TOOL_PATH, pk12_file_path_string, data_path, password + "java -jar {ECIES_TOOL_PATH} sign-rsa {pk12_file_path_string} {data_path} {password}" ); - let encrypted_base64 = run_shell_command(&command)?.replace("\n", ""); + let encrypted_base64 = run_shell_command(&command)?.replace('\n', ""); info!("ecies_sign_data: '{}'", encrypted_base64); diff --git a/packages/windmill/src/services/consolidation/send_transmission_package_service.rs b/packages/windmill/src/services/consolidation/send_transmission_package_service.rs index ef1e5eb661..964290df31 100644 --- 
a/packages/windmill/src/services/consolidation/send_transmission_package_service.rs +++ b/packages/windmill/src/services/consolidation/send_transmission_package_service.rs @@ -78,8 +78,8 @@ async fn send_package_to_ccs_server( SEND_ELECTION_RESULTS_API_PATH }; - let uri = format!("{}{}", ccs_server.address, base_url); - info!("Sending package to url {}", uri); + let uri = format!("{}{base_url}", ccs_server.address); + info!("Sending package to url {uri}"); let client = reqwest::Client::builder() .danger_accept_invalid_certs(true) .build()?; @@ -101,9 +101,8 @@ async fn send_package_to_ccs_server( .map_err(|err| anyhow!("{err:?}"))?; let response_str = format!("{response:?}"); info!( - "Response code: {}. Response: '{}'", + "Response code: {}. Response: '{response_str}'", response.status(), - response_str ); let is_success = response.status().is_success(); let text = response.text().await?; @@ -118,6 +117,7 @@ async fn send_package_to_ccs_server( } /// Picks the document with the latest `created_at` among `input_documents`. +#[must_use] #[instrument(skip_all)] pub fn get_latest_miru_document(input_documents: &[MiruDocument]) -> Option { let mut documents = input_documents.to_owned(); @@ -128,13 +128,7 @@ pub fn get_latest_miru_document(input_documents: &[MiruDocument]) -> Option b_date { - Ordering::Less - } else if a_date < b_date { - Ordering::Greater - } else { - Ordering::Equal - } + a_date.cmp(&b_date).reverse() }); documents.first().cloned() } @@ -325,6 +319,7 @@ async fn record_new_log( /// # Errors /// /// Missing documents, HTTP send failures, or DB update errors before all destinations complete. +#[allow(clippy::too_many_lines)] #[instrument(err)] pub async fn send_transmission_package_service( tenant_id: &str, @@ -364,7 +359,7 @@ pub async fn send_transmission_package_service( .await .with_context(|| format!("Error fetching area {area_id}"))? 
.ok_or_else(|| anyhow!("Can't find area {area_id}"))?; - let area_name = area.name.clone().unwrap_or("".into()); + let area_name = area.name.clone().unwrap_or_default(); let area_annotations = area.get_annotations()?; let tally_session = get_tally_session_by_id( @@ -399,10 +394,8 @@ pub async fn send_transmission_package_service( ) .await? .ok_or_else(|| { - anyhow!( - "Can't find document {}", - miru_document.document_ids.all_servers - ) + let id = &miru_document.document_ids.all_servers; + anyhow!("Can't find document {id}") })?; let mut compressed_zip = get_document_as_temp_file(tenant_id, &document).await?; @@ -420,9 +413,10 @@ pub async fn send_transmission_package_service( .map(|value| value.name.clone()) .collect(); - if transmission_area_election.threshold > -1 - && (miru_document.signatures.len() as i64) < transmission_area_election.threshold - { + let threshold_met = usize::try_from(transmission_area_election.threshold) + .map(|thresh| miru_document.signatures.len() < thresh) + .unwrap_or(false); + if transmission_area_election.threshold > -1 && threshold_met { info!( "Can't send to servers as number of signatures {} is less than threshold {}", miru_document.signatures.len(), @@ -444,8 +438,14 @@ pub async fn send_transmission_package_service( second_zip_folder_path.join(format!("er_{}.zip", area_annotations.station_id)); let election_name = election.get_name(&election.get_default_language()); match send_package_to_ccs_server(&second_zip_path, ccs_server, false).await { - Ok(_) => { + Ok(()) => { let time_now = Local::now(); + let sbei_miru_ids: Vec = new_miru_document + .signatures + .clone() + .into_iter() + .map(|signature| signature.sbei_miru_id.clone()) + .collect(); let new_log = send_transmission_package_to_ccs_log( &time_now, election_id, @@ -454,12 +454,7 @@ pub async fn send_transmission_package_service( &area_name, &ccs_server.name, &ccs_server.address, - new_miru_document - .signatures - .clone() - .into_iter() - .map(|signature| 
signature.sbei_miru_id.clone()) - .collect(), + &sbei_miru_ids, ); new_miru_document.servers_sent_to.push(MiruServerDocument { name: ccs_server.name.clone(), @@ -480,6 +475,12 @@ pub async fn send_transmission_package_service( Err(err) => { let error_str = format!("{err:?}"); let time_now = Local::now(); + let sbei_miru_ids: Vec = new_miru_document + .signatures + .clone() + .into_iter() + .map(|signature| signature.sbei_miru_id.clone()) + .collect(); let new_log = error_sending_transmission_package_to_ccs_log( &time_now, election_id, @@ -488,12 +489,7 @@ pub async fn send_transmission_package_service( &area_name, &ccs_server.name, &ccs_server.address, - new_miru_document - .signatures - .clone() - .into_iter() - .map(|signature| signature.sbei_miru_id.clone()) - .collect(), + &sbei_miru_ids, &error_str, ); new_miru_document.servers_sent_to.push(MiruServerDocument { @@ -518,7 +514,7 @@ pub async fn send_transmission_package_service( second_zip_folder_path.join(format!("al_{}.zip", area_annotations.station_id)); if with_logs { match send_package_to_ccs_server(&logs_zip_path, ccs_server, true).await { - Ok(_) => { + Ok(()) => { let new_log = send_logs_to_ccs_log( &Local::now(), election_id, diff --git a/packages/windmill/src/services/consolidation/signatures.rs b/packages/windmill/src/services/consolidation/signatures.rs index 76d5dfe439..adea9a97cd 100644 --- a/packages/windmill/src/services/consolidation/signatures.rs +++ b/packages/windmill/src/services/consolidation/signatures.rs @@ -9,7 +9,7 @@ use openssl::pkcs12::Pkcs12; use openssl::pkey::PKey; use sequent_core::signatures::ecies_encrypt::ECIES_TOOL_PATH; use sequent_core::signatures::shell::run_shell_command; -use sequent_core::util::temp_path::*; +use sequent_core::util::temp_path::generate_temp_file; use std::fs; use std::io::Read; use tempfile::{tempdir, NamedTempFile, TempPath}; @@ -51,7 +51,7 @@ pub fn ecdsa_sign_data( "java -jar {ECIES_TOOL_PATH} sign-ec {pk12_file_path_string} {data_path} {password}" 
); - let encrypted_base64 = run_shell_command(&command)?.replace("\n", ""); + let encrypted_base64 = run_shell_command(&command)?.replace('\n', ""); info!("ecdsa_sign_data: '{encrypted_base64}'"); @@ -90,7 +90,7 @@ pub fn get_p12_fingerprint(p12_cert_path: &TempPath) -> Result { let fingerprint_command = format!("openssl x509 -in {cert_temp_path_string} -noout -fingerprint -sha256",); - let fingerprint = run_shell_command(&fingerprint_command)?.replace("\n", ""); + let fingerprint = run_shell_command(&fingerprint_command)?.replace('\n', ""); Ok(fingerprint) } @@ -126,7 +126,7 @@ pub fn check_certificate_cas( intermediate_ca_file_path.to_string_lossy(), p12_cert_path.to_string_lossy(), ); - let verify_result = run_shell_command(&verify_command)?.replace("\n", ""); + let verify_result = run_shell_command(&verify_command)?.replace('\n', ""); if !verify_result.ends_with(": OK") { return Err(anyhow!(verify_result)); diff --git a/packages/windmill/src/services/consolidation/transmission_package.rs b/packages/windmill/src/services/consolidation/transmission_package.rs index 3861f40b41..c3725cac25 100644 --- a/packages/windmill/src/services/consolidation/transmission_package.rs +++ b/packages/windmill/src/services/consolidation/transmission_package.rs @@ -52,8 +52,11 @@ pub fn compress_hash_eml(eml: &str) -> Result<(Vec, String)> { let rendered_xml_hash = hash_sha256(eml.as_bytes()) .with_context(|| "Error hashing the rendered XML")? 
.iter() - .map(|byte| format!("{byte:02X}")) - .collect(); + .fold(String::new(), |mut acc, &byte| { + use std::fmt::Write as _; + let _ = write!(acc, "{byte:02X}"); + acc + }); let compressed_xml = xz_compress(eml.as_bytes()).with_context(|| "Error compressing the rendered XML")?; @@ -147,21 +150,25 @@ fn generate_er_final_zip( let exz_xml_path = temp_dir_path.join(format!("{prefix}{MIRU_STATION_ID}.exz").as_str()); { - let mut exz_xml_file = File::create(&exz_xml_path) - .with_context(|| format!("Failed to create or open file: {exz_xml_path:?}"))?; + let mut exz_xml_file = File::create(&exz_xml_path).with_context(|| { + format!("Failed to create or open file: {}", exz_xml_path.display()) + })?; exz_xml_file .write_all(&exz_temp_file_bytes) - .with_context(|| format!("Failed to write data to file: {exz_xml_path:?}"))?; + .with_context(|| format!("Failed to write data to file: {}", exz_xml_path.display()))?; } let acm_json_stringified = serde_json::to_string_pretty(&acm_json)?; let exz_json_path = temp_dir_path.join(format!("{prefix}{MIRU_STATION_ID}.json")); { - let mut exz_json_file = File::create(&exz_json_path) - .with_context(|| format!("Failed to create or open file: {exz_json_path:?}"))?; + let mut exz_json_file = File::create(&exz_json_path).with_context(|| { + format!("Failed to create or open file: {}", exz_json_path.display()) + })?; exz_json_file .write_all(acm_json_stringified.as_bytes()) - .with_context(|| format!("Failed to write data to file: {exz_xml_path:?}"))?; + .with_context(|| { + format!("Failed to write data to file: {}", exz_json_path.display()) + })?; } compress_folder_to_zip(temp_dir_path, output_file_path)?; diff --git a/packages/windmill/src/services/consolidation/upload_signature_service.rs b/packages/windmill/src/services/consolidation/upload_signature_service.rs index b6cdb31907..6c6ae26947 100644 --- a/packages/windmill/src/services/consolidation/upload_signature_service.rs +++ 
b/packages/windmill/src/services/consolidation/upload_signature_service.rs @@ -99,13 +99,13 @@ async fn update_election_event_sbei_users( let key = prepend_miru_annotation(MIRU_SBEI_USERS); let serialized_sbei_users = serde_json::to_string(&new_sbei_users)?; annotations.insert(key, serialized_sbei_users); - let annotations_js = serde_json::to_value(&annotations)?; + let annotations_json = serde_json::to_value(&annotations)?; update_election_event_annotations( hasura_transaction, &election_event.tenant_id, &election_event.id, - annotations_js, + annotations_json, ) .await } @@ -289,6 +289,7 @@ pub fn create_server_signature( /// /// Auth/lookup failures, certificate checks, signing, document pipeline, or persistence errors. #[instrument(err)] +#[allow(clippy::too_many_lines)] pub async fn upload_transmission_package_signature_service( tenant_id: &str, election_id: &str, @@ -340,7 +341,7 @@ pub async fn upload_transmission_package_signature_service( .await .with_context(|| format!("Error fetching area {area_id}"))? .ok_or_else(|| anyhow!("Can't find area {area_id}"))?; - let area_name = area.name.clone().unwrap_or("".into()); + let area_name = area.name.clone().unwrap_or_default(); let area_annotations = area.get_annotations()?; // get sbei user @@ -505,7 +506,6 @@ pub async fn upload_transmission_package_signature_service( .await?; // upload zip of zips - let area_name = area.name.clone().unwrap_or_default(); let Some(first_document) = new_transmission_package_data.documents.first() else { return Err(anyhow!("Missing initial document")); }; diff --git a/packages/windmill/src/services/consolidation/zip.rs b/packages/windmill/src/services/consolidation/zip.rs index 951c404af9..4a3bb76b0f 100644 --- a/packages/windmill/src/services/consolidation/zip.rs +++ b/packages/windmill/src/services/consolidation/zip.rs @@ -21,9 +21,9 @@ use zip::write::{FileOptions, SimpleFileOptions}; /// Walkdir, zip writer, or filesystem errors. 
#[instrument(skip_all, err)] pub fn compress_folder_to_zip(src_dir: &Path, dst_file: &Path) -> Result<()> { - let path = src_dir.clone(); + let path = src_dir; let file = File::create(dst_file) - .with_context(|| format!("Failed to create destination file: {dst_file:?}"))?; + .with_context(|| format!("Failed to create destination file: {}", dst_file.display()))?; let mut zip = zip::ZipWriter::new(file); let options = SimpleFileOptions::default() @@ -31,24 +31,28 @@ pub fn compress_folder_to_zip(src_dir: &Path, dst_file: &Path) -> Result<()> { .unix_permissions(0o755); for entry in WalkDir::new(path) { - let entry = entry.with_context(|| format!("Failed to access entry in path: {path:?}"))?; + let entry = + entry.with_context(|| format!("Failed to access entry in path: {}", path.display()))?; let entry_path = entry.path(); - let name = entry_path - .strip_prefix(Path::new(path)) - .with_context(|| format!("Failed to strip prefix from path: {entry_path:?}"))?; + let name = entry_path.strip_prefix(Path::new(path)).with_context(|| { + format!("Failed to strip prefix from path: {}", entry_path.display()) + })?; info!("Adding entry to zip :{}", name.display()); if entry_path.is_file() { zip.start_file_from_path(name, options) - .with_context(|| format!("Failed to add file to zip: {entry_path:?}"))?; + .with_context(|| format!("Failed to add file to zip: {}", entry_path.display()))?; let mut f = File::open(entry_path) - .with_context(|| format!("Failed to open file: {entry_path:?}"))?; - io::copy(&mut f, &mut zip) - .with_context(|| format!("Failed to write file to zip: {entry_path:?}"))?; + .with_context(|| format!("Failed to open file: {}", entry_path.display()))?; + io::copy(&mut f, &mut zip).with_context(|| { + format!("Failed to write file to zip: {}", entry_path.display()) + })?; } else if !name.as_os_str().is_empty() { zip.add_directory_from_path(name, options) - .with_context(|| format!("Failed to add directory to zip: {entry_path:?}"))?; + .with_context(|| { + 
format!("Failed to add directory to zip: {}", entry_path.display()) + })?; } } zip.finish() @@ -64,39 +68,40 @@ pub fn compress_folder_to_zip(src_dir: &Path, dst_file: &Path) -> Result<()> { #[instrument(skip_all, err)] pub fn unzip_file(src_file: &Path, dst_dir: &Path) -> Result<()> { let file = File::open(src_file) - .with_context(|| format!("Failed to open source zip file: {src_file:?}"))?; + .with_context(|| format!("Failed to open source zip file: {}", src_file.display()))?; let mut archive = ZipArchive::new(file) - .with_context(|| format!("Failed to read zip archive: {src_file:?}"))?; + .with_context(|| format!("Failed to read zip archive: {}", src_file.display()))?; for i in 0..archive.len() { - let mut file = archive + let mut zip_member = archive .by_index(i) .with_context(|| format!("Failed to access file in archive at index: {i}"))?; - let out_path = dst_dir.join(file.sanitized_name()); + let out_path = dst_dir.join(zip_member.sanitized_name()); - if file.name().ends_with('/') { + if zip_member.name().ends_with('/') { fs::create_dir_all(&out_path) - .with_context(|| format!("Failed to create directory: {out_path:?}"))?; + .with_context(|| format!("Failed to create directory: {}", out_path.display()))?; } else { if let Some(p) = out_path.parent() { if !p.exists() { fs::create_dir_all(p) - .with_context(|| format!("Failed to create directory: {p:?}"))?; + .with_context(|| format!("Failed to create directory: {}", p.display()))?; } } let mut outfile = File::create(&out_path) - .with_context(|| format!("Failed to create output file: {out_path:?}"))?; - io::copy(&mut file, &mut outfile) - .with_context(|| format!("Failed to write file: {out_path:?}"))?; + .with_context(|| format!("Failed to create output file: {}", out_path.display()))?; + io::copy(&mut zip_member, &mut outfile) + .with_context(|| format!("Failed to write file: {}", out_path.display()))?; } #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; - if let Some(mode) = file.unix_mode() { - 
fs::set_permissions(&out_path, fs::Permissions::from_mode(mode)) - .with_context(|| format!("Failed to set permissions for: {out_path:?}"))?; + if let Some(mode) = zip_member.unix_mode() { + fs::set_permissions(&out_path, fs::Permissions::from_mode(mode)).with_context( + || format!("Failed to set permissions for: {}", out_path.display()), + )?; } } } diff --git a/packages/windmill/src/services/custom_url.rs b/packages/windmill/src/services/custom_url.rs index 500e418992..99f1ce34a9 100644 --- a/packages/windmill/src/services/custom_url.rs +++ b/packages/windmill/src/services/custom_url.rs @@ -103,7 +103,7 @@ enum ActionValue { #[derive(Debug, Serialize, Deserialize)] /// Cloudflare page rule action entry. struct Action { - /// Action identifier (e.g. "forwarding_url"). + /// Action identifier (e.g. `forwarding_url`). id: String, /// Action payload. value: ActionValue, @@ -155,11 +155,7 @@ pub async fn get_dns_record(record_name: &str) -> Result, Box< /// /// # Errors /// -/// Returns an error if fetching/updating DNS records or page rules fails. -/// -/// # Panics -/// -/// Panics if `key` is not one of: `"login"`, `"enrollment"`, or `"saml"`. +/// Returns an error if fetching/updating DNS records or page rules fails, or if `key` is invalid. 
pub async fn set_custom_url( origin: &str, redirect_to: &str, @@ -175,7 +171,7 @@ pub async fn set_custom_url( "login" => &prev_custom_urls.login, "enrollment" => &prev_custom_urls.enrollment, "saml" => &prev_custom_urls.saml, - _ => panic!("Invalid key provided"), + _ => return Err(format!("Invalid key provided: {key}").into()), }; let current_dns_record = match get_dns_record(current_prev_url).await { @@ -190,23 +186,19 @@ pub async fn set_custom_url( } }; - match current_dns_record { - Some(dns_record) => { - if let Err(e) = update_dns_record(&dns_record.id, redirect_to, dns_prefix).await { - let error_message = format!("Failed to update DNS record: {e}"); - error!("{}", error_message); - return Err(error_message.into()); - } - info!("DNS record updated successfully."); - } - None => { - if let Err(e) = create_dns_record(redirect_to, dns_prefix).await { - let error_message = format!("Failed to create DNS record: {e}"); - error!("{}", error_message); - return Err(error_message.into()); - } - info!("DNS record created successfully."); + if let Some(dns_record) = current_dns_record { + if let Err(e) = update_dns_record(&dns_record.id, redirect_to, dns_prefix).await { + let error_message = format!("Failed to update DNS record: {e}"); + error!("{}", error_message); + return Err(error_message.into()); } + info!("DNS record updated successfully."); + } else if let Err(e) = create_dns_record(redirect_to, dns_prefix).await { + let error_message = format!("Failed to create DNS record: {e}"); + error!("{}", error_message); + return Err(error_message.into()); + } else { + info!("DNS record created successfully."); } let current_page_rule = match get_page_rule(origin).await { @@ -221,23 +213,19 @@ pub async fn set_custom_url( } }; - match current_page_rule { - Some(page_rule) => { - if let Err(e) = update_page_rule(&page_rule.id, redirect_to, origin).await { - let error_message = format!("Failed to update page rule: {e}"); - error!("{}", error_message); - return 
Err(error_message.into()); - } - info!("Page rule updated successfully."); - } - None => { - if let Err(e) = create_page_rule(redirect_to, origin).await { - let error_message = format!("Failed to create page rule: {e}"); - error!("{}", error_message); - return Err(error_message.into()); - } - info!("Page rule created successfully."); + if let Some(page_rule) = current_page_rule { + if let Err(e) = update_page_rule(&page_rule.id, redirect_to, origin).await { + let error_message = format!("Failed to update page rule: {e}"); + error!("{}", error_message); + return Err(error_message.into()); } + info!("Page rule updated successfully."); + } else if let Err(e) = create_page_rule(redirect_to, origin).await { + let error_message = format!("Failed to create page rule: {e}"); + error!("{}", error_message); + return Err(error_message.into()); + } else { + info!("Page rule created successfully."); } Ok(()) @@ -332,7 +320,11 @@ async fn get_all_dns_records() -> Result, Box> { fn find_matching_dns_record(records: Vec, expected_name: &str) -> Option { info!("find_matching_dns_record expected_name:{}", expected_name); for record in records { - let name: Vec = record.name.split(".").map(|s| s.to_owned()).collect(); + let name: Vec = record + .name + .split('.') + .map(std::borrow::ToOwned::to_owned) + .collect(); if let Some(name) = name.first() { info!("name: {}", name); @@ -412,15 +404,12 @@ pub async fn create_dns_record(redirect_to: &str, dns_prefix: &str) -> Result<() let (zone_id, api_key) = match get_cloudflare_vars() { Ok(vars) => vars, Err(e) => { - error!("Failed to get Cloudflare environment variables: {}", e); + error!("Failed to get Cloudflare environment variables: {e}"); return Err(format!("Failed to get Cloudflare environment variables: {e}").into()); } }; - let url = format!( - "https://api.cloudflare.com/client/v4/zones/{}/dns_records", - zone_id - ); + let url = format!("https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records",); let request_dns_body = 
create_dns_payload(dns_prefix); info!("DNS prefix {:?}", dns_prefix); @@ -439,7 +428,7 @@ pub async fn create_dns_record(redirect_to: &str, dns_prefix: &str) -> Result<() }; if response.status().is_success() { - println!("DNS record created successfully"); + info!("DNS record created successfully"); Ok(()) } else { let body = match response.text().await { @@ -472,10 +461,7 @@ pub async fn update_dns_record( } }; - let url = format!( - "https://api.cloudflare.com/client/v4/zones/{}/dns_records/{}", - zone_id, id - ); + let url = format!("https://api.cloudflare.com/client/v4/zones/{zone_id}/dns_records/{id}",); let request_dns_body = create_dns_payload(dns_prefix); info!("DNS prefix {:?}", dns_prefix); @@ -494,7 +480,7 @@ pub async fn update_dns_record( }; if response.status().is_success() { - println!("DNS record created successfully"); + info!("DNS record created successfully"); Ok(()) } else { let body = match response.text().await { @@ -526,8 +512,7 @@ async fn update_page_rule( let response = client .put(format!( - "https://api.cloudflare.com/client/v4/zones/{}/pagerules/{}", - zone_id, rule_id + "https://api.cloudflare.com/client/v4/zones/{zone_id}/pagerules/{rule_id}", )) .header("Authorization", format!("Bearer {api_key}")) .json(&request_body) @@ -561,7 +546,7 @@ async fn create_page_rule(redirect_to: &str, origin: &str) -> Result<(), Box Result> { { let mut builder = SslConnector::builder(SslMethod::tls()) .map_err(|err| - anyhow!("error building SsslConnector: {}", err) + anyhow!("error building SslConnector: {err}") )?; builder.set_ca_file( env::var("KEYCLOAK_DB_CA_PATH") .map_err(|err| - anyhow!("error loading KEYCLOAK_DB_CA_PATH var: {}", err) + anyhow!("error loading KEYCLOAK_DB_CA_PATH var: {err}") )?
) .map_err(|err| - anyhow!("error in builder.set_ca_file(): {}", err) + anyhow!("error in builder.set_ca_file(): {err}") )?; let connector_tls = MakeTlsConnector::new(builder.build()); @@ -97,7 +97,7 @@ pub async fn generate_keycloak_pool() -> Result> { .keycloak_db .create_pool(Some(Runtime::Tokio1), connector_tls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } else { @@ -105,7 +105,7 @@ pub async fn generate_keycloak_pool() -> Result> { .keycloak_db .create_pool(Some(Runtime::Tokio1), tokio_postgres::NoTls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } @@ -114,7 +114,7 @@ pub async fn generate_keycloak_pool() -> Result> { .keycloak_db .create_pool(Some(Runtime::Tokio1), tokio_postgres::NoTls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } @@ -137,16 +137,16 @@ pub async fn generate_hasura_pool() -> Result> { { let mut builder = SslConnector::builder(SslMethod::tls()) .map_err(|err| - anyhow!("error building SsslConnector: {}", err) + anyhow!("error building SslConnector: {err}") )?; builder.set_ca_file( env::var("HASURA_DB_CA_PATH") .map_err(|err| - anyhow!("error loading HASURA_DB_CA_PATH var: {}", err) + anyhow!("error loading HASURA_DB_CA_PATH var: {err}") )?
) .map_err(|err| - anyhow!("error in builder.set_ca_file(): {}", err) + anyhow!("error in builder.set_ca_file(): {err}") )?; let connector_tls = MakeTlsConnector::new(builder.build()); @@ -154,7 +154,7 @@ pub async fn generate_hasura_pool() -> Result> { .hasura_db .create_pool(Some(Runtime::Tokio1), connector_tls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } else { @@ -162,7 +162,7 @@ pub async fn generate_hasura_pool() -> Result> { .hasura_db .create_pool(Some(Runtime::Tokio1), tokio_postgres::NoTls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } @@ -171,23 +171,27 @@ pub async fn generate_hasura_pool() -> Result> { .hasura_db .create_pool(Some(Runtime::Tokio1), tokio_postgres::NoTls) .map_err(|err| - anyhow!("error creating pool: {}", err) + anyhow!("error creating pool: {err}") )?; Ok(Arc::new(pool)) } } } -lazy_static! { +lazy_static::lazy_static! 
{ static ref KEYCLOAK_POOL: AsyncOnce> = AsyncOnce::new(async { - let pool = generate_keycloak_pool().await.unwrap(); + let pool = generate_keycloak_pool() + .await + .expect("Keycloak pool initialization failed"); assert_standard_conforming_strings(&pool) .await .expect("Keycloak DB: standard_conforming_strings check failed"); pool }); static ref HASURA_POOL: AsyncOnce> = AsyncOnce::new(async { - let pool = generate_hasura_pool().await.unwrap(); + let pool = generate_hasura_pool() + .await + .expect("Hasura pool initialization failed"); assert_standard_conforming_strings(&pool) .await .expect("Hasura DB: standard_conforming_strings check failed"); diff --git a/packages/windmill/src/services/datafix/api_datafix.rs b/packages/windmill/src/services/datafix/api_datafix.rs index 20a7f0993d..2f1d4cd310 100644 --- a/packages/windmill/src/services/datafix/api_datafix.rs +++ b/packages/windmill/src/services/datafix/api_datafix.rs @@ -3,8 +3,8 @@ // SPDX-License-Identifier: AGPL-3.0-only //! Datafix operations: resolve a datafix-scoped election event, then mutate the //! matching Keycloak voter (create, update, disable, mark voted, PIN rotation) using admin APIs. -use super::types::*; -use super::utils::*; +use super::types::{DatafixResponse, JsonErrorResponse, MarkVotedBody, VoterInformationBody}; +use super::utils::{find_user_area_by_name, get_event_id_and_datafix_annotations, get_user_id}; use crate::services::users::{list_users, FilterOption, ListUsersFilter}; use anyhow::Result; @@ -23,7 +23,7 @@ use std::collections::HashMap; use std::env; use tracing::{error, info, instrument, warn}; /// Disable the voter, datafix users are not actually deleted but just disabled. -/// Note: voter_id in Datafix API represents the username in Keycloak/Sequent´s system. +/// Note: `voter_id` in Datafix API represents the username in Keycloak/Sequent´s system. 
/// /// # Errors /// @@ -128,7 +128,7 @@ pub async fn add_datafix_voter( let user = User { attributes: attributes.clone(), enabled: Some(true), - username: Some(username.to_string()), + username: Some(username.clone()), area: Some(area), ..User::default() }; @@ -147,7 +147,7 @@ pub async fn add_datafix_voter( } /// There are 2 things that can be updated, the area and the birthdate. -/// Note: voter_id in Datafix API represents the username in Keycloak/Sequent´s system. +/// Note: `voter_id` in Datafix API represents the username in Keycloak/Sequent´s system. /// /// # Errors /// @@ -361,7 +361,7 @@ pub async fn replace_voter_pin( Ok((users, 1)) => { let user = users .last() - .map(|val_ref| val_ref.to_owned()) + .map(std::borrow::ToOwned::to_owned) .unwrap_or_default(); if !user.enabled.unwrap_or(true) { warn!("Cannot replace pin because the user is disabled."); diff --git a/packages/windmill/src/services/datafix/mod.rs b/packages/windmill/src/services/datafix/mod.rs index 84f8b61dde..8cd2d197a0 100644 --- a/packages/windmill/src/services/datafix/mod.rs +++ b/packages/windmill/src/services/datafix/mod.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: AGPL-3.0-only -//! Datafix service modules: datafix HTTP endpoints and the VoterView SOAP bridge. +//! Datafix service modules: datafix HTTP endpoints and the `VoterView` SOAP bridge. pub mod api_datafix; pub mod types; diff --git a/packages/windmill/src/services/datafix/types.rs b/packages/windmill/src/services/datafix/types.rs index 4df19a56b6..e1f060a4fa 100644 --- a/packages/windmill/src/services/datafix/types.rs +++ b/packages/windmill/src/services/datafix/types.rs @@ -65,7 +65,7 @@ impl DatafixResponse { } } -/// VoterView SOAP connection details embedded in election event annotations. +/// `VoterView` SOAP connection details embedded in election event annotations. #[derive(Deserialize, Serialize, Debug)] pub struct VoterviewRequest { /// SOAP endpoint base URL configured per election event. 
@@ -74,7 +74,7 @@ pub struct VoterviewRequest { pub usr: String, /// MVV web-service password. pub psw: String, - /// County/municipality code required by the VoterView SOAP actions. + /// County/municipality code required by the `VoterView` SOAP actions. pub county_mun: String, } @@ -85,7 +85,7 @@ pub struct DatafixAnnotations { pub id: String, /// Rules for generating replacement passwords. pub password_policy: PasswordPolicy, - /// Credentials and endpoint data for outbound VoterView synchronization. + /// Credentials and endpoint data for outbound `VoterView` synchronization. pub voterview_request: VoterviewRequest, } @@ -137,7 +137,8 @@ impl PasswordPolicy { let mut pass = String::new(); let mut rng = rand::thread_rng(); for _ in 0..self.size { - pass.push_str(rng.gen_range(0..10).to_string().as_str()); + let digit = rng.gen_range(0..10_u32); + pass.push(char::from_digit(digit, 10).unwrap_or('0')); } pass } @@ -148,7 +149,7 @@ impl PasswordPolicy { .collect(), }; match self.base { - BasePolicy::IdPswConcat => format!("{}{}", voter_id, pin), + BasePolicy::IdPswConcat => format!("{voter_id}{pin}"), BasePolicy::PswOnly => pin, } } @@ -188,11 +189,11 @@ impl ValidateAnnotations for ElectionEventDatafix { } } -/// Supported SOAP request types for the VoterView integration. +/// Supported SOAP request types for the `VoterView` integration. #[derive(Display, Debug, Clone)] pub enum SoapRequest { /// `SetVoted` SOAP action after an internet ballot is accepted. SetVoted, - /// `SetNotVoted` SOAP action when a vote must be rolled back in VoterView. + /// `SetNotVoted` SOAP action when a vote must be rolled back in `VoterView`. SetNotVoted, } diff --git a/packages/windmill/src/services/datafix/utils.rs b/packages/windmill/src/services/datafix/utils.rs index dc94369fd9..c91899943d 100644 --- a/packages/windmill/src/services/datafix/utils.rs +++ b/packages/windmill/src/services/datafix/utils.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: AGPL-3.0-only //! 
Shared helpers and constants for datafix flows. -use super::types::*; +use super::types::{DatafixAnnotations, DatafixResponse, JsonErrorResponse, VoterInformationBody}; use crate::postgres::area::get_event_areas; use crate::postgres::election_event::get_election_event_by_id; use crate::postgres::election_event::{get_all_tenant_election_events, ElectionEventDatafix}; @@ -23,11 +23,12 @@ use tracing::{error, info, instrument, warn}; pub const DATAFIX_ID_KEY: &str = "datafix:id"; /// Annotation key storing JSON for password generation rules. pub const DATAFIX_PSW_POLICY_KEY: &str = "datafix:password_policy"; -/// Annotation key storing JSON credentials for the VoterView SOAP integration. +/// Annotation key storing JSON credentials for the `VoterView` SOAP integration. pub const DATAFIX_VOTERVIEW_REQ_KEY: &str = "datafix:voterview_request"; /// Returns true if the voter has voted via Sequent´s system - -/// this is if VOTED_CHANNEL attribute is set to VOTED_CHANNEL_INTERNET_VALUE. +/// this is if `VOTED_CHANNEL` attribute is set to `VOTED_CHANNEL_INTERNET_VALUE`. +#[allow(clippy::implicit_hasher)] #[instrument()] pub fn voted_via_internet(attributes: &HashMap>) -> bool { match attributes.iter().find(|tupple| tupple.0.eq(VOTED_CHANNEL)) { @@ -39,7 +40,8 @@ pub fn voted_via_internet(attributes: &HashMap>) -> bool { } /// Returns true if the voter has voted via a secondary channel, PAPER, PHONE, ETC - -/// this is if VOTED_CHANNEL attribute is set to anything else than Internet. +/// this is if `VOTED_CHANNEL` attribute is set to anything else than `Internet`. +#[allow(clippy::implicit_hasher)] #[instrument()] pub fn voted_via_not_internet_channel(attributes: &HashMap>) -> bool { match attributes.iter().find(|tupple| tupple.0.eq(VOTED_CHANNEL)) { @@ -49,7 +51,7 @@ pub fn voted_via_not_internet_channel(attributes: &HashMap>) None => false, } } -/// Gets the election_event_id and the DatafixAnnotations of the event that has the datafix id in its annotations. 
+/// Gets the `election_event_id` and the `DatafixAnnotations` of the event that has the datafix id in its annotations. /// /// # Errors /// @@ -105,7 +107,7 @@ pub async fn get_event_id_and_datafix_annotations( return Err(DatafixResponse::new(Status::NotFound)); } -/// Returns the UserArea object. If it cannot find the area id by name returns an error. +/// Returns the `UserArea` object. If it cannot find the area id by name returns an error. /// /// # Errors /// @@ -145,15 +147,14 @@ pub async fn find_user_area_by_name( }) .map(|area| area.id.clone()); - match area_id { - Some(id) => Ok(UserArea { + if let Some(id) = area_id { + Ok(UserArea { id: Some(id), name: Some(area_concat), - }), - None => { - error!("Error. Area not found for {}", area_concat); - Err(DatafixResponse::new(Status::NotFound)) - } + }) + } else { + error!("Error. Area not found for {}", area_concat); + Err(DatafixResponse::new(Status::NotFound)) } } @@ -181,7 +182,10 @@ pub async fn get_user_id( error!("Error getting users by username: Not Found"); return Err(DatafixResponse::new(Status::NotFound)); } - 1 => Ok(user_ids[0].clone()), + 1 => user_ids.into_iter().next().ok_or_else(|| { + error!("Error getting users by username: internal state"); + DatafixResponse::new(Status::InternalServerError) + }), _ => { error!("Error getting users by username: Multiple users Found"); return Err(DatafixResponse::new(Status::NotFound)); @@ -189,7 +193,7 @@ pub async fn get_user_id( } } -/// Get the ElectionEvent and check if its a datafix election event (has datafix:id annotations). +/// Get the `ElectionEvent` and check if its a datafix election event (has `datafix:id` annotations). 
/// /// # Errors /// diff --git a/packages/windmill/src/services/datafix/voterview_requests.rs b/packages/windmill/src/services/datafix/voterview_requests.rs index 1043864bed..a7064cb4bb 100644 --- a/packages/windmill/src/services/datafix/voterview_requests.rs +++ b/packages/windmill/src/services/datafix/voterview_requests.rs @@ -2,9 +2,9 @@ // // SPDX-License-Identifier: AGPL-3.0-only -//! SOAP request formatting and sending for the VoterView integration. +//! SOAP request formatting and sending for the `VoterView` integration. -use super::types::*; +use super::types::{DatafixAnnotations, SoapRequest}; use crate::postgres::election_event::ElectionEventDatafix; use crate::services::consolidation::eml_generator::ValidateAnnotations; use anyhow::{anyhow, Result}; @@ -67,6 +67,7 @@ impl SoapRequest { } /// Returns the SOAP body for the request type. + #[must_use] pub fn get_body( &self, annotations: &DatafixAnnotations, @@ -82,7 +83,7 @@ impl SoapRequest { } } -/// Sends a VoterView SOAP request for the given `req_type` using event annotations for endpoint and credentials. +/// Sends a `VoterView` SOAP request for the given `req_type` using event annotations for endpoint and credentials. /// /// # Errors /// @@ -100,13 +101,10 @@ pub async fn send( None, ); - let voter_id = match username.to_owned() { - Some(id) => id, - _ => { - return Err(anyhow!( - "Cannot send the request to datafix because the username is None" - )); - } + let Some(voter_id) = username.clone() else { + return Err(anyhow!( + "Cannot send the request to datafix because the username is None" + )); }; let annotations: DatafixAnnotations = election_event .get_annotations() @@ -165,12 +163,9 @@ pub async fn send( } /// Parses a tag from the response text. 
+#[must_use] pub fn parse_tag(open_tag: &str, close_tag: &str, response_txt: &str) -> Option { - match response_txt.split(open_tag).collect::>() { - after if after.len() > 1 => match after[1].split(close_tag).collect::>() { - before if before.len() > 1 => Some(before[0].to_string()), - _ => None, - }, - _ => None, - } + let (_, after_open) = response_txt.split_once(open_tag)?; + let (inner, _) = after_open.split_once(close_tag)?; + Some(inner.to_string()) } diff --git a/packages/windmill/src/services/delete_election_event.rs b/packages/windmill/src/services/delete_election_event.rs index bbf273d9cc..28095c72e2 100644 --- a/packages/windmill/src/services/delete_election_event.rs +++ b/packages/windmill/src/services/delete_election_event.rs @@ -59,14 +59,16 @@ pub async fn delete_event_b3( ) -> Result<()> { let mut board_client = get_b3_pgsql_client().await?; let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; - let board_name = get_event_board(tenant_id, election_event_id, &slug); + let event_board_name = get_event_board(tenant_id, election_event_id, &slug); let elections = get_elections(hasura_transaction, tenant_id, election_event_id).await?; - board_client.delete_board(board_name.as_str()).await?; + board_client.delete_board(event_board_name.as_str()).await?; for election in elections { - let board_name = get_election_board(tenant_id, &election.id, &slug); - board_client.delete_board(board_name.as_str()).await?; + let election_board_name = get_election_board(tenant_id, &election.id, &slug); + board_client + .delete_board(election_board_name.as_str()) + .await?; } Ok(()) @@ -84,22 +86,24 @@ pub async fn delete_election_event_b3( election_ids: &Vec, ) -> Result<()> { let slug = std::env::var("ENV_SLUG").with_context(|| "missing env var ENV_SLUG")?; - let board_name = get_event_board(tenant_id, election_event_id, &slug); + let event_board_name = get_event_board(tenant_id, election_event_id, &slug); let mut board_client = 
get_b3_pgsql_client().await?; - let existing: Option = - board_client.get_board(board_name.as_str()).await?; + let event_board_row: Option = + board_client.get_board(event_board_name.as_str()).await?; - if existing.is_some() { - board_client.delete_board(board_name.as_str()).await?; + if event_board_row.is_some() { + board_client.delete_board(event_board_name.as_str()).await?; } for election_id in election_ids { - let board_name = get_election_board(tenant_id, election_id, &slug); - let existing: Option = - board_client.get_board(board_name.as_str()).await?; - - if existing.is_some() { - board_client.delete_board(board_name.as_str()).await?; + let election_board_name = get_election_board(tenant_id, election_id, &slug); + let election_board_row: Option = + board_client.get_board(election_board_name.as_str()).await?; + + if election_board_row.is_some() { + board_client + .delete_board(election_board_name.as_str()) + .await?; } } Ok(()) @@ -142,7 +146,7 @@ pub async fn delete_election_event_related_documents( tenant_id: &str, election_event_id: &str, ) -> Result<()> { - let documents_prefix = format!("tenant-{}/event-{}/", tenant_id, election_event_id); + let documents_prefix = format!("tenant-{tenant_id}/event-{election_event_id}/"); let bucket = s3::get_private_bucket()?; s3::delete_files_from_s3(bucket, documents_prefix.clone(), false) .await diff --git a/packages/windmill/src/services/documents.rs b/packages/windmill/src/services/documents.rs index ab3a212ee3..5dbcfc89a5 100644 --- a/packages/windmill/src/services/documents.rs +++ b/packages/windmill/src/services/documents.rs @@ -51,20 +51,17 @@ pub async fn upload_and_return_document( info!("Document inserted {document:?}"); - let (document_s3_key, bucket) = match is_public { - true => { - let document_s3_key = s3::get_public_document_key(tenant_id, &document.id, name); - let bucket = s3::get_public_bucket()?; + let (document_s3_key, bucket) = if is_public { + let document_s3_key = 
s3::get_public_document_key(tenant_id, &document.id, name); + let bucket = s3::get_public_bucket()?; - (document_s3_key, bucket) - } - false => { - let document_s3_key = - s3::get_document_key(tenant_id, election_event_id.as_deref(), &document.id, name); - let bucket = s3::get_private_bucket()?; + (document_s3_key, bucket) + } else { + let document_s3_key = + s3::get_document_key(tenant_id, election_event_id.as_deref(), &document.id, name); + let bucket = s3::get_private_bucket()?; - (document_s3_key, bucket) - } + (document_s3_key, bucket) }; s3::upload_file_to_s3( @@ -160,18 +157,19 @@ pub async fn get_upload_url( None, ) .await - .map_err(|err| format!("Error inserting document: {:?}", err))?; + .map_err(|err| format!("Error inserting document: {err:?}"))?; - let path = match is_public { - true => s3::get_public_document_key(tenant_id, &document.id, name), - false => s3::get_document_key( + let path = if is_public { + s3::get_public_document_key(tenant_id, &document.id, name) + } else { + s3::get_document_key( tenant_id, election_event_id.clone().as_deref(), &document.id, name, - ), + ) }; - let url = s3::get_upload_url(path.to_string(), is_public, is_local.unwrap_or(false)).await?; + let url = s3::get_upload_url(path.clone(), is_public, is_local.unwrap_or(false)).await?; Ok((document, url)) } @@ -191,7 +189,7 @@ pub async fn get_document_url( let document = postgres::document::get_document( hasura_transaction, tenant_id, - election_event_id.map(|id| id.to_string()), + election_event_id.map(String::from), document_id, ) .await?; diff --git a/packages/windmill/src/services/election_dates.rs b/packages/windmill/src/services/election_dates.rs index 238269996d..644b269e12 100644 --- a/packages/windmill/src/services/election_dates.rs +++ b/packages/windmill/src/services/election_dates.rs @@ -4,8 +4,11 @@ //! Managing voting calendar and scheduled-date materialization for elections and events. 
-use crate::postgres::election::*; -use crate::postgres::scheduled_event::*; +use crate::postgres::election::get_election_by_id; +use crate::postgres::scheduled_event::{ + archive_scheduled_event, find_scheduled_event_by_task_id, insert_scheduled_event, + update_scheduled_event, +}; use crate::services::election_event_status::get_election_event_status; use anyhow::{anyhow, Result}; use deadpool_postgres::Transaction; @@ -13,7 +16,10 @@ use sequent_core::ballot::{ EInitializeReportPolicy, ElectionEventStatus, PeriodDates, StringifiedPeriodDates, }; use sequent_core::types::hasura::core::Election; -use sequent_core::types::scheduled_event::*; +use sequent_core::types::scheduled_event::{ + generate_manage_date_task_name, prepare_scheduled_dates, CronConfig, EventProcessors, + ManageElectionDatePayload, ScheduledEvent, +}; use std::str::FromStr; use tracing::instrument; @@ -97,7 +103,7 @@ pub async fn manage_dates( .await .map_err(|e| anyhow!("error inserting scheduled event: {e:?}"))?; } - }; + } } else { // Archive previous task if the date is set to null and we found some // task diff --git a/packages/windmill/src/services/election_event_board.rs b/packages/windmill/src/services/election_event_board.rs index 9ce8c6c5e2..740c08194c 100644 --- a/packages/windmill/src/services/election_event_board.rs +++ b/packages/windmill/src/services/election_event_board.rs @@ -31,6 +31,7 @@ impl From for BoardSerializable { } /// Get the database name for a B3 board reference. 
+#[must_use] pub fn get_election_event_board(bulletin_board_reference: Option) -> Option { bulletin_board_reference.and_then(|board_json| { let opt_board: Option = deserialize_value(board_json).ok(); diff --git a/packages/windmill/src/services/election_event_dates.rs b/packages/windmill/src/services/election_event_dates.rs index 891447cdb5..3b7eb5ed5a 100644 --- a/packages/windmill/src/services/election_event_dates.rs +++ b/packages/windmill/src/services/election_event_dates.rs @@ -7,12 +7,17 @@ use std::str::FromStr; use crate::postgres::election_event::get_election_event_by_id; -use crate::postgres::scheduled_event::*; +use crate::postgres::scheduled_event::{ + archive_scheduled_event, find_scheduled_event_by_task_id, insert_scheduled_event, + update_scheduled_event, +}; use anyhow::{anyhow, Result}; use deadpool_postgres::Transaction; use sequent_core::ballot::{ElectionPresentation, VotingPeriodDates}; use sequent_core::serialization::deserialize_with_path::deserialize_value; -use sequent_core::types::scheduled_event::*; +use sequent_core::types::scheduled_event::{ + generate_manage_date_task_name, CronConfig, EventProcessors, ManageElectionDatePayload, +}; use tracing::{info, instrument}; #[instrument(skip(hasura_transaction), err)] @@ -57,7 +62,7 @@ pub async fn manage_dates( cron_config, ) .await - .map_err(|e| anyhow!("error updating scheduled event: {e:?}"))? 
+ .map_err(|e| anyhow!("error updating scheduled event: {e:?}"))?; } _ => { let payload = ManageElectionDatePayload { election_id: None }; @@ -74,7 +79,7 @@ pub async fn manage_dates( .await .map_err(|e| anyhow!("error inserting scheduled event: {e:?}"))?; } - }; + } } else { // Archive previous task if the date is set to null and we found some // task diff --git a/packages/windmill/src/services/election_event_statistics.rs b/packages/windmill/src/services/election_event_statistics.rs index 2b8cf7c63c..95061421f4 100644 --- a/packages/windmill/src/services/election_event_statistics.rs +++ b/packages/windmill/src/services/election_event_statistics.rs @@ -4,7 +4,7 @@ //! Managing statistics for a single election event. -use anyhow::Result; +use anyhow::{anyhow, Result}; use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; use tokio_postgres::row::Row; @@ -50,7 +50,9 @@ pub async fn get_count_areas( let total_areas: i64 = if rows.is_empty() { 0 } else { - rows[0].try_get::<&str, i64>("total_areas")? + rows.first() + .ok_or_else(|| anyhow!("missing total_areas row"))? + .try_get::<&str, i64>("total_areas")? }; Ok(total_areas) @@ -96,7 +98,9 @@ pub async fn get_count_elections( let total_elections: i64 = if rows.is_empty() { 0 } else { - rows[0].try_get::<&str, i64>("total_elections")? + rows.first() + .ok_or_else(|| anyhow!("missing total_elections row"))? + .try_get::<&str, i64>("total_elections")? }; Ok(total_elections) @@ -192,7 +196,9 @@ pub async fn get_count_distinct_voters( let total_distinct_voters: i64 = if rows.is_empty() { 0 } else { - rows[0].try_get::<&str, i64>("total_distinct_voters")? + rows.first() + .ok_or_else(|| anyhow!("missing total_distinct_voters row"))? + .try_get::<&str, i64>("total_distinct_voters")? 
}; Ok(total_distinct_voters) diff --git a/packages/windmill/src/services/election_event_status.rs b/packages/windmill/src/services/election_event_status.rs index 7dd0e67b2f..0d7d03b3d5 100644 --- a/packages/windmill/src/services/election_event_status.rs +++ b/packages/windmill/src/services/election_event_status.rs @@ -10,7 +10,10 @@ use crate::postgres::election::{get_election_by_id, get_elections, update_electi use crate::postgres::election_event::{get_election_event_by_id, update_election_event_status}; use anyhow::{anyhow, Context, Result}; use deadpool_postgres::Transaction; -use sequent_core::ballot::*; +use sequent_core::ballot::{ + EInitializeReportPolicy, ElectionEventStatus, ElectionStatus, VotingPeriodEnd, VotingStatus, + VotingStatusChannel, +}; use sequent_core::serialization::deserialize_with_path::deserialize_value; use sequent_core::types::hasura::core::{ElectionEvent, VotingChannels}; use serde_json::value::Value; @@ -19,11 +22,13 @@ use tracing::{event, info, instrument, Level}; use super::voting_status::update_board_on_status_change; /// Deserialize an `ElectionEventStatus` from JSON. +#[must_use] pub fn get_election_event_status(status_json_opt: Option) -> Option { status_json_opt.and_then(|status_json| deserialize_value(status_json).ok()) } /// Deserialize an `ElectionStatus` from JSON. 
+#[must_use] pub fn get_election_status(status_json_opt: Option) -> Option { status_json_opt.and_then(|status_json| deserialize_value(status_json).ok()) } @@ -35,6 +40,7 @@ pub fn get_election_status(status_json_opt: Option) -> Option, tenant_id: &str, @@ -42,7 +48,7 @@ pub async fn update_event_voting_status( username: Option<&str>, election_event_id: &str, new_status: &VotingStatus, - channels: &Option>, + voting_status_channels: &Option>, ) -> Result { let election_event = get_election_event_by_id(hasura_transaction, tenant_id, election_event_id) .await @@ -61,13 +67,13 @@ pub async fn update_event_voting_status( elections_status.insert(election.id.clone(), curr_election_status); } - let channels: Vec = if let Some(channel) = channels { + let channels: Vec = if let Some(channel) = voting_status_channels { info!("Reading input voting channels {channel:?}"); channel.clone() - } else if let Some(channels) = election_event.voting_channels.clone() { - info!("Reading Event voting channels {channels:?}"); - let voting_channels: VotingChannels = - deserialize_value(channels).context("Failed to deserialize event voting_channels")?; + } else if let Some(event_channels_json) = election_event.voting_channels.clone() { + info!("Reading Event voting channels {event_channels_json:?}"); + let voting_channels: VotingChannels = deserialize_value(event_channels_json) + .context("Failed to deserialize event voting_channels")?; let mut event_channels = vec![]; @@ -75,21 +81,21 @@ pub async fn update_event_voting_status( .channel_from(&voting_channels) .unwrap_or(false) { - event_channels.push(VotingStatusChannel::ONLINE) + event_channels.push(VotingStatusChannel::ONLINE); } if VotingStatusChannel::KIOSK .channel_from(&voting_channels) .unwrap_or(false) { - event_channels.push(VotingStatusChannel::KIOSK) + event_channels.push(VotingStatusChannel::KIOSK); } if VotingStatusChannel::EARLY_VOTING .channel_from(&voting_channels) .unwrap_or(false) { - 
event_channels.push(VotingStatusChannel::EARLY_VOTING) + event_channels.push(VotingStatusChannel::EARLY_VOTING); } event_channels @@ -117,16 +123,13 @@ pub async fn update_event_voting_status( } let expected_next_status = match current_voting_status { - VotingStatus::NOT_STARTED => { - vec![VotingStatus::OPEN] - } VotingStatus::OPEN => { vec![VotingStatus::PAUSED, VotingStatus::CLOSED] } VotingStatus::PAUSED => { vec![VotingStatus::CLOSED, VotingStatus::OPEN] } - VotingStatus::CLOSED => { + VotingStatus::NOT_STARTED | VotingStatus::CLOSED => { vec![VotingStatus::OPEN] } }; @@ -151,9 +154,10 @@ pub async fn update_event_voting_status( let mut elections_ids: Vec = Vec::new(); if *new_status == VotingStatus::OPEN || *new_status == VotingStatus::CLOSED { for election in &elections { - if let Some(status) = elections_status.get_mut(&election.id) { - status.close_early_voting_if_online_status_change(channel, *new_status); - status.set_status_by_channel(channel, *new_status); + if let Some(election_status) = elections_status.get_mut(&election.id) { + election_status + .close_early_voting_if_online_status_change(channel, *new_status); + election_status.set_status_by_channel(channel, *new_status); } elections_ids.push(election.id.clone()); } @@ -164,7 +168,7 @@ pub async fn update_event_voting_status( tenant_id, user_id, username, - election_event.id.to_string(), + election_event.id.clone(), election_event.bulletin_board_reference.clone(), *new_status, channel, @@ -207,6 +211,7 @@ pub async fn update_event_voting_status( /// # Errors /// /// Returns an error if the election/event cannot be loaded, transitions are invalid, or updates fail. 
+#[allow(clippy::too_many_lines)] pub async fn update_election_voting_status_impl( tenant_id: String, user_id: Option<&str>, @@ -260,8 +265,7 @@ pub async fn update_election_voting_status_impl( .unwrap_or_default() { return Err(anyhow!( - "election {:?} has the voting period end disallowed", - election_id, + "election {election_id:?} has the voting period end disallowed", )); } @@ -273,22 +277,18 @@ pub async fn update_election_voting_status_impl( && !election.initialization_report_generated.unwrap_or(false) { return Err(anyhow!( - "election {:?} initialization report must be generated before opening the election", - election_id, + "election {election_id:?} initialization report must be generated before opening the election", )); } let expected_next_status = match current_voting_status { - VotingStatus::NOT_STARTED => { - vec![VotingStatus::OPEN] - } VotingStatus::OPEN => { vec![VotingStatus::PAUSED, VotingStatus::CLOSED] } VotingStatus::PAUSED => { vec![VotingStatus::CLOSED, VotingStatus::OPEN] } - VotingStatus::CLOSED => { + VotingStatus::NOT_STARTED | VotingStatus::CLOSED => { vec![VotingStatus::OPEN] } }; @@ -327,11 +327,11 @@ pub async fn update_election_voting_status_impl( &tenant_id, user_id, username, - election_event_id.to_string(), + election_event_id.clone(), bulletin_board_reference.clone(), new_status, channel, - Some(election_id.to_string()), + Some(election_id.clone()), None, ) .await diff --git a/packages/windmill/src/services/election_statistics.rs b/packages/windmill/src/services/election_statistics.rs index f89a46af59..08640c4790 100644 --- a/packages/windmill/src/services/election_statistics.rs +++ b/packages/windmill/src/services/election_statistics.rs @@ -4,7 +4,7 @@ //! Cross-election statistical queries used in dashboards and operational reports. 
-use anyhow::Result; +use anyhow::{anyhow, Result}; use deadpool_postgres::Transaction; use sequent_core::services::uuid_validation::parse_uuid_v4; use tokio_postgres::row::Row; @@ -108,7 +108,9 @@ pub async fn get_count_distinct_voters( let total_distinct_voters: i64 = if rows.is_empty() { 0 } else { - rows[0].try_get::<&str, i64>("total_distinct_voters")? + rows.first() + .ok_or_else(|| anyhow!("missing total_distinct_voters row"))? + .try_get::<&str, i64>("total_distinct_voters")? }; Ok(total_distinct_voters) @@ -167,7 +169,9 @@ pub async fn get_count_areas( let total_areas: i64 = if rows.is_empty() { 0 } else { - rows[0].try_get::<&str, i64>("total_areas")? + rows.first() + .ok_or_else(|| anyhow!("missing total_areas row"))? + .try_get::<&str, i64>("total_areas")? }; Ok(total_areas) diff --git a/packages/windmill/src/services/electoral_log.rs b/packages/windmill/src/services/electoral_log.rs index 63fc424e0f..252431cd1c 100644 --- a/packages/windmill/src/services/electoral_log.rs +++ b/packages/windmill/src/services/electoral_log.rs @@ -22,7 +22,12 @@ use base64::Engine; use deadpool_postgres::Transaction; use electoral_log::assign_value; use electoral_log::messages::message::{Message, SigningData}; -use electoral_log::messages::newtypes::{CertificateAuthEventAction, *}; +use electoral_log::messages::newtypes::{ + BallotPublicationIdString, CastVoteErrorString, CastVoteHash, CertificateAuthEventAction, + ContestIdString, ElectionIdString, ErrorMessageString, EventIdString, KeycloakEventTypeString, + PseudonymHash, PublicKeyDerB64, TenantIdString, TrusteeNameString, VoterCountryString, + VoterIpString, VotingChannelString, +}; use electoral_log::messages::statement::{StatementBody, StatementType}; use electoral_log::{ ElectoralLogMessage, ElectoralLogVarCharColumn, SqlCompOperators, WhereClauseBTreeMap, @@ -51,9 +56,16 @@ pub const IMMUDB_ROWS_LIMIT: usize = 2500; /// Default maximum number of rows returned per page in list endpoints. 
pub const MAX_ROWS_PER_PAGE: usize = 50; -/// Ballot_id input is the first half of the original hash which is stored in the electoral log. +/// Same numeric value as [`IMMUDB_ROWS_LIMIT`], as `i64`, for query defaults. +#[allow(clippy::cast_possible_wrap)] // IMMUDB_ROWS_LIMIT is 2500 +pub const IMMUDB_ROWS_LIMIT_I64: i64 = IMMUDB_ROWS_LIMIT as i64; +/// Same numeric value as [`MAX_ROWS_PER_PAGE`], as `i64`, for query defaults. +#[allow(clippy::cast_possible_wrap)] // MAX_ROWS_PER_PAGE is 50 +pub const MAX_ROWS_PER_PAGE_I64: i64 = MAX_ROWS_PER_PAGE as i64; + +/// `Ballot_id` input is the first half of the original hash which is stored in the electoral log. pub const BALLOT_ID_LENGTH_BYTES: usize = STRAND_HASH_LENGTH_BYTES / 2; -/// Ballot_id input is in HEX, each byte is represented in 2 chars. +/// `Ballot_id` input is in HEX, each byte is represented in 2 chars. pub const BALLOT_ID_LENGTH_CHARS: usize = BALLOT_ID_LENGTH_BYTES * 2; /// Helper for creating and posting signed electoral-log messages. @@ -65,10 +77,11 @@ pub struct ElectoralLog { } /// If the list contains exactly one election id, return it; otherwise return `None`. +#[must_use] pub fn flatten_election_ids(election_ids: Option>) -> Option { election_ids.and_then(|ids| { if ids.len() == 1 { - Some(ids[0].clone()) + ids.first().cloned() } else { None } @@ -77,7 +90,7 @@ pub fn flatten_election_ids(election_ids: Option>) -> Option impl ElectoralLog { #[instrument(err, name = "ElectoralLog::new")] - /// Create a new ElectoralLog. + /// Create a new `ElectoralLog`. /// /// # Errors /// @@ -110,7 +123,7 @@ impl ElectoralLog { } #[instrument(skip(sender_sk), err)] - /// Create a new ElectoralLog from a signing key. + /// Create a new `ElectoralLog` from a signing key. /// /// # Errors /// @@ -285,7 +298,7 @@ impl ElectoralLog { /// /// Because admin users are cross election event entities, a /// dummy election event id will be used instead, with value - /// electoral_log::messages::Message:GENERIC_EVENT. 
+ /// `electoral_log::messages::Message::GENERIC_EVENT`. /// /// FIXME: it may be necessary to implement a tenant-wide electoral /// log to save this type of message. An admin user could be created @@ -915,7 +928,7 @@ impl ElectoralLog { .await } - /// Builds a keycloak event message and returns the resulting ElectoralLogMessage. + /// Builds a keycloak event message and returns the resulting `ElectoralLogMessage`. /// /// # Errors /// @@ -945,7 +958,7 @@ impl ElectoralLog { Ok(board_message) } - /// Builds a send-template message and returns the resulting ElectoralLogMessage. + /// Builds a send-template message and returns the resulting `ElectoralLogMessage`. /// /// # Errors /// @@ -970,7 +983,7 @@ impl ElectoralLog { message_body, area_id, ) - .map_err(|e| anyhow!("Error creating send template message: {:?}", e))?; + .map_err(|e| anyhow!("Error creating send template message: {e:?}"))?; let board_message: ElectoralLogMessage = message.try_into()?; Ok(board_message) } @@ -997,7 +1010,7 @@ impl ElectoralLog { result.map_err(|err| anyhow::Error::new(err).context("Failed to read CSV row"))?; let message: &Message = &Message::strand_deserialize(&general_purpose::STANDARD_NO_PAD.decode(&row.data)?) - .map_err(|err| anyhow!("Failed to deserialize message: {:?}", err))?; + .map_err(|err| anyhow!("Failed to deserialize message: {err:?}"))?; let electoral_log_message: ElectoralLogMessage = message.try_into()?; messages.push(electoral_log_message); @@ -1067,6 +1080,7 @@ impl GetElectoralLogBody { /// /// Returns an error if the SQL clauses cannot be built.
#[instrument(ret)] + #[allow(clippy::too_many_lines)] fn as_sql(&self, to_count: bool) -> Result<(String, Vec)> { let mut clauses = Vec::new(); let mut params = Vec::new(); @@ -1081,23 +1095,23 @@ impl GetElectoralLogBody { match field { OrderField::Id => { // sql INTEGER type let int_value: i64 = value.parse()?; - where_clauses.push(format!("id = @{}", param_name)); + where_clauses.push(format!("id = @{param_name}")); params.push(create_named_param(param_name, Value::N(int_value))); } OrderField::SenderPk | OrderField::UserId | OrderField::Username | OrderField::BallotId | OrderField::StatementKind | OrderField::Version => { // sql VARCHAR type - where_clauses.push(format!("{field} LIKE @{}", param_name)); - params.push(create_named_param(param_name, Value::S(value.to_string()))); + where_clauses.push(format!("{field} LIKE @{param_name}")); + params.push(create_named_param(param_name, Value::S(value.clone()))); } OrderField::StatementTimestamp | OrderField::Created => { // sql TIMESTAMP type // these have their own column and are inside of Message´s column as well let datetime = ISO8601::to_date_utc(value) - .map_err(|err| anyhow!("Failed to parse timestamp: {:?}", err))?; + .map_err(|err| anyhow!("Failed to parse timestamp: {err:?}"))?; let ts: i64 = datetime.timestamp(); let ts_end: i64 = ts .checked_add(60) .expect("timestamp search end overflow"); // Search along that minute; seconds are not specified by the client. let param_name_end = format!("{param_name}_end"); - where_clauses.push(format!("{field} >= @{} AND {field} < @{}", param_name, param_name_end)); + where_clauses.push(format!("{field} >= @{param_name} AND {field} < @{param_name_end}")); params.push(create_named_param(param_name, Value::Ts(ts))); params.push(create_named_param(param_name_end, Value::Ts(ts_end))); } @@ -1109,7 +1123,7 @@ impl GetElectoralLogBody { if !where_clauses.is_empty() { clauses.push(format!("WHERE {}", where_clauses.join(" AND "))); } - }; + } // Build a single extra clause. 
// This clause returns rows if: @@ -1135,10 +1149,10 @@ impl GetElectoralLogBody { let placeholders: Vec = area_ids .iter() .enumerate() - .map(|(i, _)| format!("@param_area{}", i)) + .map(|(i, _)| format!("@param_area{i}")) .collect(); for (i, area) in area_ids.iter().enumerate() { - let param_name = format!("param_area{}", i); + let param_name = format!("param_area{i}"); params.push(create_named_param( param_name.clone(), Value::S(area.clone()), @@ -1175,18 +1189,15 @@ impl GetElectoralLogBody { } if !extra_where_clauses.is_empty() { - match clauses.len() { - 0 => { - clauses.push(format!("WHERE {}", extra_where_clauses.join(" AND "))); - } - _ => { - let where_clause = clauses.pop().ok_or(anyhow!("Empty clause"))?; - clauses.push(format!( - "{} AND {}", - where_clause, - extra_where_clauses.join(" AND ") - )); - } + if clauses.is_empty() { + clauses.push(format!("WHERE {}", extra_where_clauses.join(" AND "))); + } else { + let where_clause = clauses.pop().ok_or(anyhow!("Empty clause"))?; + clauses.push(format!( + "{} AND {}", + where_clause, + extra_where_clauses.join(" AND ") + )); } } @@ -1219,7 +1230,7 @@ impl GetElectoralLogBody { if !to_count && self.offset.is_some() { let offset_param_name = String::from("offset"); let offset = std::cmp::max(self.offset.unwrap_or(0), 0); - clauses.push(format!("OFFSET @{}", offset_param_name)); + clauses.push(format!("OFFSET @{offset_param_name}")); params.push(create_named_param(offset_param_name, Value::N(offset))); } @@ -1255,36 +1266,43 @@ pub struct StatementHeadDataString { impl ElectoralLogRow { /// Database id of the log row. - pub fn id(&self) -> i64 { + #[must_use] + pub const fn id(&self) -> i64 { self.id } /// Row creation time. - pub fn created(&self) -> i64 { + #[must_use] + pub const fn created(&self) -> i64 { self.created } /// Statement timestamp from immudb. 
- pub fn statement_timestamp(&self) -> i64 { + #[must_use] + pub const fn statement_timestamp(&self) -> i64 { self.statement_timestamp } /// Statement kind string (e.g. cast vote vs audit). + #[must_use] pub fn statement_kind(&self) -> &str { &self.statement_kind } /// JSON string of the deserialized message payload. + #[must_use] pub fn message(&self) -> &str { &self.message } /// User id associated with the statement, if present. + #[must_use] pub fn user_id(&self) -> Option<&str> { self.user_id.as_deref() } /// Username associated with the statement, if present. + #[must_use] pub fn username(&self) -> Option<&str> { self.username.as_deref() } @@ -1317,7 +1335,7 @@ impl ElectoralLogRow { }; let data: StatementHeadDataString = deserialize_value(head.clone()) - .map_err(|err| anyhow!(format!("{:?}, Failed to parse head: {}", err, head)))?; + .map_err(|err| anyhow!(format!("{err:?}, Failed to parse head: {head}")))?; Ok(data) } @@ -1351,9 +1369,9 @@ impl TryFrom<&Row> for ElectoralLogRow { fn try_from(row: &Row) -> Result { let mut id = 0; let mut created: i64 = 0; - let mut sender_pk = String::from(""); + let mut sender_pk = String::new(); let mut statement_timestamp: i64 = 0; - let mut statement_kind = String::from(""); + let mut statement_kind = String::new(); let mut message = vec![]; let mut user_id = None; let mut username = None; @@ -1361,33 +1379,31 @@ impl TryFrom<&Row> for ElectoralLogRow { for (column, value) in row.columns.iter().zip(row.values.iter()) { match column.as_str() { c if c.ends_with(".id)") => { - assign_value!(Value::N, value, id) + assign_value!(Value::N, value, id); } c if c.ends_with(".created)") => { - assign_value!(Value::Ts, value, created) + assign_value!(Value::Ts, value, created); } c if c.ends_with(".sender_pk)") => { - assign_value!(Value::S, value, sender_pk) + assign_value!(Value::S, value, sender_pk); } c if c.ends_with(".statement_timestamp)") => { - assign_value!(Value::Ts, value, statement_timestamp) + 
assign_value!(Value::Ts, value, statement_timestamp); } c if c.ends_with(".statement_kind)") => { - assign_value!(Value::S, value, statement_kind) + assign_value!(Value::S, value, statement_kind); } c if c.ends_with(".message)") => { - assign_value!(Value::Bs, value, message) + assign_value!(Value::Bs, value, message); } c if c.ends_with(".user_id)") => match value.value.as_ref() { Some(Value::S(inner)) => user_id = Some(inner.clone()), - Some(Value::Null(_)) => user_id = None, - None => user_id = None, + Some(Value::Null(_)) | None => user_id = None, _ => return Err(anyhow!("invalid column value for 'user_id'")), }, c if c.ends_with(".username)") => match value.value.as_ref() { Some(Value::S(inner)) => username = Some(inner.clone()), - Some(Value::Null(_)) => username = None, - None => username = None, + Some(Value::Null(_)) | None => username = None, _ => return Err(anyhow!("invalid column value for 'username'")), }, _ => return Err(anyhow!("invalid column found '{}'", column.as_str())), @@ -1441,7 +1457,7 @@ impl CastVoteEntry { let ballot_id = entry.ballot_id.clone().unwrap_or_default(); let username = entry.username.clone(); let message: &Message = &Message::strand_deserialize(&entry.message) - .map_err(|err| anyhow!("Failed to deserialize message: {:?}", err))?; + .map_err(|err| anyhow!("Failed to deserialize message: {err:?}"))?; let message = Some(message.to_string()); Ok(Some(CastVoteEntry { @@ -1492,7 +1508,7 @@ pub async fn list_electoral_log(input: GetElectoralLogBody) -> Result = Vec::with_capacity(limit); let mut resp_stream = sql_query_response.into_inner(); @@ -1505,7 +1521,7 @@ pub async fn list_electoral_log(input: GetElectoralLogBody) -> Result Result IMMUDB_ROWS_LIMIT as i64, // When there is a filter, need to fetch all entries by batches. 
- true => input.limit.unwrap_or(MAX_ROWS_PER_PAGE as i64), + let limit: i64 = if ballot_id_filter.is_empty() { + input.limit.unwrap_or(MAX_ROWS_PER_PAGE_I64) + } else { + IMMUDB_ROWS_LIMIT_I64 // When there is a filter, need to fetch all entries by batches. }; let mut offset: i64 = input.offset.unwrap_or(0); let mut list: Vec = Vec::with_capacity(MAX_ROWS_PER_PAGE); // Filtered messages. let (cols_match_count, cols_match_select) = get_cols_match_count_and_select(&election_id, user_id, ballot_id_filter); let mut client = get_board_client().await?; - let total = client - .count_electoral_log_messages(&board_name, Some(cols_match_count)) - .await? - .to_u64() - .unwrap_or(0) as usize; + let total = usize::try_from( + client + .count_electoral_log_messages(&board_name, Some(cols_match_count)) + .await? + .to_u64() + .unwrap_or(0), + ) + .unwrap_or(0); let mut filter_matched = false; // Exit at the first match if the filter is not empty - while (list.len() as i64) < output_limit && (offset < total as i64) && !filter_matched { + let total_i64 = i64::try_from(total).with_context(|| "total to i64 conversion overflow")?; + while i64::try_from(list.len()).with_context(|| "list.len to i64 conversion overflow")? + < output_limit + && offset < total_i64 + && !filter_matched + { let electoral_log_messages = client .get_electoral_log_messages_filtered( &board_name, @@ -1628,11 +1653,11 @@ pub async fn list_cast_vote_messages( order_by.clone(), ) .await - .map_err(|err| anyhow!("Failed to get filtered messages: {:?}", err))?; + .map_err(|err| anyhow!("Failed to get filtered messages: {err:?}"))?; let t_entries = electoral_log_messages.len(); info!("Got {t_entries} entries. Offset: {offset}, limit: {limit}, total: {total}"); - for message in electoral_log_messages.iter() { + for message in &electoral_log_messages { match CastVoteEntry::from_elog_message(message)? 
{ Some(entry) if !ballot_id_filter.is_empty() => { // If there is filter exit at the first match @@ -1645,7 +1670,10 @@ pub async fn list_cast_vote_messages( } None => {} } - if (list.len() as i64) >= output_limit || filter_matched { + if i64::try_from(list.len()).with_context(|| "list.len to i64 conversion overflow")? + >= output_limit + || filter_matched + { break; } } diff --git a/packages/windmill/src/services/event_list.rs b/packages/windmill/src/services/event_list.rs index 00ce901131..cd5558fda0 100644 --- a/packages/windmill/src/services/event_list.rs +++ b/packages/windmill/src/services/event_list.rs @@ -14,7 +14,7 @@ use deadpool_postgres::Transaction; use rocket::http::Status; use sequent_core::services::keycloak; use sequent_core::types::hasura::core::ElectionEvent; -use sequent_core::types::scheduled_event::*; +use sequent_core::types::scheduled_event::ScheduledEvent; use serde::{Deserialize, Serialize}; use serde_json::Value as Jsonb; use std::{collections::HashMap, convert::TryFrom}; @@ -81,9 +81,7 @@ impl TryFrom<(ScheduledEvent, ElectionEvent)> for GetEventListOutput { .and_then(|id| id.as_str()) .unwrap_or_default() .to_string(), - schedule: event_data - .cron_config - .and_then(|cc| cc.scheduled_date.map(|d| d.to_string())), + schedule: event_data.cron_config.and_then(|cc| cc.scheduled_date), task_id: event_data.task_id.clone(), tenant_id: event_data.tenant_id, election_event_id: event_data.election_event_id, diff --git a/packages/windmill/src/services/export/export_application.rs b/packages/windmill/src/services/export/export_application.rs index 0c87cb6346..c156b4a065 100644 --- a/packages/windmill/src/services/export/export_application.rs +++ b/packages/windmill/src/services/export/export_application.rs @@ -71,7 +71,7 @@ pub async fn write_export_document( "verification_type", "status", ]; - let name = format!("applications-{}", document_id); + let name = format!("applications-{document_id}"); let mut writer = Writer::from_writer(vec![]); 
writer.write_record(&headers)?; @@ -117,7 +117,7 @@ pub async fn write_export_document( &temp_path_string, file_size, "text/csv", - &first_task.tenant_id.to_string(), + &first_task.tenant_id.clone(), Some(first_task.election_event_id.clone()), &name, Some(document_id.to_string()), diff --git a/packages/windmill/src/services/export/export_ballot_publication.rs b/packages/windmill/src/services/export/export_ballot_publication.rs index 7108a11e60..a2814180a4 100644 --- a/packages/windmill/src/services/export/export_ballot_publication.rs +++ b/packages/windmill/src/services/export/export_ballot_publication.rs @@ -36,9 +36,9 @@ pub async fn write_export_document( tenant_id: &str, to_upload: bool, ) -> Result { - let document_name = format!("export-{}.json", document_id); + let document_name = format!("export-{document_id}.json"); - let (_temp_path, temp_path_string, file_size) = + let (temp_path, temp_path_string, file_size) = write_into_named_temp_file(&data, &document_name, ".json") .map_err(|e| anyhow!("Error writing into named temp file: {e:?}"))?; @@ -58,7 +58,7 @@ pub async fn write_export_document( .map_err(|e| anyhow!("Error uploading and returning document to postgres: {e:?}"))?; } - Ok(_temp_path) + Ok(temp_path) } /// Builds one JSON object per ballot publication. diff --git a/packages/windmill/src/services/export/export_bulletin_boards.rs b/packages/windmill/src/services/export/export_bulletin_boards.rs index 8d37ca59ca..877d218e9b 100644 --- a/packages/windmill/src/services/export/export_bulletin_boards.rs +++ b/packages/windmill/src/services/export/export_bulletin_boards.rs @@ -22,47 +22,48 @@ use regex::Regex; use sequent_core::util::aws::get_max_upload_size; use sequent_core::util::temp_path::generate_temp_file; use std::collections::HashMap; +use std::sync::LazyLock; use tempfile::{NamedTempFile, TempPath}; use tracing::{event, info, instrument, Level}; -lazy_static! 
{ - /// Validates bulletin-board CSV column names (alphanumeric, dot, underscore, hyphen). - pub static ref HEADER_RE: Regex = Regex::new(r"^[a-zA-Z0-9._-]+$").unwrap(); - /// CSV header: owning election id (empty string for the event-level board). - pub static ref ELECTION_ID_COL_NAME: String = String::from("election_id"); - /// CSV header: message row id. - pub static ref ID_COL_NAME: String = String::from("id"); - /// CSV header: row creation timestamp. - pub static ref CREATED_COL_NAME: String = "created".to_string(); - /// CSV header: sender public key. - pub static ref SENDER_PK_COL_NAME: String = "sender_pk".to_string(); - /// CSV header: statement timestamp. - pub static ref STATEMENT_TIMESTAMP_COL_NAME: String = "statement_timestamp".to_string(); - /// CSV header: statement kind discriminator. - pub static ref STATEMENT_COL_NAME: String = "statement_kind".to_string(); - /// CSV header: batch index. - pub static ref BATCH_COL_NAME: String = "batch".to_string(); - /// CSV header: mix round number. - pub static ref MIX_NUMBER_COL_NAME: String = "mix_number".to_string(); - /// CSV header: base64-encoded payload. - pub static ref MESSAGE_COL_NAME: String = "message".to_string(); - /// CSV header: row schema/version tag. - pub static ref VERSION_COL_NAME: String = "version".to_string(); - /// CSV header used in trustee config exports (trustee display name). - pub static ref TRUSTEE_NAME_COL_NAME: String = "trustee".to_string(); - /// CSV header for trustee-side configuration blob. - pub static ref TRUSTEE_CONFIG_COL_NAME: String = "config".to_string(); -} +/// Validates bulletin-board CSV column names (alphanumeric, dot, underscore, hyphen). +pub static HEADER_RE: LazyLock = + LazyLock::new(|| Regex::new(r"^[a-zA-Z0-9._-]+$").expect("HEADER_RE regex must compile")); +/// CSV header: owning election id (empty string for the event-level board). 
+pub static ELECTION_ID_COL_NAME: LazyLock = LazyLock::new(|| String::from("election_id")); +/// CSV header: message row id. +pub static ID_COL_NAME: LazyLock = LazyLock::new(|| String::from("id")); +/// CSV header: row creation timestamp. +pub static CREATED_COL_NAME: LazyLock = LazyLock::new(|| "created".to_string()); +/// CSV header: sender public key. +pub static SENDER_PK_COL_NAME: LazyLock = LazyLock::new(|| "sender_pk".to_string()); +/// CSV header: statement timestamp. +pub static STATEMENT_TIMESTAMP_COL_NAME: LazyLock = + LazyLock::new(|| "statement_timestamp".to_string()); +/// CSV header: statement kind discriminator. +pub static STATEMENT_COL_NAME: LazyLock = LazyLock::new(|| "statement_kind".to_string()); +/// CSV header: batch index. +pub static BATCH_COL_NAME: LazyLock = LazyLock::new(|| "batch".to_string()); +/// CSV header: mix round number. +pub static MIX_NUMBER_COL_NAME: LazyLock = LazyLock::new(|| "mix_number".to_string()); +/// CSV header: base64-encoded payload. +pub static MESSAGE_COL_NAME: LazyLock = LazyLock::new(|| "message".to_string()); +/// CSV header: row schema/version tag. +pub static VERSION_COL_NAME: LazyLock = LazyLock::new(|| "version".to_string()); +/// CSV header used in trustee config exports (trustee display name). +pub static TRUSTEE_NAME_COL_NAME: LazyLock = LazyLock::new(|| "trustee".to_string()); +/// CSV header for trustee-side configuration blob. +pub static TRUSTEE_CONFIG_COL_NAME: LazyLock = LazyLock::new(|| "config".to_string()); /// Converts a single B3 bulletin-board row into a CSV record (message is standard base64, no padding). 
#[instrument] -fn get_board_record(election_id: &str, row: B3MessageRow) -> Vec { +fn get_board_record(election_id: &str, row: &B3MessageRow) -> Vec { let message_b64 = general_purpose::STANDARD_NO_PAD.encode(row.message.clone()); vec![ election_id.to_string(), row.id.to_string(), row.created.to_string(), - row.sender_pk.to_string(), + row.sender_pk.clone(), row.statement_timestamp.to_string(), row.statement_kind.clone(), row.batch.to_string(), @@ -98,7 +99,7 @@ async fn create_boards_csv(boards_map: HashMap>) -> Re writer.write_record(&headers)?; for (board_name, board_rows) in boards_map { for board_row in board_rows { - let record = get_board_record(&board_name, board_row); + let record = get_board_record(&board_name, &board_row); writer .write_record(&record) .with_context(|| "Error writing record")?; @@ -146,7 +147,7 @@ pub async fn read_election_event_boards( let board_name = get_event_board(tenant_id, election_event_id, &slug); let b3_messages = b3_client.get_messages(&board_name, -1).await?; - boards_map.insert("".to_string(), b3_messages); + boards_map.insert(String::new(), b3_messages); } // elections @@ -192,7 +193,7 @@ pub async fn read_protocol_manager_keys( ) .await? .ok_or(anyhow!("protocol manager secret not found"))?; - let record = vec!["".into(), protocol_manager_data]; + let record = vec![String::new(), protocol_manager_data]; writer .write_record(&record) .with_context(|| "Error writing record")?; diff --git a/packages/windmill/src/services/export/export_election_event.rs b/packages/windmill/src/services/export/export_election_event.rs index 2dc90d6028..b1e291e1b5 100644 --- a/packages/windmill/src/services/export/export_election_event.rs +++ b/packages/windmill/src/services/export/export_election_event.rs @@ -67,6 +67,7 @@ use crate::services::password; /// Propagates parallel query failures, Keycloak admin errors, /// UUID parse errors, or image download failures. 
#[instrument(err, skip(transaction))] +#[allow(clippy::large_futures)] pub async fn read_export_data( transaction: &Transaction<'_>, tenant_id: &str, @@ -120,7 +121,9 @@ pub async fn read_export_data( }) .collect(); - let export_elections = if !export_config.bulletin_board { + let export_elections = if export_config.bulletin_board { + elections.clone() + } else { elections .clone() .into_iter() @@ -129,8 +132,6 @@ pub async fn read_export_data( ..election.clone() }) .collect() - } else { - elections.clone() }; let export_keys_ceremonies = if export_config.bulletin_board { @@ -198,9 +199,9 @@ pub async fn generate_encrypted_zip( /// # Errors /// /// Returns an error when serialization or temp file IO fails. -pub async fn write_export_document(data: ImportElectionEventSchema) -> Result { +pub fn write_export_document(data: &ImportElectionEventSchema) -> Result { // Serialize the data into JSON string - let data_str = serde_json::to_string_pretty(&data)?; + let data_str = serde_json::to_string_pretty(data)?; let data_bytes = data_str.into_bytes(); // Create and write the data into a temporary file @@ -223,8 +224,11 @@ fn get_export_election_event_filename( let election_event_hash: String = hash_sha256_file(file_path) .with_context(|| "Error hashing the exported election_event")? 
.iter() - .map(|byte| format!("{:02x}", byte)) - .collect(); + .fold(String::new(), |mut acc, byte| { + use std::fmt::Write; + let _ = write!(acc, "{byte:02x}"); + acc + }); let extension = if is_encrypted { "ezip" } else { "zip" }; Ok(format!( @@ -254,7 +258,7 @@ pub async fn get_image_file_from_s3( let bytes = s3::get_file_from_s3(s3_bucket.to_owned(), s3_path).await?; - let file_name = format!("document_{}_{}", document_id, doc_name); + let file_name = format!("document_{document_id}_{doc_name}"); let temp_file = generate_temp_file("", &file_name).context("generating temp file")?; let std_file = temp_file @@ -285,6 +289,7 @@ async fn process_images( ) -> Result> where F: Fn(&T) -> Option<&str> + Send + Sync, + T: Send + Sync, { let mut s3_files = Vec::new(); @@ -390,6 +395,7 @@ pub async fn process_event_images( /// Returns an error on missing passwords when required, /// IO/ZIP failures, subgraph export helper failures, or upload failures. #[instrument(err)] +#[allow(clippy::too_many_lines, clippy::large_futures)] pub async fn process_export_zip( tenant_id: &str, election_event_id: &str, @@ -425,7 +431,7 @@ pub async fn process_export_zip( &export_config, ) .await?; - let temp_election_event_file = write_export_document(export_data).await?; + let temp_election_event_file = write_export_document(&export_data)?; let election_event_filename = format!( "{}-{}.json", EDocuments::ELECTION_EVENT.to_file_name(), @@ -447,8 +453,8 @@ pub async fn process_export_zip( let file_name = file_path .file_name() .ok_or(anyhow!( - "Error getting file name from path: {:?}", - file_path + "Error getting file name from path: {}", + file_path.display() ))? .to_string_lossy() .to_string(); @@ -529,13 +535,13 @@ pub async fn process_export_zip( Some(report.id.clone()), ) .await? 
- .unwrap_or("".to_string()); + .unwrap_or(String::new()); wtr.write_record(&[ - report.id.to_string(), - report.election_id.unwrap_or_default().to_string(), - report.report_type.to_string(), - report.template_alias.unwrap_or_default().to_string(), + report.id.clone(), + report.election_id.unwrap_or_default().clone(), + report.report_type.clone(), + report.template_alias.unwrap_or_default().clone(), serde_json::to_string(&report.cron_config) .map_err(|e| anyhow!("Error serializing cron config: {e:?}"))?, report.encryption_policy.to_string(), @@ -612,8 +618,8 @@ pub async fn process_export_zip( let file_name = file_path .file_name() .ok_or(anyhow!( - "Error getting file name from path: {:?}", - file_path + "Error getting file name from path: {}", + file_path.display() ))? .to_string_lossy() .to_string(); @@ -816,7 +822,7 @@ pub async fn process_export_zip( .map_err(|e| anyhow!("Error finalizing ZIP file: {e:?}"))?; // Encrypt ZIP file if required - let encryption_password = export_config.password.unwrap_or("".to_string()); + let encryption_password = export_config.password.unwrap_or_default(); if encryption_password.is_empty() && (export_config.bulletin_board || export_config.reports) { return Err(anyhow!("Bulletin Board requires password")); } @@ -853,7 +859,7 @@ pub async fn process_export_zip( &hasura_transaction, upload_path .to_str() - .ok_or_else(|| anyhow!("Can't convert {:?} to string", upload_path))?, + .ok_or_else(|| anyhow!("Can't convert path to string: {}", upload_path.display()))?, zip_size, "application/zip", tenant_id, diff --git a/packages/windmill/src/services/export/export_schedule_events.rs b/packages/windmill/src/services/export/export_schedule_events.rs index dbf1833448..98beaf4453 100644 --- a/packages/windmill/src/services/export/export_schedule_events.rs +++ b/packages/windmill/src/services/export/export_schedule_events.rs @@ -72,7 +72,7 @@ pub async fn write_export_document( ] }; - let name = format!("scheduled_events-{}", 
election_event_id); + let name = format!("scheduled_events-{election_event_id}"); let mut writer = Writer::from_writer(vec![]); writer.write_record(&headers)?; @@ -82,7 +82,7 @@ pub async fn write_export_document( .as_object() .ok_or_else(|| anyhow!("Failed to convert ScheduledEvent to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values)?; diff --git a/packages/windmill/src/services/export/export_tally.rs b/packages/windmill/src/services/export/export_tally.rs index ba10a72f87..f4e333508b 100644 --- a/packages/windmill/src/services/export/export_tally.rs +++ b/packages/windmill/src/services/export/export_tally.rs @@ -88,7 +88,7 @@ pub async fn export_tally_session( .as_object() .ok_or_else(|| anyhow!("Failed to convert tally_session to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -150,7 +150,7 @@ pub async fn export_tally_session_execution( .as_object() .ok_or_else(|| anyhow!("Failed to convert tally_session_execution to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -212,7 +212,7 @@ pub async fn export_tally_session_contest( .as_object() .ok_or_else(|| anyhow!("Failed to convert tally_session_contest to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -269,7 +269,7 @@ pub async fn export_results_event( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_event to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -329,7 +329,7 @@ pub async fn export_results_election_area( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_election_area to JSON object"))? 
.values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -391,7 +391,7 @@ pub async fn export_results_election( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_election to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -468,7 +468,7 @@ pub async fn export_results_contest( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_contest to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -534,7 +534,7 @@ pub async fn export_results_contest_candidate( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_contests_candidate to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -611,7 +611,7 @@ pub async fn export_results_area_contest( .as_object() .ok_or_else(|| anyhow!("Failed to convert results_area_contest to JSON object"))? .values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -680,7 +680,7 @@ pub async fn export_results_area_contest_candidate( anyhow!("Failed to convert results_area_contests_candidate to JSON object") })? 
.values() - .map(|value| value.to_string()) + .map(std::string::ToString::to_string) .collect(); writer.write_record(&values); @@ -779,7 +779,7 @@ pub async fn read_tally_data( for result in &results { if let Err(e) = result { - return Err(anyhow::anyhow!("Export tally failed: {:?}", e)); + return Err(anyhow::anyhow!("Export tally failed: {e:?}")); } } diff --git a/packages/windmill/src/services/export/export_tally_results.rs b/packages/windmill/src/services/export/export_tally_results.rs index aa4d1cfcb5..dc402583c0 100644 --- a/packages/windmill/src/services/export/export_tally_results.rs +++ b/packages/windmill/src/services/export/export_tally_results.rs @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: 2025 Sequent Tech Inc // // SPDX-License-Identifier: AGPL-3.0-only -//! Converts tally SQLite result databases into XLSX spreadsheets +//! Converts tally `SQLite` result databases into XLSX spreadsheets //! and updates tally session execution document pointers. use crate::postgres::document::get_document; use crate::postgres::tally_session_execution::get_last_tally_session_execution; @@ -17,22 +17,22 @@ use sequent_core::temp_path::generate_temp_file; use sequent_core::temp_path::get_file_size; use sequent_core::types::ceremonies::TallySessionDocuments; use std::path::Path; -use tracing::instrument; +use tracing::{info, instrument}; -/// Excel per-cell character limit enforced when copying SQLite text into worksheets. +/// Excel per-cell character limit enforced when copying `SQLite` text into worksheets. const EXCEL_STRING_LIMIT: usize = 32767; -/// Builds an XLSX from the execution’s SQLite results artifact, +/// Builds an XLSX from the execution’s `SQLite` results artifact, /// uploads it, and stores the new document id on the execution. /// /// # Errors /// -/// Returns an error when the SQLite document is missing, +/// Returns an error when the `SQLite` document is missing, /// temp download fails, conversion fails, DB update fails, or upload fails. 
/// /// # Panics /// -/// Panics if the SQLite document lookup unexpectedly returns `None` after the prior checks. +/// Panics if the `SQLite` document lookup unexpectedly returns `None` after the prior checks. #[instrument(err)] pub async fn export_tally_results_to_xlsx( hasura_transaction: &Transaction<'_>, @@ -55,29 +55,27 @@ pub async fn export_tally_results_to_xlsx( sqlite_document_id, ) .await - .map_err(|e| anyhow!("Failed to get document: {}", e))?; + .map_err(|e| anyhow!("Failed to get document: {e}"))?; - if sqlite_document.is_none() { + let Some(sqlite_document) = sqlite_document else { return Err(anyhow!("Document not found")); - } - - let sqlite_document = sqlite_document.unwrap(); + }; let sqlite_file = get_document_as_temp_file(&tenant_id, &sqlite_document) .await - .map_err(|e| anyhow!("Failed to get sqlite document as temp file: {}", e))?; + .map_err(|e| anyhow!("Failed to get sqlite document as temp file: {e}"))?; let xlsx_file_name = format!("results-{}", results_event_id.clone()); let xlsx_file = generate_temp_file(&xlsx_file_name, ".xlsx")?; convert_db_to_xlsx(sqlite_file.path(), xlsx_file.path()) .await - .map_err(|e| anyhow!("Failed to convert DB to XLSX: {}", e))?; + .map_err(|e| anyhow!("Failed to convert DB to XLSX: {e}"))?; let xlsx_file_path = xlsx_file.into_temp_path(); let xlsx_file_path_string = xlsx_file_path.to_string_lossy().to_string(); let xlsx_file_size = get_file_size(xlsx_file_path_string.as_str()) - .map_err(|e| anyhow!("Failed to get XLSX file size: {}", e))?; + .map_err(|e| anyhow!("Failed to get XLSX file size: {e}"))?; let new_tally_session_documents = TallySessionDocuments { xlsx: Some(document_id.clone()), @@ -92,7 +90,7 @@ pub async fn export_tally_results_to_xlsx( new_tally_session_documents, ) .await - .map_err(|e| anyhow!("Failed to update tally session execution documents: {}", e))?; + .map_err(|e| anyhow!("Failed to update tally session execution documents: {e}"))?; let _ = upload_and_return_document( 
hasura_transaction, @@ -101,30 +99,29 @@ pub async fn export_tally_results_to_xlsx( "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", &tenant_id, Some(election_event_id), - &format!("{}.xlsx", xlsx_file_name), + &format!("{xlsx_file_name}.xlsx"), Some(document_id), false, ) .await - .map_err(|e| anyhow!("Failed to upload XLSX document: {}", e))?; + .map_err(|e| anyhow!("Failed to upload XLSX document: {e}"))?; Ok(()) } /// Truncates `value_str` to the Excel’s max string length. -fn truncate_string_for_excel(value_str: String) -> String { - let truncated_text = if value_str.len() > EXCEL_STRING_LIMIT { +fn truncate_string_for_excel(value_str: &str) -> String { + if value_str.len() > EXCEL_STRING_LIMIT { value_str .chars() .take(EXCEL_STRING_LIMIT) .collect::() } else { value_str.to_string() - }; - truncated_text + } } -/// Converts a SQLite database file to an XLSX file, with each table as a worksheet. +/// Converts a `SQLite` database file to an XLSX file, with each table as a worksheet. /// /// # Panics /// @@ -132,7 +129,7 @@ fn truncate_string_for_excel(value_str: String) -> String { /// /// # Errors /// -/// Propagates SQLite open/query errors or XLSX writer failures. +/// Propagates `SQLite` open/query errors or XLSX writer failures. 
#[instrument(err)] async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { let db_conn = Connection::open(db_path)?; @@ -145,21 +142,27 @@ async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { for table_result in table_names_iter { let table_name: String = table_result?; - println!(" - Processing table: '{}'", table_name); + info!(" - Processing table: '{table_name}'"); let mut worksheet = workbook.add_worksheet(); worksheet.set_name(&table_name)?; - let mut table_stmt = db_conn.prepare(&format!("SELECT * FROM `{}`", table_name.clone()))?; + let mut table_stmt = db_conn.prepare(&format!("SELECT * FROM `{table_name}`"))?; let column_names: Vec = table_stmt .column_names() .iter() - .map(|s| s.to_string()) + .map(std::string::ToString::to_string) .collect(); let column_count = table_stmt.column_count(); for (col_index, col_name) in column_names.iter().enumerate() { - worksheet.write_string(0, col_index as u16, col_name)?; + let col_u64 = u16::try_from(col_index).map_err(|_| { + anyhow!( + "column index {col_index} exceeds Excel column limit {}", + u16::MAX + ) + })?; + worksheet.write_string(0, col_u64, col_name)?; } let mut rows = table_stmt.query([])?; @@ -169,25 +172,31 @@ async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { while let Some(row) = rows.next()? 
{ for col_index in 0..column_count { let value_ref = row.get_ref(col_index)?; + let col = u16::try_from(col_index).map_err(|_| { + anyhow!( + "column index {col_index} exceeds Excel column limit {}", + u16::MAX + ) + })?; match value_ref.data_type() { Type::Integer => { let num: i64 = value_ref.as_i64()?; - worksheet.write_number(row_index, col_index as u16, num as f64)?; + worksheet.write_string(row_index, col, num.to_string())?; } Type::Real => { let num: f64 = value_ref.as_f64()?; - worksheet.write_number(row_index, col_index as u16, num)?; + worksheet.write_number(row_index, col, num)?; } Type::Text => { let text: String = value_ref.as_str()?.to_string(); - let truncated_text = truncate_string_for_excel(text); - worksheet.write_string(row_index, col_index as u16, &truncated_text)?; + let truncated_text = truncate_string_for_excel(&text); + worksheet.write_string(row_index, col, &truncated_text)?; } _ => { // For other types like Null, Blob, etc., write as a string representation let value_text = value_ref.as_str().unwrap_or("NULL"); - let truncated_text = truncate_string_for_excel(value_text.to_string()); - worksheet.write_string(row_index, col_index as u16, &truncated_text)?; + let truncated_text = truncate_string_for_excel(value_text); + worksheet.write_string(row_index, col, &truncated_text)?; } } } @@ -196,7 +205,7 @@ async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { } workbook.save(xlsx_path)?; - println!( + info!( "Conversion successful! XLSX file created at: {}", xlsx_path.display() ); @@ -210,7 +219,7 @@ async fn convert_db_to_xlsx(db_path: &Path, xlsx_path: &Path) -> Result<()> { /// # Errors /// /// Returns an error when no execution exists, documents are absent, -/// SQLite id is missing, or JSON handling fails. +/// `SQLite` id is missing, or JSON handling fails. 
/// /// # Panics /// @@ -229,32 +238,25 @@ pub async fn get_tally_session_execution_results_sqlite_file( tally_session_id, ) .await - .map_err(|e| anyhow!("Failed to get last tally session execution: {}", e))? + .map_err(|e| anyhow!("Failed to get last tally session execution: {e}"))? .ok_or(anyhow!( - "No tally session execution found for tally session id: {}", - tally_session_id + "No tally session execution found for tally session id: {tally_session_id}" ))?; - if tally_session_execution.documents.is_none() { - return Err(anyhow!( - "No documents found for tally session id: {}", - tally_session_id - )); - } - - let documents = serde_json::to_string(&tally_session_execution.documents.unwrap().clone())?; + let documents_json = tally_session_execution.documents.as_ref().ok_or(anyhow!( + "No documents found for tally session id: {tally_session_id}" + ))?; + let documents = serde_json::to_string(documents_json)?; let documents = deserialize_str::(&documents)?; - if (documents.sqlite.is_none()) { + if documents.sqlite.is_none() { return Err(anyhow!( - "No SQLite document found for tally session id: {}", - tally_session_id + "No SQLite document found for tally session id: {tally_session_id}" )); } let results_event_id = tally_session_execution.results_event_id.ok_or(anyhow!( - "No results event id found for tally session id: {}", - tally_session_id + "No results event id found for tally session id: {tally_session_id}" ))?; Ok((documents, results_event_id, tally_session_execution.id)) diff --git a/packages/windmill/src/services/export/export_tasks_execution.rs b/packages/windmill/src/services/export/export_tasks_execution.rs index 77e9d62977..4d3fe58811 100644 --- a/packages/windmill/src/services/export/export_tasks_execution.rs +++ b/packages/windmill/src/services/export/export_tasks_execution.rs @@ -60,7 +60,7 @@ pub async fn write_export_document( &temp_path_string, file_size, "application/json", - &first_task.tenant_id.to_string(), + &first_task.tenant_id.clone(), 
first_task.election_event_id.clone(), &name, Some(document_id.to_string()), @@ -106,7 +106,7 @@ pub async fn process_export( let _commit = hasura_transaction .commit() .await - .map_err(|e| anyhow!("Commit failed: {}", e)); + .map_err(|e| anyhow!("Commit failed: {e}")); Ok(()) } diff --git a/packages/windmill/src/services/export/export_template.rs b/packages/windmill/src/services/export/export_template.rs index eee6f74e0f..e675271cc8 100644 --- a/packages/windmill/src/services/export/export_template.rs +++ b/packages/windmill/src/services/export/export_template.rs @@ -30,8 +30,8 @@ pub async fn read_export_data( let transformed_templates: Vec