feat: allow health check forcing in debug, setup test mocking, plan out integration tests
+64 -12
@@ -3,25 +3,20 @@ use axum_cookie::CookieLayer;
 use dashmap::DashMap;
 use jsonwebtoken::{DecodingKey, EncodingKey};
 use std::sync::Arc;
-use tokio::sync::RwLock;
+use std::time::Duration;
+use tokio::sync::{Notify, RwLock};
+use tokio::task::JoinHandle;
 
 use crate::data::pool::PgPool;
 use crate::{auth::AuthRegistry, config::Config, image::ImageStorage, routes};
 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
 pub struct Health {
     migrations: bool,
     database: bool,
 }
 
 impl Health {
-    pub fn new() -> Self {
-        Self {
-            migrations: false,
-            database: false,
-        }
-    }
-
     pub fn ok(&self) -> bool {
         self.migrations && self.database
     }
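
A quick note on the swap from the hand-written constructor to the derived default: `bool::default()` is `false`, so `Health::default()` produces exactly the all-false state the removed `new()` did. A standalone check (`PartialEq` is added here only for the assertion):

// #[derive(Default)] on two bools yields the same all-false state the
// removed new() produced; PartialEq is added only so assert_eq! compiles.
#[derive(Debug, Clone, Default, PartialEq)]
struct Health {
    migrations: bool,
    database: bool,
}

fn main() {
    assert_eq!(Health::default(), Health { migrations: false, database: false });
}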
@@ -44,10 +39,11 @@ pub struct AppState {
     pub db: Arc<PgPool>,
     pub health: Arc<RwLock<Health>>,
     pub image_storage: Arc<ImageStorage>,
+    pub healthchecker_task: Arc<RwLock<Option<JoinHandle<()>>>>,
 }
 
 impl AppState {
-    pub fn new(config: Config, auth: AuthRegistry, db: PgPool) -> Self {
+    pub async fn new(config: Config, auth: AuthRegistry, db: PgPool, shutdown_notify: Arc<Notify>) -> Self {
         let jwt_secret = config.jwt_secret.clone();
 
         // Initialize image storage
@@ -60,15 +56,71 @@ impl AppState {
             }
         };
 
-        Self {
+        let app_state = Self {
             auth: Arc::new(auth),
             sessions: Arc::new(DashMap::new()),
             jwt_encoding_key: Arc::new(EncodingKey::from_secret(jwt_secret.as_bytes())),
             jwt_decoding_key: Arc::new(DecodingKey::from_secret(jwt_secret.as_bytes())),
             db: Arc::new(db),
-            health: Arc::new(RwLock::new(Health::new())),
+            health: Arc::new(RwLock::new(Health::default())),
             image_storage,
-        }
+            healthchecker_task: Arc::new(RwLock::new(None)),
+        };
+
+        // Start the healthchecker task
+        {
+            let health_state = app_state.health.clone();
+            let db_pool = app_state.db.clone();
+            let healthchecker_task = app_state.healthchecker_task.clone();
+
+            let task = tokio::spawn(async move {
+                tracing::trace!("Health checker task started");
+                let mut backoff: u32 = 1;
+                let mut next_sleep = Duration::from_secs(0);
+                loop {
+                    tokio::select! {
+                        _ = shutdown_notify.notified() => {
+                            tracing::trace!("Health checker received shutdown notification; exiting");
+                            break;
+                        }
+                        _ = tokio::time::sleep(next_sleep) => {
+                            // Run health check
+                        }
+                    }
+
+                    // Run the actual health check
+                    let ok = sqlx::query("SELECT 1").execute(&*db_pool).await.is_ok();
+                    {
+                        let mut h = health_state.write().await;
+                        h.set_database(ok);
+                    }
+                    if ok {
+                        tracing::trace!(database_ok = true, "Health check succeeded; scheduling next run in 90s");
+                        backoff = 1;
+                        next_sleep = Duration::from_secs(90);
+                    } else {
+                        backoff = (backoff.saturating_mul(2)).min(60);
+                        tracing::trace!(database_ok = false, backoff, "Health check failed; backing off");
+                        next_sleep = Duration::from_secs(backoff as u64);
+                    }
+                }
+            });
+
+            // Store the task handle
+            let mut task_handle = healthchecker_task.write().await;
+            *task_handle = Some(task);
+        }
+
+        app_state
     }
+
+    /// Force an immediate health check (debug mode only)
+    pub async fn check_health(&self) -> bool {
+        let ok = sqlx::query("SELECT 1").execute(&*self.db).await.is_ok();
+        let mut h = self.health.write().await;
+        h.set_database(ok);
+        ok
+    }
 }
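For reference, the failure path above doubles the retry delay and saturates at 60 seconds, so consecutive failures sleep for 2s, 4s, 8s, 16s, 32s, then 60s thereafter, while a success resets to the 90s steady-state interval. A standalone sketch of that schedule:

// Sketch of the backoff schedule used by the health checker above:
// the delay doubles on each failure and is capped at 60 seconds.
fn next_backoff(backoff: u32) -> u32 {
    (backoff.saturating_mul(2)).min(60)
}

fn main() {
    let mut b: u32 = 1;
    for _ in 0..7 {
        b = next_backoff(b);
        print!("{}s ", b); // prints: 2s 4s 8s 16s 32s 60s 60s
    }
    println!();
}
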
@@ -1,4 +1,5 @@
 use async_trait::async_trait;
+use mockall::automock;
 use serde::Serialize;
 
 use crate::errors::ErrorResponse;

@@ -12,6 +13,7 @@ pub struct AuthUser {
     pub avatar_url: Option<String>,
 }
 
+#[automock]
 #[async_trait]
 pub trait OAuthProvider: Send + Sync {
     fn id(&self) -> &'static str;
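
With `#[automock]` in place (correctly ordered before `#[async_trait]`), mockall generates a `MockOAuthProvider` whose methods can be stubbed per test. A hypothetical test sketch; only `id()` is visible in this diff, and any other trait methods would need their own expectations if called:

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mock_provider_reports_configured_id() {
        // MockOAuthProvider is generated by #[automock] on the trait above.
        let mut provider = MockOAuthProvider::new();
        provider.expect_id().return_const("github");
        assert_eq!(provider.id(), "github");
    }
}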
@@ -3,8 +3,8 @@ use crate::{
     auth::AuthRegistry,
     config::Config,
 };
-use std::sync::Arc;
 use std::time::Instant;
+use std::{sync::Arc, time::Duration};
 use tracing::{info, trace, warn};
 
 #[cfg(unix)]
@@ -47,17 +47,16 @@ async fn main() {
         panic!("failed to run database migrations: {}", e);
     }
 
-    let app_state = AppState::new(config, auth, db);
+    // Create the shutdown notification before creating AppState
+    let notify = Arc::new(Notify::new());
+
+    let app_state = AppState::new(config, auth, db, notify.clone()).await;
     {
         // migrations succeeded
        let mut h = app_state.health.write().await;
         h.set_migrations(true);
     }
 
-    // Extract needed parts for health checker before moving app_state
-    let health_state = app_state.health.clone();
-    let db_pool = app_state.db.clone();
-
     let app = create_router(app_state);
 
     info!(%addr, "Starting HTTP server bind");
@@ -65,45 +64,8 @@ async fn main() {
     info!(%addr, "HTTP server listening");
 
     // coordinated graceful shutdown with timeout
-    let notify = Arc::new(Notify::new());
     let (tx_signal, rx_signal) = watch::channel::<Option<Instant>>(None);
 
-    // Spawn background health checker (listens for shutdown via notify)
-    {
-        let health_state = health_state.clone();
-        let db_pool = db_pool.clone();
-        let notify_for_health = notify.clone();
-        tokio::spawn(async move {
-            trace!("Health checker task started");
-            let mut backoff: u32 = 1;
-            let mut next_sleep = Duration::from_secs(0);
-            loop {
-                tokio::select! {
-                    _ = notify_for_health.notified() => {
-                        trace!("Health checker received shutdown notification; exiting");
-                        break;
-                    }
-                    _ = tokio::time::sleep(next_sleep) => {
-                        let ok = sqlx::query("SELECT 1").execute(&*db_pool).await.is_ok();
-                        {
-                            let mut h = health_state.write().await;
-                            h.set_database(ok);
-                        }
-                        if ok {
-                            trace!(database_ok = true, "Health check succeeded; scheduling next run in 90s");
-                            backoff = 1;
-                            next_sleep = Duration::from_secs(90);
-                        } else {
-                            backoff = (backoff.saturating_mul(2)).min(60);
-                            trace!(database_ok = false, backoff, "Health check failed; backing off");
-                            next_sleep = Duration::from_secs(backoff as u64);
-                        }
-                    }
-                }
-            }
-        });
-    }
-
     {
         let notify = notify.clone();
         let tx = tx_signal.clone();
@@ -127,9 +89,8 @@ async fn main() {
         std::process::exit(1);
     };
 
-    let notify_for_server = notify.clone();
     let server = axum::serve(listener, app).with_graceful_shutdown(async move {
-        notify_for_server.notified().await;
+        notify.notified().await;
     });
 
     tokio::select! {
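The two hunks above drop the server-local `Notify` and duplicated health-checker in favor of the single `Notify` created before `AppState::new`, so one notification can stop both the health checker and the HTTP server. A minimal sketch of that shared-`Notify` pattern; the signal handler that actually fires it is not shown in this diff:

use std::sync::Arc;
use tokio::sync::Notify;

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let notify = Arc::new(Notify::new());

    // A long-lived task parks on notified(), like the health checker above.
    let worker = tokio::spawn({
        let notify = notify.clone();
        async move {
            notify.notified().await;
            println!("worker: shutdown received");
        }
    });

    // On a current_thread runtime, yielding lets the worker reach its
    // notified().await before the notification fires.
    tokio::task::yield_now().await;

    // notify_waiters() wakes every task currently awaiting notified(),
    // which is how one Notify can fan out to several shutdown listeners.
    notify.notify_waiters();
    worker.await.unwrap();
}
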
@@ -371,7 +371,16 @@ pub async fn list_providers_handler(State(app_state): State<AppState>) -> axum::
     axum::Json(providers).into_response()
 }
 
-pub async fn health_handler(State(app_state): State<AppState>) -> axum::response::Response {
+pub async fn health_handler(
+    State(app_state): State<AppState>,
+    Query(params): Query<std::collections::HashMap<String, String>>,
+) -> axum::response::Response {
+    // Force health check in debug mode
+    #[cfg(debug_assertions)]
+    if params.get("force").is_some() {
+        app_state.check_health().await;
+    }
+
     let ok = app_state.health.read().await.ok();
     let status = if ok { StatusCode::OK } else { StatusCode::SERVICE_UNAVAILABLE };
     let body = serde_json::json!({ "ok": ok });
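
Given the integration tests planned in this commit, the forced check could be exercised end to end roughly as below. This is a hypothetical sketch: the `/health` route path, the import path of `create_router`, and the `test_app_state()` helper are assumptions, not shown in this diff:

#[cfg(test)]
mod tests {
    use axum::body::Body;
    use axum::http::{Request, StatusCode};
    use tower::ServiceExt; // provides `oneshot` for Router

    #[tokio::test]
    async fn forced_health_check_reports_status() {
        // test_app_state() is a hypothetical helper that builds an AppState
        // against a test database; create_router is the app's router factory.
        let app = create_router(test_app_state().await);

        let response = app
            .oneshot(Request::get("/health?force=1").body(Body::empty()).unwrap())
            .await
            .unwrap();

        // In a debug build the check runs synchronously, so the status
        // reflects the database's actual reachability.
        let status = response.status();
        assert!(status == StatusCode::OK || status == StatusCode::SERVICE_UNAVAILABLE);
    }
}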