feat(server): make database and OAuth providers optional configuration

All external services (database, Discord/GitHub OAuth, S3) can now be disabled individually by omitting their environment variables. The server degrades gracefully when a service is not configured, and partial configuration of a service group (for example, a client ID without its secret) fails at startup with a clear error naming the missing variables.

- Database: Falls back to dummy pool when DATABASE_URL is unset
- OAuth: Providers only registered when credentials are complete
- S3: Image storage disabled when credentials are missing
- Health checks reflect actual configuration state
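
As an illustration (hypothetical values, and assuming the usual field-name-to-environment-variable mapping for the required settings), a deployment that only needs GitHub sign-in can set just:

    PUBLIC_BASE_URL=https://pacman.example.com
    JWT_SECRET=some-long-random-secret
    GITHUB_CLIENT_ID=abc123
    GITHUB_CLIENT_SECRET=s3cr3t

DATABASE_URL, the DISCORD_* pair, and the S3_* group can be omitted entirely; setting GITHUB_CLIENT_ID without GITHUB_CLIENT_SECRET now aborts startup with a message listing the missing variable.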
2025-12-30 03:59:56 -06:00
parent 7d07071e64
commit 884f42a855
12 changed files with 725 additions and 150 deletions
+2
@@ -35,3 +35,5 @@ dev:
 # Build and preview frontend (web::up)
 up:
     @just web::up
+
+alias vcpkg := pacman::vcpkg
+47 -15
@@ -18,10 +18,16 @@ use crate::{auth::AuthRegistry, config::Config, image::ImageStorage, routes};
 pub struct Health {
     migrations: bool,
     database: bool,
+    /// Whether database is configured at all
+    database_enabled: bool,
 }

 impl Health {
     pub fn ok(&self) -> bool {
+        // If database is not enabled, we're healthy as long as we don't require it
+        if !self.database_enabled {
+            return true;
+        }
         self.migrations && self.database
     }

@@ -32,6 +38,10 @@ impl Health {
     pub fn set_database(&mut self, ok: bool) {
         self.database = ok;
     }
+
+    pub fn set_database_enabled(&mut self, enabled: bool) {
+        self.database_enabled = enabled;
+    }
 }

 #[derive(Clone)]

@@ -42,33 +52,45 @@ pub struct AppState {
     pub jwt_decoding_key: Arc<DecodingKey>,
     pub db: PgPool,
     pub health: Arc<RwLock<Health>>,
-    pub image_storage: Arc<ImageStorage>,
+    pub image_storage: Option<Arc<ImageStorage>>,
     pub healthchecker_task: Arc<RwLock<Option<JoinHandle<()>>>>,
+    /// Whether the database is actually configured (vs the dummy fallback pool)
+    pub database_configured: bool,
 }

 impl AppState {
     pub async fn new(config: Config, auth: AuthRegistry, db: PgPool, shutdown_notify: Arc<Notify>) -> Self {
-        Self::new_with_database(config, auth, db, shutdown_notify, true).await
+        Self::new_with_options(config, auth, db, shutdown_notify, true).await
     }

-    pub async fn new_with_database(
+    pub async fn new_with_options(
        config: Config,
        auth: AuthRegistry,
        db: PgPool,
        shutdown_notify: Arc<Notify>,
-        use_database: bool,
+        use_database_healthcheck: bool,
     ) -> Self {
        let jwt_secret = config.jwt_secret.clone();
+        let database_configured = config.database.is_some();

-        // Initialize image storage
-        let image_storage = match ImageStorage::from_config(&config) {
-            Ok(storage) => Arc::new(storage),
-            Err(e) => {
-                tracing::warn!(error = %e, "Failed to initialize image storage, avatar processing will be disabled");
-                // Create a dummy storage that will fail gracefully
-                Arc::new(ImageStorage::new(&config, "dummy").unwrap_or_else(|_| panic!("Failed to create dummy image storage")))
-            }
-        };
+        // Initialize image storage only if S3 is configured
+        let image_storage = config
+            .s3
+            .as_ref()
+            .and_then(|s3_config| match ImageStorage::from_config(s3_config) {
+                Ok(storage) => {
+                    tracing::info!("Image storage initialized");
+                    Some(Arc::new(storage))
+                }
+                Err(e) => {
+                    tracing::warn!(error = %e, "Failed to initialize image storage, avatar processing will be disabled");
+                    None
+                }
+            });
+
+        if image_storage.is_none() && config.s3.is_none() {
+            tracing::info!("S3 not configured, image storage disabled");
+        }

        let app_state = Self {
            auth: Arc::new(auth),

@@ -79,10 +101,17 @@ impl AppState {
            health: Arc::new(RwLock::new(Health::default())),
            image_storage,
            healthchecker_task: Arc::new(RwLock::new(None)),
+            database_configured,
        };

-        // Start the healthchecker task only if database is being used
-        if use_database {
+        // Set database enabled status
+        {
+            let mut h = app_state.health.write().await;
+            h.set_database_enabled(database_configured);
+        }
+
+        // Start the healthchecker task only if database healthcheck is enabled
+        if use_database_healthcheck && database_configured {
            let health_state = app_state.health.clone();
            let db_pool = app_state.db.clone();
            let healthchecker_task = app_state.healthchecker_task.clone();

@@ -131,6 +160,9 @@ impl AppState {
     /// Force an immediate health check (debug mode only)
     pub async fn check_health(&self) -> bool {
+        if !self.database_configured {
+            return true;
+        }
        let ok = sqlx::query("SELECT 1").execute(&self.db).await.is_ok();
        let mut h = self.health.write().await;
        h.set_database(ok);
+44 -24
@@ -21,38 +21,53 @@ pub struct AuthRegistry {
 }

 impl AuthRegistry {
+    /// Create a new AuthRegistry with providers based on configuration.
+    /// Only providers with complete configuration will be registered.
     pub fn new(config: &Config) -> Result<Self, oauth2::url::ParseError> {
        let http = reqwest::ClientBuilder::new()
            .redirect(reqwest::redirect::Policy::none())
            .build()
            .expect("HTTP client should build");

-        let github_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
-            BasicClient::new(oauth2::ClientId::new(config.github_client_id.clone()))
-                .set_client_secret(oauth2::ClientSecret::new(config.github_client_secret.clone()))
-                .set_auth_uri(oauth2::AuthUrl::new("https://github.com/login/oauth/authorize".to_string())?)
-                .set_token_uri(oauth2::TokenUrl::new(
-                    "https://github.com/login/oauth/access_token".to_string(),
-                )?)
-                .set_redirect_uri(
-                    oauth2::RedirectUrl::new(format!("{}/auth/github/callback", config.public_base_url))
-                        .expect("Invalid redirect URI"),
-                );
-
        let mut providers: HashMap<&'static str, Arc<dyn provider::OAuthProvider>> = HashMap::new();
-        providers.insert("github", github::GitHubProvider::new(github_client, http.clone()));

-        // Discord OAuth client
-        let discord_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
-            BasicClient::new(oauth2::ClientId::new(config.discord_client_id.clone()))
-                .set_client_secret(oauth2::ClientSecret::new(config.discord_client_secret.clone()))
-                .set_auth_uri(oauth2::AuthUrl::new("https://discord.com/api/oauth2/authorize".to_string())?)
-                .set_token_uri(oauth2::TokenUrl::new("https://discord.com/api/oauth2/token".to_string())?)
-                .set_redirect_uri(
-                    oauth2::RedirectUrl::new(format!("{}/auth/discord/callback", config.public_base_url))
-                        .expect("Invalid redirect URI"),
-                );
-        providers.insert("discord", discord::DiscordProvider::new(discord_client, http));
+        // Register GitHub provider if configured
+        if let Some(github_config) = &config.github {
+            let github_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
+                BasicClient::new(oauth2::ClientId::new(github_config.client_id.clone()))
+                    .set_client_secret(oauth2::ClientSecret::new(github_config.client_secret.clone()))
+                    .set_auth_uri(oauth2::AuthUrl::new("https://github.com/login/oauth/authorize".to_string())?)
+                    .set_token_uri(oauth2::TokenUrl::new(
+                        "https://github.com/login/oauth/access_token".to_string(),
+                    )?)
+                    .set_redirect_uri(
+                        oauth2::RedirectUrl::new(format!("{}/auth/github/callback", config.public_base_url))
+                            .expect("Invalid redirect URI"),
+                    );
+            providers.insert("github", github::GitHubProvider::new(github_client, http.clone()));
+            tracing::info!("GitHub OAuth provider registered");
+        }

+        // Register Discord provider if configured
+        if let Some(discord_config) = &config.discord {
+            let discord_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
+                BasicClient::new(oauth2::ClientId::new(discord_config.client_id.clone()))
+                    .set_client_secret(oauth2::ClientSecret::new(discord_config.client_secret.clone()))
+                    .set_auth_uri(oauth2::AuthUrl::new("https://discord.com/api/oauth2/authorize".to_string())?)
+                    .set_token_uri(oauth2::TokenUrl::new("https://discord.com/api/oauth2/token".to_string())?)
+                    .set_redirect_uri(
+                        oauth2::RedirectUrl::new(format!("{}/auth/discord/callback", config.public_base_url))
+                            .expect("Invalid redirect URI"),
+                    );
+            providers.insert("discord", discord::DiscordProvider::new(discord_client, http));
+            tracing::info!("Discord OAuth provider registered");
+        }
+
+        if providers.is_empty() {
+            tracing::warn!("No OAuth providers configured - authentication will be unavailable");
+        }

        Ok(Self { providers })
     }

@@ -64,4 +79,9 @@ impl AuthRegistry {
     pub fn values(&self) -> impl Iterator<Item = &Arc<dyn provider::OAuthProvider>> {
        self.providers.values()
     }
+
+    /// Get the number of registered providers
+    pub fn len(&self) -> usize {
+        self.providers.len()
+    }
 }
+232 -31
@@ -1,36 +1,184 @@
 use figment::{providers::Env, value::UncasedStr, Figment};
 use serde::{Deserialize, Deserializer};
+use std::env;

-#[derive(Debug, Clone, Deserialize)]
-pub struct Config {
-    // Database URL
-    pub database_url: String,
-    // Discord Credentials
-    #[serde(deserialize_with = "deserialize_string_from_any")]
-    pub discord_client_id: String,
-    pub discord_client_secret: String,
-    // GitHub Credentials
-    #[serde(deserialize_with = "deserialize_string_from_any")]
-    pub github_client_id: String,
-    pub github_client_secret: String,
-    // S3 Credentials
-    pub s3_access_key: String,
-    pub s3_secret_access_key: String,
-    pub s3_bucket_name: String,
-    pub s3_public_base_url: String,
-    // Server Details
-    #[serde(default = "default_port")]
-    pub port: u16,
-    #[serde(default = "default_host")]
-    pub host: std::net::IpAddr,
-    #[serde(default = "default_shutdown_timeout")]
-    pub shutdown_timeout_seconds: u32,
-    // Public base URL used for OAuth redirect URIs
-    pub public_base_url: String,
-    // JWT
+/// Database configuration
+#[derive(Debug, Clone)]
+pub struct DatabaseConfig {
+    pub url: String,
+}
+
+/// Discord OAuth configuration
+#[derive(Debug, Clone)]
+pub struct DiscordConfig {
+    pub client_id: String,
+    pub client_secret: String,
+}
+
+/// GitHub OAuth configuration
+#[derive(Debug, Clone)]
+pub struct GithubConfig {
+    pub client_id: String,
+    pub client_secret: String,
+}
+
+/// S3 storage configuration
+#[derive(Debug, Clone)]
+pub struct S3Config {
+    pub access_key: String,
+    pub secret_access_key: String,
+    pub bucket_name: String,
+    pub public_base_url: String,
+}
+
+/// Main application configuration
+#[derive(Debug, Clone, Deserialize)]
+#[serde(from = "RawConfig")]
+pub struct Config {
+    /// Database configuration - if None, a dummy pool is used and database features are disabled
+    pub database: Option<DatabaseConfig>,
+    /// Discord OAuth - if None, Discord auth is disabled
+    pub discord: Option<DiscordConfig>,
+    /// GitHub OAuth - if None, GitHub auth is disabled
+    pub github: Option<GithubConfig>,
+    /// S3 storage - if None, image storage is disabled
+    pub s3: Option<S3Config>,
+    /// Server port
+    pub port: u16,
+    /// Server host address
+    pub host: std::net::IpAddr,
+    /// Graceful shutdown timeout in seconds
+    pub shutdown_timeout_seconds: u32,
+    /// Public base URL for OAuth redirects
+    pub public_base_url: String,
+    /// JWT secret for session tokens
     pub jwt_secret: String,
 }
+
+/// Raw configuration loaded directly from environment variables
+/// This is an intermediate representation that gets validated and converted to Config
+#[derive(Debug, Deserialize)]
+struct RawConfig {
+    // Database
+    database_url: Option<String>,
+    // Discord OAuth
+    #[serde(default, deserialize_with = "deserialize_optional_string_from_any")]
+    discord_client_id: Option<String>,
+    discord_client_secret: Option<String>,
+    // GitHub OAuth
+    #[serde(default, deserialize_with = "deserialize_optional_string_from_any")]
+    github_client_id: Option<String>,
+    github_client_secret: Option<String>,
+    // S3
+    s3_access_key: Option<String>,
+    s3_secret_access_key: Option<String>,
+    s3_bucket_name: Option<String>,
+    s3_public_base_url: Option<String>,
+    // Server
+    #[serde(default = "default_port")]
+    port: u16,
+    #[serde(default = "default_host")]
+    host: std::net::IpAddr,
+    #[serde(default = "default_shutdown_timeout")]
+    shutdown_timeout_seconds: u32,
+    // Required
+    public_base_url: String,
+    jwt_secret: String,
+}
+
+impl From<RawConfig> for Config {
+    fn from(raw: RawConfig) -> Self {
+        // Validate database config
+        let database = raw.database_url.map(|url| DatabaseConfig { url });
+
+        // Validate Discord config - if any field is set, all must be set
+        let discord = validate_feature_group(
+            "Discord",
+            &[
+                ("DISCORD_CLIENT_ID", raw.discord_client_id.as_ref()),
+                ("DISCORD_CLIENT_SECRET", raw.discord_client_secret.as_ref()),
+            ],
+        )
+        .map(|_| DiscordConfig {
+            client_id: raw.discord_client_id.unwrap(),
+            client_secret: raw.discord_client_secret.unwrap(),
+        });
+
+        // Validate GitHub config - if any field is set, all must be set
+        let github = validate_feature_group(
+            "GitHub",
+            &[
+                ("GITHUB_CLIENT_ID", raw.github_client_id.as_ref()),
+                ("GITHUB_CLIENT_SECRET", raw.github_client_secret.as_ref()),
+            ],
+        )
+        .map(|_| GithubConfig {
+            client_id: raw.github_client_id.unwrap(),
+            client_secret: raw.github_client_secret.unwrap(),
+        });
+
+        // Validate S3 config - if any field is set, all must be set
+        let s3 = validate_feature_group(
+            "S3",
+            &[
+                ("S3_ACCESS_KEY", raw.s3_access_key.as_ref()),
+                ("S3_SECRET_ACCESS_KEY", raw.s3_secret_access_key.as_ref()),
+                ("S3_BUCKET_NAME", raw.s3_bucket_name.as_ref()),
+                ("S3_PUBLIC_BASE_URL", raw.s3_public_base_url.as_ref()),
+            ],
+        )
+        .map(|_| S3Config {
+            access_key: raw.s3_access_key.unwrap(),
+            secret_access_key: raw.s3_secret_access_key.unwrap(),
+            bucket_name: raw.s3_bucket_name.unwrap(),
+            public_base_url: raw.s3_public_base_url.unwrap(),
+        });
+
+        Config {
+            database,
+            discord,
+            github,
+            s3,
+            port: raw.port,
+            host: raw.host,
+            shutdown_timeout_seconds: raw.shutdown_timeout_seconds,
+            public_base_url: raw.public_base_url,
+            jwt_secret: raw.jwt_secret,
+        }
+    }
+}
+
+/// Validates a feature group - returns Some(()) if all fields are set, None if all are unset,
+/// or panics if only some fields are set (partial configuration).
+fn validate_feature_group(feature_name: &str, fields: &[(&str, Option<&String>)]) -> Option<()> {
+    let set_fields: Vec<&str> = fields.iter().filter(|(_, v)| v.is_some()).map(|(name, _)| *name).collect();
+    let unset_fields: Vec<&str> = fields.iter().filter(|(_, v)| v.is_none()).map(|(name, _)| *name).collect();
+
+    if set_fields.is_empty() {
+        // All unset - feature disabled
+        None
+    } else if unset_fields.is_empty() {
+        // All set - feature enabled
+        Some(())
+    } else {
+        // Partial configuration - this is an error
+        panic!(
+            "{} configuration is incomplete. Set fields: [{}]. Missing fields: [{}]. \
+             Either set all {} environment variables or none of them.",
+            feature_name,
+            set_fields.join(", "),
+            unset_fields.join(", "),
+            feature_name
+        );
+    }
+}

 // Standard User-Agent: name/version (+site)
 pub const USER_AGENT: &str = concat!(
     env!("CARGO_PKG_NAME"),

@@ -51,17 +199,18 @@ fn default_shutdown_timeout() -> u32 {
     5
 }

-fn deserialize_string_from_any<'de, D>(deserializer: D) -> Result<String, D::Error>
+fn deserialize_optional_string_from_any<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
 where
     D: Deserializer<'de>,
 {
     use serde_json::Value;
-    let value = Value::deserialize(deserializer)?;
+    let value: Option<Value> = Option::deserialize(deserializer)?;
     match value {
-        Value::String(s) => Ok(s),
-        Value::Number(n) => Ok(n.to_string()),
-        _ => Err(serde::de::Error::custom("Expected string or number")),
+        Some(Value::String(s)) => Ok(Some(s)),
+        Some(Value::Number(n)) => Ok(Some(n.to_string())),
+        Some(Value::Null) | None => Ok(None),
+        _ => Err(serde::de::Error::custom("Expected string, number, or null")),
     }
 }

@@ -77,3 +226,55 @@ pub fn load_config() -> Config {
        .extract()
        .expect("Failed to load config")
 }
+
+/// Create a minimal config for testing with specific overrides
+/// This is useful for tests that don't need full configuration
+#[cfg(test)]
+pub fn test_config() -> Config {
+    Config {
+        database: None,
+        discord: None,
+        github: None,
+        s3: None,
+        port: 0,
+        host: "127.0.0.1".parse().unwrap(),
+        shutdown_timeout_seconds: 5,
+        public_base_url: "http://localhost:3000".to_string(),
+        jwt_secret: "test_jwt_secret_key_for_testing_only".to_string(),
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_validate_feature_group_all_set() {
+        let a = Some("value_a".to_string());
+        let b = Some("value_b".to_string());
+        let result = validate_feature_group("Test", &[("A", a.as_ref()), ("B", b.as_ref())]);
+        assert!(result.is_some());
+    }
+
+    #[test]
+    fn test_validate_feature_group_none_set() {
+        let result = validate_feature_group("Test", &[("A", None), ("B", None)]);
+        assert!(result.is_none());
+    }
+
+    #[test]
+    #[should_panic(expected = "Test configuration is incomplete")]
+    fn test_validate_feature_group_partial_panics() {
+        let a = Some("value_a".to_string());
+        validate_feature_group("Test", &[("A", a.as_ref()), ("B", None)]);
+    }
+
+    #[test]
+    fn test_minimal_config() {
+        let config = test_config();
+        assert!(config.database.is_none());
+        assert!(config.discord.is_none());
+        assert!(config.github.is_none());
+        assert!(config.s3.is_none());
+    }
+}
+17
@@ -3,6 +3,12 @@ use tracing::{info, warn};

 pub type PgPool = Pool<Postgres>;

+/// Create a PostgreSQL database pool.
+///
+/// - `immediate`: If true, establishes connection immediately (panics on failure).
+///   If false, uses lazy connection (for tests or when database may not be needed).
+/// - `database_url`: The database connection URL.
+/// - `max_connections`: Maximum number of connections in the pool.
 pub async fn create_pool(immediate: bool, database_url: &str, max_connections: u32) -> PgPool {
     info!(immediate, "Connecting to PostgreSQL");

@@ -19,3 +25,14 @@ pub async fn create_pool(immediate: bool, database_url: &str, max_connections: u32) -> PgPool {
            .expect("Failed to create lazy database pool")
     }
 }
+
+/// Create a dummy pool that will fail on any actual database operation.
+/// Used when database is not configured but the app still needs to start.
+pub fn create_dummy_pool() -> PgPool {
+    // This creates a pool with an invalid URL that will fail on actual use
+    // The pool itself can be created (lazy), but any operation will fail
+    PgPoolOptions::new()
+        .max_connections(1)
+        .connect_lazy("postgres://invalid:invalid@localhost:5432/invalid")
+        .expect("Failed to create dummy pool")
+}
+7 -5
@@ -1,6 +1,8 @@
 use serde::Serialize;
 use sqlx::FromRow;

+use super::pool::PgPool;
+
 #[derive(Debug, Clone, Serialize, FromRow)]
 pub struct User {
     pub id: i64,

@@ -23,7 +25,7 @@ pub struct OAuthAccount {
     pub updated_at: chrono::DateTime<chrono::Utc>,
 }

-pub async fn find_user_by_email(pool: &sqlx::PgPool, email: &str) -> Result<Option<User>, sqlx::Error> {
+pub async fn find_user_by_email(pool: &PgPool, email: &str) -> Result<Option<User>, sqlx::Error> {
     sqlx::query_as::<_, User>(
        r#"
        SELECT id, email, created_at, updated_at

@@ -37,7 +39,7 @@ pub async fn find_user_by_email(pool: &sqlx::PgPool, email: &str) -> Result<Option<User>, sqlx::Error> {
 #[allow(clippy::too_many_arguments)]
 pub async fn link_oauth_account(
-    pool: &sqlx::PgPool,
+    pool: &PgPool,
     user_id: i64,
     provider: &str,
     provider_user_id: &str,

@@ -66,7 +68,7 @@ pub async fn link_oauth_account(
     .await
 }

-pub async fn create_user(pool: &sqlx::PgPool, email: Option<&str>) -> Result<User, sqlx::Error> {
+pub async fn create_user(pool: &PgPool, email: Option<&str>) -> Result<User, sqlx::Error> {
     sqlx::query_as::<_, User>(
        r#"
        INSERT INTO users (email)

@@ -81,7 +83,7 @@ pub async fn create_user(pool: &sqlx::PgPool, email: Option<&str>) -> Result<User, sqlx::Error> {
 }

 pub async fn find_user_by_provider_id(
-    pool: &sqlx::PgPool,
+    pool: &PgPool,
     provider: &str,
     provider_user_id: &str,
 ) -> Result<Option<User>, sqlx::Error> {

@@ -110,7 +112,7 @@ pub struct ProviderPublic {
     pub avatar_url: Option<String>,
 }

-pub async fn list_user_providers(pool: &sqlx::PgPool, user_id: i64) -> Result<Vec<ProviderPublic>, sqlx::Error> {
+pub async fn list_user_providers(pool: &PgPool, user_id: i64) -> Result<Vec<ProviderPublic>, sqlx::Error> {
     let recs = sqlx::query_as::<_, ProviderPublic>(
        r#"
        SELECT provider, provider_user_id, email, username, display_name, avatar_url
+10 -10
@@ -5,10 +5,10 @@ use s3::Bucket;
 use sha2::Digest;
 use tracing::trace;

-use crate::config::Config;
+use crate::config::S3Config;

 /// Minimal S3-backed image storage. This keeps things intentionally simple for now:
-/// - construct from existing `Config`
+/// - construct from existing `S3Config`
 /// - upload raw bytes under a key
 /// - upload a local file by path (reads whole file into memory)
 /// - generate a simple presigned GET URL

@@ -22,14 +22,14 @@ pub struct ImageStorage {
 }

 impl ImageStorage {
-    /// Create a new storage for a specific `bucket_name` using settings from `Config`.
+    /// Create a new storage for a specific `bucket_name` using the provided S3 config.
     ///
     /// This uses a custom region + endpoint so it works across AWS S3 and compatible services
     /// such as Cloudflare R2 and MinIO.
-    pub fn new(config: &Config, bucket_name: impl Into<String>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
+    pub fn new(config: &S3Config, bucket_name: impl Into<String>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let credentials = s3::creds::Credentials::new(
-            Some(&config.s3_access_key),
-            Some(&config.s3_secret_access_key),
+            Some(&config.access_key),
+            Some(&config.secret_access_key),
            None, // security token
            None, // session token
            None, // profile

@@ -46,7 +46,7 @@ impl ImageStorage {
        Ok(Self {
            bucket: Arc::new(bucket),
-            public_base_url: config.s3_public_base_url.clone(),
+            public_base_url: config.public_base_url.clone(),
        })
     }

@@ -172,9 +172,9 @@ pub struct AvatarUrls {
 }

 impl ImageStorage {
-    /// Create a new storage using the default bucket from `Config`.
-    pub fn from_config(config: &Config) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
-        Self::new(config, &config.s3_bucket_name)
+    /// Create a new storage using the bucket from `S3Config`.
+    pub fn from_config(config: &S3Config) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
+        Self::new(config, &config.bucket_name)
     }
 }
+37 -8
@@ -5,6 +5,7 @@ use crate::{
     app::{create_router, AppState},
     auth::AuthRegistry,
     config::Config,
+    data::pool::{create_dummy_pool, create_pool},
 };
 use std::sync::Arc;
 use std::time::Instant;

@@ -48,24 +49,52 @@ async fn main() {
     logging::setup_logging();
     trace!(host = %config.host, port = config.port, shutdown_timeout_seconds = config.shutdown_timeout_seconds, "Loaded server configuration");

+    // Log configuration status
+    info!(
+        database = config.database.is_some(),
+        discord = config.discord.is_some(),
+        github = config.github.is_some(),
+        s3 = config.s3.is_some(),
+        "Feature configuration"
+    );
+
     let addr = std::net::SocketAddr::new(config.host, config.port);
     let shutdown_timeout = std::time::Duration::from_secs(config.shutdown_timeout_seconds as u64);

-    let auth = AuthRegistry::new(&config).expect("auth initializer");
-    let db = data::pool::create_pool(true, &config.database_url, 10).await;
-
-    // Run database migrations at startup
-    if let Err(e) = sqlx::migrate!("./migrations").run(&db).await {
-        panic!("failed to run database migrations: {}", e);
-    }
+    // Initialize auth registry (only enabled providers will be registered)
+    let auth = AuthRegistry::new(&config).expect("auth initializer");
+
+    // Initialize database - either connect to configured database or create a dummy pool
+    let db = if let Some(ref db_config) = config.database {
+        info!("Connecting to configured database");
+        let pool = create_pool(true, &db_config.url, 10).await;
+
+        // Run migrations
+        info!("Running database migrations");
+        if let Err(e) = sqlx::migrate!("./migrations").run(&pool).await {
+            panic!("failed to run database migrations: {}", e);
+        }
+        pool
+    } else {
+        info!("No database configured, creating dummy pool (database-dependent features will be unavailable)");
+        create_dummy_pool()
+    };

     // Create the shutdown notification before creating AppState
     let notify = Arc::new(Notify::new());
     let app_state = AppState::new(config, auth, db, notify.clone()).await;

     {
-        // migrations succeeded
+        // Set health status based on configuration
        let mut h = app_state.health.write().await;
-        h.set_migrations(true);
+        if app_state.database_configured {
+            // Database was configured - migrations ran successfully
+            h.set_migrations(true);
+            h.set_database(true);
+        }
+        // If database is not configured, Health::ok() returns true by default
+        // because database_enabled is false
     }

     let app = create_router(app_state);
+59 -27
@@ -53,6 +53,17 @@ pub async fn oauth_callback_handler(
     Query(params): Query<OAuthCallbackParams>,
     cookie: CookieManager,
 ) -> axum::response::Response {
+    // Check if database is configured - required for OAuth callback to work
+    if !app_state.database_configured {
+        warn!("OAuth callback attempted but database is not configured");
+        return ErrorResponse::with_status(
+            StatusCode::SERVICE_UNAVAILABLE,
+            "database_not_configured",
+            Some("Database is not configured. User authentication requires a database.".into()),
+        )
+        .into_response();
+    }
+
     // Validate provider
     let Some(prov) = app_state.auth.get(&provider) else {
        warn!(%provider, "Unknown OAuth provider");

@@ -146,33 +157,35 @@ pub async fn oauth_callback_handler(
     session::set_session_cookie(&cookie, &session_token);
     info!(%provider, "Signed in successfully");

-    // Process avatar asynchronously (don't block the response)
-    if let Some(avatar_url) = user.avatar_url.as_deref() {
-        let image_storage = app_state.image_storage.clone();
-        let user_public_id = user.id.clone();
-        let avatar_url = avatar_url.to_string();
-        debug!(%user_public_id, %avatar_url, "Processing avatar");
-
-        tokio::spawn(async move {
-            match image_storage.process_avatar(&user_public_id, &avatar_url).await {
-                Ok(avatar_urls) => {
-                    info!(
-                        user_id = %user_public_id,
-                        original_url = %avatar_urls.original_url,
-                        mini_url = %avatar_urls.mini_url,
-                        "Avatar processed successfully"
-                    );
-                }
-                Err(e) => {
-                    warn!(
-                        user_id = %user_public_id,
-                        avatar_url = %avatar_url,
-                        error = %e,
-                        "Failed to process avatar"
-                    );
-                }
-            }
-        });
+    // Process avatar asynchronously (don't block the response) - only if image storage is configured
+    if let Some(image_storage) = &app_state.image_storage {
+        if let Some(avatar_url) = user.avatar_url.as_deref() {
+            let image_storage = image_storage.clone();
+            let user_public_id = user.id.clone();
+            let avatar_url = avatar_url.to_string();
+            debug!(%user_public_id, %avatar_url, "Processing avatar");
+
+            tokio::spawn(async move {
+                match image_storage.process_avatar(&user_public_id, &avatar_url).await {
+                    Ok(avatar_urls) => {
+                        info!(
+                            user_id = %user_public_id,
+                            original_url = %avatar_urls.original_url,
+                            mini_url = %avatar_urls.mini_url,
+                            "Avatar processed successfully"
+                        );
+                    }
+                    Err(e) => {
+                        warn!(
+                            user_id = %user_public_id,
+                            avatar_url = %avatar_url,
+                            error = %e,
+                            "Failed to process avatar"
+                        );
+                    }
+                }
+            });
+        }
     }

     (StatusCode::FOUND, Redirect::to("/api/profile")).into_response()

@@ -182,6 +195,16 @@ pub async fn oauth_callback_handler(
 ///
 /// Requires the `session` cookie to be present.
 pub async fn profile_handler(State(app_state): State<AppState>, cookie: CookieManager) -> axum::response::Response {
+    // Check if database is configured
+    if !app_state.database_configured {
+        return ErrorResponse::with_status(
+            StatusCode::SERVICE_UNAVAILABLE,
+            "database_not_configured",
+            Some("Database is not configured. Profile lookup requires a database.".into()),
+        )
+        .into_response();
+    }
+
     let Some(token_str) = session::get_session_token(&cookie) else {
        debug!("Missing session cookie");
        return ErrorResponse::unauthorized("missing session cookie").into_response();

@@ -287,8 +310,17 @@ pub async fn health_handler(
        app_state.check_health().await;
     }

-    let ok = app_state.health.read().await.ok();
+    let health = app_state.health.read().await;
+    let ok = health.ok();
     let status = if ok { StatusCode::OK } else { StatusCode::SERVICE_UNAVAILABLE };
-    let body = serde_json::json!({ "ok": ok });
+
+    // Include more details in the health response
+    let body = serde_json::json!({
+        "ok": ok,
+        "database_configured": app_state.database_configured,
+        "auth_providers": app_state.auth.len(),
+        "image_storage_enabled": app_state.image_storage.is_some(),
+    });

     (status, axum::Json(body)).into_response()
 }
+58 -30
@@ -3,7 +3,8 @@ use bon::builder;
 use pacman_server::{
     app::{create_router, AppState},
     auth::AuthRegistry,
-    config::Config,
+    config::{Config, DatabaseConfig, DiscordConfig, GithubConfig},
+    data::pool::{create_dummy_pool, create_pool},
 };
 use std::sync::{Arc, Once};
 use testcontainers::{

@@ -23,12 +24,24 @@ pub struct TestContext {
     pub config: Config,
     pub server: TestServer,
     pub app_state: AppState,
-    // Optional database
+    // Optional database container (only for Postgres tests)
     pub container: Option<ContainerAsync<GenericImage>>,
 }

 #[builder]
-pub async fn test_context(#[builder(default = false)] use_database: bool, auth_registry: Option<AuthRegistry>) -> TestContext {
+pub async fn test_context(
+    /// Whether to use a real PostgreSQL database via testcontainers (default: false)
+    #[builder(default = false)]
+    use_database: bool,
+    /// Optional custom AuthRegistry (otherwise built from config)
+    auth_registry: Option<AuthRegistry>,
+    /// Include Discord OAuth config (default: true for backward compatibility)
+    #[builder(default = true)]
+    with_discord: bool,
+    /// Include GitHub OAuth config (default: true for backward compatibility)
+    #[builder(default = true)]
+    with_github: bool,
+) -> TestContext {
     CRYPTO_INIT.call_once(|| {
        rustls::crypto::ring::default_provider()
            .install_default()

@@ -38,7 +51,8 @@ pub async fn test_context(#[builder(default = false)] use_database: bool, auth_registry: Option<AuthRegistry>) -> TestContext {
     // Set up logging
     std::env::set_var("RUST_LOG", "debug,sqlx=info");
     pacman_server::logging::setup_logging();
-    let (database_url, container) = if use_database {
+
+    let (database_config, container) = if use_database {
        let db = "testdb";
        let user = "testuser";
        let password = "testpass";

@@ -59,47 +73,59 @@ pub async fn test_context(#[builder(default = false)] use_database: bool, auth_registry: Option<AuthRegistry>) -> TestContext {
        let port = container.get_host_port_ipv4(5432).await.unwrap();
        tracing::debug!(host = %host, port = %port, duration = ?duration, "Test database ready");

-        (
-            Some(format!("postgresql://{user}:{password}@{host}:{port}/{db}?sslmode=disable")),
-            Some(container),
-        )
+        let url = format!("postgresql://{user}:{password}@{host}:{port}/{db}?sslmode=disable");
+        (Some(DatabaseConfig { url }), Some(container))
     } else {
        (None, None)
     };

+    // Build OAuth configs if requested
+    let discord = if with_discord {
+        Some(DiscordConfig {
+            client_id: "test_discord_client_id".to_string(),
+            client_secret: "test_discord_client_secret".to_string(),
+        })
+    } else {
+        None
+    };
+
+    let github = if with_github {
+        Some(GithubConfig {
+            client_id: "test_github_client_id".to_string(),
+            client_secret: "test_github_client_secret".to_string(),
+        })
+    } else {
+        None
+    };
+
     let config = Config {
-        database_url: database_url.clone().unwrap_or_default(),
-        discord_client_id: "test_discord_client_id".to_string(),
-        discord_client_secret: "test_discord_client_secret".to_string(),
-        github_client_id: "test_github_client_id".to_string(),
-        github_client_secret: "test_github_client_secret".to_string(),
-        s3_access_key: "test_s3_access_key".to_string(),
-        s3_secret_access_key: "test_s3_secret_access_key".to_string(),
-        s3_bucket_name: "test_bucket".to_string(),
-        s3_public_base_url: "https://test.example.com".to_string(),
-        port: 0, // Will be set by test server
+        database: database_config,
+        discord,
+        github,
+        s3: None, // Tests don't need S3
+        port: 0, // Will be set by test server
        host: "127.0.0.1".parse().unwrap(),
        shutdown_timeout_seconds: 5,
        public_base_url: "http://localhost:3000".to_string(),
        jwt_secret: "test_jwt_secret_key_for_testing_only".to_string(),
     };

-    let db = if use_database {
-        let db = pacman_server::data::pool::create_pool(use_database, &database_url.unwrap(), 5).await;
+    // Create database pool
+    let db = if let Some(ref db_config) = config.database {
+        let pool = create_pool(false, &db_config.url, 5).await;

-        // Run migrations
+        // Run migrations for Postgres
        sqlx::migrate!("./migrations")
-            .run(&db)
+            .run(&pool)
            .instrument(debug_span!("running_migrations"))
            .await
            .expect("Failed to run database migrations");
        debug!("Database migrations ran successfully");
-        db
+        pool
     } else {
-        // Create a dummy database pool that will fail gracefully
-        let dummy_url = "postgresql://dummy:dummy@localhost:5432/dummy?sslmode=disable";
-        pacman_server::data::pool::create_pool(false, dummy_url, 1).await
+        // Create dummy pool for tests that don't need database
+        create_dummy_pool()
     };

     // Create auth registry

@@ -107,13 +133,15 @@ pub async fn test_context(#[builder(default = false)] use_database: bool, auth_registry: Option<AuthRegistry>) -> TestContext {
     // Create app state
     let notify = Arc::new(Notify::new());
-    let app_state = AppState::new_with_database(config.clone(), auth, db, notify, use_database).await;
+    let app_state = AppState::new_with_options(config.clone(), auth, db, notify, use_database).await;

-    // Set health status based on database usage
+    // Set health status
     {
        let mut health = app_state.health.write().await;
-        health.set_migrations(use_database);
-        health.set_database(use_database);
+        if use_database {
+            health.set_migrations(true);
+            health.set_database(true);
+        }
     }

     let router = create_router(app_state.clone());
+208
@@ -0,0 +1,208 @@
+//! Tests for optional configuration features
+//!
+//! These tests verify that:
+//! 1. The server can start without database, Discord, GitHub, or S3 configured
+//! 2. Partial configuration (e.g., only DISCORD_CLIENT_ID) fails with a clear error
+//! 3. Routes behave correctly when features are disabled
+
+mod common;
+
+use axum::http::StatusCode;
+use pretty_assertions::assert_eq;
+
+use crate::common::{test_context, TestContext};
+
+/// Test that the server starts and responds to health checks without any OAuth providers
+#[tokio::test]
+async fn test_server_without_oauth_providers() {
+    let TestContext { server, app_state, .. } = test_context()
+        .with_discord(false)
+        .with_github(false)
+        .use_database(false)
+        .call()
+        .await;
+
+    // Verify no providers registered
+    assert_eq!(app_state.auth.len(), 0);
+
+    // Health check should work
+    let response = server.get("/api/health").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+
+    // Providers endpoint should return empty list
+    let response = server.get("/api/auth/providers").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+    let body: Vec<serde_json::Value> = response.json();
+    assert!(body.is_empty());
+}
+
+/// Test that the server starts with only Discord configured
+#[tokio::test]
+async fn test_server_with_discord_only() {
+    let TestContext { server, app_state, .. } = test_context()
+        .with_discord(true)
+        .with_github(false)
+        .use_database(false)
+        .call()
+        .await;
+
+    // Verify only Discord is registered
+    assert_eq!(app_state.auth.len(), 1);
+    assert!(app_state.auth.get("discord").is_some());
+    assert!(app_state.auth.get("github").is_none());
+
+    // Providers endpoint should return only Discord
+    let response = server.get("/api/auth/providers").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+    let body: Vec<serde_json::Value> = response.json();
+    assert_eq!(body.len(), 1);
+    assert_eq!(body[0]["id"], "discord");
+}
+
+/// Test that the server starts with only GitHub configured
+#[tokio::test]
+async fn test_server_with_github_only() {
+    let TestContext { server, app_state, .. } = test_context()
+        .with_discord(false)
+        .with_github(true)
+        .use_database(false)
+        .call()
+        .await;
+
+    // Verify only GitHub is registered
+    assert_eq!(app_state.auth.len(), 1);
+    assert!(app_state.auth.get("github").is_some());
+    assert!(app_state.auth.get("discord").is_none());
+
+    // Providers endpoint should return only GitHub
+    let response = server.get("/api/auth/providers").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+    let body: Vec<serde_json::Value> = response.json();
+    assert_eq!(body.len(), 1);
+    assert_eq!(body[0]["id"], "github");
+}
+
+/// Test that the server starts without database configured
+#[tokio::test]
+async fn test_server_without_database() {
+    let TestContext {
+        server,
+        app_state,
+        config,
+        ..
+    } = test_context().use_database(false).call().await;
+
+    // Verify database is not configured
+    assert!(config.database.is_none());
+    assert!(!app_state.database_configured);
+
+    // Health check should still work
+    let response = server.get("/api/health").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+    let body: serde_json::Value = response.json();
+    assert_eq!(body["ok"], true);
+    assert_eq!(body["database_configured"], false);
+}
+
+/// Test that profile endpoint returns 503 when database is not configured
+#[tokio::test]
+async fn test_profile_without_database_returns_503() {
+    let TestContext { server, .. } = test_context().use_database(false).call().await;
+
+    // Create a fake session cookie to get past the auth check
+    let response = server.get("/api/profile").await;
+
+    // Should return 503 Service Unavailable because database is not configured
+    assert_eq!(response.status_code(), StatusCode::SERVICE_UNAVAILABLE);
+    let body: serde_json::Value = response.json();
+    assert_eq!(body["error"], "database_not_configured");
+}
+
+/// Test that OAuth callback returns 503 when database is not configured
+#[tokio::test]
+async fn test_oauth_callback_without_database_returns_503() {
+    let TestContext { server, .. } = test_context().with_github(true).use_database(false).call().await;
+
+    // Try to complete OAuth flow - should fail because database is not configured
+    let response = server
+        .get("/api/auth/github/callback")
+        .add_query_param("code", "test_code")
+        .add_query_param("state", "test_state")
+        .await;
+
+    // Should return 503 Service Unavailable because database is not configured
+    assert_eq!(response.status_code(), StatusCode::SERVICE_UNAVAILABLE);
+    let body: serde_json::Value = response.json();
+    assert_eq!(body["error"], "database_not_configured");
+}
+
+/// Test that unknown provider returns 400
+#[tokio::test]
+async fn test_unknown_provider_returns_400() {
+    let TestContext { server, .. } = test_context().with_discord(true).use_database(false).call().await;
+
+    // Try to access non-existent provider
+    let response = server.get("/api/auth/twitter").await;
+    assert_eq!(response.status_code(), StatusCode::BAD_REQUEST);
+    let body: serde_json::Value = response.json();
+    assert_eq!(body["error"], "invalid_provider");
+}
+
+/// Test that logout works without database
+#[tokio::test]
+async fn test_logout_without_database() {
+    let TestContext { server, .. } = test_context().use_database(false).call().await;
+
+    // Logout should work even without database
+    let response = server.get("/api/logout").await;
+
+    // Logout redirects to home
+    assert_eq!(response.status_code(), StatusCode::FOUND);
+}
+
+/// Test basic routes work without database or OAuth
+#[tokio::test]
+async fn test_basic_routes_minimal_config() {
+    let TestContext { server, .. } = test_context()
+        .with_discord(false)
+        .with_github(false)
+        .use_database(false)
+        .call()
+        .await;
+
+    // Root API endpoint
+    let response = server.get("/api/").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+
+    // Health endpoint
+    let response = server.get("/api/health").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+
+    // Providers endpoint (empty list)
+    let response = server.get("/api/auth/providers").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+}
+
+/// Test health endpoint includes feature status
+#[tokio::test]
+async fn test_health_includes_feature_status() {
+    let TestContext { server, .. } = test_context()
+        .with_discord(true)
+        .with_github(false)
+        .use_database(false)
+        .call()
+        .await;
+
+    let response = server.get("/api/health").await;
+    assert_eq!(response.status_code(), StatusCode::OK);
+    let body: serde_json::Value = response.json();
+    assert_eq!(body["ok"], true);
+    assert_eq!(body["database_configured"], false);
+    assert_eq!(body["auth_providers"], 1); // Only Discord
+    assert_eq!(body["image_storage_enabled"], false); // No S3 configured
+}
+4
@@ -2,6 +2,10 @@ set shell := ["bash", "-c"]

 binary_extension := if os() == "windows" { ".exe" } else { "" }

+# Run cargo-vcpkg build for SDL2 dependencies
+vcpkg:
+    cargo vcpkg build
+
 # Run the game, pass args (e.g., `just pacman::run -r` for release)
 run *args:
     cargo run -p pacman {{args}}