diff --git a/src/cache.rs b/src/cache.rs
index 768733a..ff23b5a 100644
--- a/src/cache.rs
+++ b/src/cache.rs
@@ -188,8 +188,9 @@ impl IsrCache {
     /// Invalidate all cached entries
     pub async fn invalidate_all(&self) {
+        let previous_count = self.cache.entry_count();
         self.cache.invalidate_all();
-        tracing::info!("All cache entries invalidated");
+        tracing::info!(previous_count, "All cache entries invalidated");
     }
 
     /// Get cache statistics
diff --git a/src/main.rs b/src/main.rs
index 3df4cf7..a55d256 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,8 +1,9 @@
 use clap::Parser;
+use std::collections::HashSet;
 use std::net::SocketAddr;
 use std::sync::Arc;
 use std::time::Duration;
-use tower_http::{cors::CorsLayer, limit::RequestBodyLimitLayer, trace::TraceLayer};
+use tower_http::{cors::CorsLayer, limit::RequestBodyLimitLayer};
 use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitExt};
 
 mod assets;
@@ -87,14 +88,43 @@ async fn main() {
         .await
         .expect("Failed to connect to database");
 
-    // Run migrations on startup
-    tracing::info!("Running database migrations");
-    sqlx::migrate!().run(&pool).await.unwrap_or_else(|e| {
+    // Check and run migrations on startup
+    let migrator = sqlx::migrate!();
+
+    // Query applied migrations directly from the database
+    let applied_versions: HashSet<i64> =
+        sqlx::query_scalar::<_, i64>("SELECT version FROM _sqlx_migrations ORDER BY version")
+            .fetch_all(&pool)
+            .await
+            .unwrap_or_default()
+            .into_iter()
+            .collect();
+
+    let pending: Vec<_> = migrator
+        .iter()
+        .filter(|m| !m.migration_type.is_down_migration())
+        .filter(|m| !applied_versions.contains(&m.version))
+        .map(|m| m.description.as_ref())
+        .collect();
+
+    if pending.is_empty() {
+        let last_version = applied_versions.iter().max();
+        let last_name = last_version
+            .and_then(|v| migrator.iter().find(|m| m.version == *v))
+            .map(|m| m.description.as_ref());
+        tracing::debug!(last_migration = ?last_name, "Database schema is current");
+    } else {
+        tracing::warn!(migrations = ?pending, "Pending database migrations");
+    }
+
+    migrator.run(&pool).await.unwrap_or_else(|e| {
         tracing::error!(error = %e, "Migration failed");
         std::process::exit(1);
     });
-    tracing::info!("Migrations applied successfully");
+    if !pending.is_empty() {
+        tracing::info!(count = pending.len(), "Migrations applied");
+    }
 
     // Ensure admin user exists
     auth::ensure_admin_user(&pool)
@@ -188,7 +218,6 @@ async fn main() {
     trust_request_id: Option<HeaderName>,
 ) -> axum::Router<Arc<AppState>> {
     router
-        .layer(TraceLayer::new_for_http())
         .layer(RequestIdLayer::new(trust_request_id))
         .layer(CorsLayer::permissive())
         .layer(RequestBodyLimitLayer::new(1_048_576))
diff --git a/src/middleware/mod.rs b/src/middleware/mod.rs
index 7b4ccd1..a3e9f52 100644
--- a/src/middleware/mod.rs
+++ b/src/middleware/mod.rs
@@ -1,3 +1,3 @@
 pub mod request_id;
 
-pub use request_id::RequestIdLayer;
+pub use request_id::{RequestId, RequestIdLayer};
diff --git a/src/middleware/request_id.rs b/src/middleware/request_id.rs
index 1fb0468..dcea680 100644
--- a/src/middleware/request_id.rs
+++ b/src/middleware/request_id.rs
@@ -1,7 +1,12 @@
 use axum::{body::Body, extract::Request, http::HeaderName, response::Response};
 use std::task::{Context, Poll};
+use std::time::Instant;
 use tower::{Layer, Service};
 
+/// Request ID stored in request extensions for downstream access
+#[derive(Clone)]
+pub struct RequestId(pub String);
+
 #[derive(Clone)]
 pub struct RequestIdLayer {
     trust_header: Option<HeaderName>,
 }
@@ -36,6 +41,7 @@ impl<S> Service<Request> for RequestIdService<S>
 where
     S: Service<Request, Response = Response> + Send + 'static,
     S::Future: Send + 'static,
+    S::Error: std::fmt::Debug,
 {
     type Response = S::Response;
     type Error = S::Error;
@@ -47,7 +53,7 @@ where
         self.inner.poll_ready(cx)
     }
 
-    fn call(&mut self, req: Request) -> Self::Future {
+    fn call(&mut self, mut req: Request) -> Self::Future {
         let req_id = self
             .trust_header
             .as_ref()
@@ -61,13 +67,44 @@ where
         let span = tracing::info_span!("request", req_id = %req_id);
         let _enter = span.enter();
 
+        let method = req.method().clone();
+        let path = req.uri().path().to_string();
+        let start = Instant::now();
+
+        tracing::debug!(method = %method, path = %path, "Request");
+
+        // Store request ID in extensions for downstream use (e.g., proxying to Bun)
+        req.extensions_mut().insert(RequestId(req_id));
+
         let span_clone = span.clone();
         let future = self.inner.call(req);
 
         Box::pin(async move {
             let _enter = span_clone.enter();
-            future.await
+            let result = future.await;
+
+            let duration_ms = start.elapsed().as_millis() as u64;
+
+            match &result {
+                Ok(response) => {
+                    let status = response.status();
+                    match status.as_u16() {
+                        200..=399 => {
+                            tracing::debug!(status = status.as_u16(), duration_ms, "Response")
+                        }
+                        400..=499 => {
+                            tracing::info!(status = status.as_u16(), duration_ms, "Response")
+                        }
+                        _ => tracing::warn!(status = status.as_u16(), duration_ms, "Response"),
+                    }
+                }
+                Err(e) => {
+                    tracing::error!(error = ?e, duration_ms, "Request failed");
+                }
+            }
+
+            result
        })
    }
}
diff --git a/src/og.rs b/src/og.rs
index 6db0c55..3f3c3a2 100644
--- a/src/og.rs
+++ b/src/og.rs
@@ -89,19 +89,19 @@ pub async fn regenerate_common_images(state: Arc<AppState>) {
     // Wait 2 seconds before starting
     tokio::time::sleep(Duration::from_secs(2)).await;
 
-    tracing::info!("Ensuring common OG images exist");
+    tracing::debug!("Checking common OG images");
 
     let specs = vec![OGImageSpec::Index, OGImageSpec::Projects];
 
-    for spec in specs {
-        match ensure_og_image(&spec, state.clone()).await {
-            Ok(()) => {
-                tracing::info!(r2_key = spec.r2_key(), "Common OG image ready");
-            }
+    let mut ready = Vec::new();
+
+    for spec in &specs {
+        match ensure_og_image(spec, state.clone()).await {
+            Ok(()) => ready.push(spec.r2_key()),
             Err(e) => {
-                tracing::error!(r2_key = spec.r2_key(), error = %e, "Failed to ensure OG image");
+                tracing::error!(r2_key = spec.r2_key(), error = %e, "OG image failed");
             }
         }
     }
 
-    tracing::info!("Finished ensuring common OG images");
+    tracing::info!(images = ?ready, "Common OG images ready");
 }
diff --git a/src/proxy.rs b/src/proxy.rs
index abe49b1..d71e25e 100644
--- a/src/proxy.rs
+++ b/src/proxy.rs
@@ -15,7 +15,6 @@ use crate::{
 };
 
 /// ISR handler - serves pages through Bun SSR with caching and session validation
-#[tracing::instrument(skip(state, req), fields(path = %req.uri().path(), method = %req.method()))]
 pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Response {
     let method = req.method().clone();
     let uri = req.uri();
@@ -69,7 +68,7 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Response {
     let is_head = method == axum::http::Method::HEAD;
 
     if path.starts_with("/api/") {
-        tracing::error!("API request reached ISR handler - routing bug!");
+        tracing::error!(path = %path, "API request reached ISR handler - routing bug");
         return (StatusCode::INTERNAL_SERVER_ERROR, "Internal routing error").into_response();
     }
 
@@ -104,6 +103,13 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Response {
     let mut forward_headers = HeaderMap::new();
     let mut is_authenticated = false;
 
+    // Forward request ID to Bun (set by RequestIdLayer)
+    if let Some(request_id) = req.extensions().get::<RequestId>() {
+        if let Ok(header_value) = axum::http::HeaderValue::from_str(&request_id.0) {
+            forward_headers.insert("x-request-id", header_value);
+        }
+    }
+
     // SECURITY: Strip any X-Session-User header from incoming request to prevent spoofing
 
     // Extract and validate session from cookie
@@ -382,7 +388,7 @@ pub async fn perform_health_check(
             false
         }
         Err(_) => {
-            tracing::error!("Health check failed: timeout after 5s");
+            tracing::error!(timeout_sec = 5, "Health check timed out");
             false
         }
     };
diff --git a/web/entrypoint.ts b/web/entrypoint.ts
index 04d879c..ee2693a 100644
--- a/web/entrypoint.ts
+++ b/web/entrypoint.ts
@@ -6,13 +6,20 @@ const API_SOCKET = "/tmp/api.sock";
 const PORT = process.env.PORT || "8080";
 const LOG_JSON = process.env.LOG_JSON || "true";
 
+function tryUnlink(path: string) {
+  try {
+    unlinkSync(path);
+  } catch (e) {
+    // ENOENT is expected (socket doesn't exist yet), other errors are unexpected
+    if (e instanceof Error && "code" in e && e.code !== "ENOENT") {
+      console.error(`Failed to cleanup ${path}: ${e.message}`);
+    }
+  }
+}
+
 function cleanup() {
-  try {
-    unlinkSync(BUN_SOCKET);
-  } catch {}
-  try {
-    unlinkSync(API_SOCKET);
-  } catch {}
+  tryUnlink(BUN_SOCKET);
+  tryUnlink(API_SOCKET);
 }
 
 // Cleanup on signals
diff --git a/web/src/hooks.server.ts b/web/src/hooks.server.ts
index 7dcf7d3..20c406c 100644
--- a/web/src/hooks.server.ts
+++ b/web/src/hooks.server.ts
@@ -1,6 +1,7 @@
 import type { Handle, HandleServerError } from "@sveltejs/kit";
 import { dev } from "$app/environment";
 import { initLogger } from "$lib/logger";
+import { requestContext } from "$lib/server/context";
 import { preCacheCollections } from "$lib/server/icons";
 import { getLogger } from "@logtape/logtape";
 import { minify } from "html-minifier-terser";
@@ -13,6 +14,18 @@ await preCacheCollections();
 const logger = getLogger(["ssr", "error"]);
 
 export const handle: Handle = async ({ event, resolve }) => {
+  // Extract request ID from Rust proxy (should always be present in production)
+  const requestId = event.request.headers.get("x-request-id");
+  if (!requestId) {
+    const reqLogger = getLogger(["ssr", "request"]);
+    reqLogger.warn(
+      "Missing x-request-id header - request not routed through Rust proxy",
+      {
+        path: event.url.pathname,
+      },
+    );
+  }
+
   if (
     dev &&
     event.url.pathname === "/.well-known/appspecific/com.chrome.devtools.json"
@@ -20,31 +33,33 @@ export const handle: Handle = async ({ event, resolve }) => {
     return new Response(undefined, { status: 404 });
   }
 
-  const response = await resolve(event, {
-    transformPageChunk: !dev
-      ? ({ html }) =>
-          minify(html, {
-            collapseBooleanAttributes: true,
-            collapseWhitespace: true,
-            conservativeCollapse: true,
-            decodeEntities: true,
-            html5: true,
-            ignoreCustomComments: [/^\[/],
-            minifyCSS: true,
-            minifyJS: true,
-            removeAttributeQuotes: true,
-            removeComments: true,
-            removeOptionalTags: true,
-            removeRedundantAttributes: true,
-            removeScriptTypeAttributes: true,
-            removeStyleLinkTypeAttributes: true,
-            sortAttributes: true,
-            sortClassName: true,
-          })
-      : undefined,
-  });
+  return requestContext.run({ requestId: requestId ?? undefined }, async () => {
+    const response = await resolve(event, {
+      transformPageChunk: !dev
+        ? ({ html }) =>
+            minify(html, {
+              collapseBooleanAttributes: true,
+              collapseWhitespace: true,
+              conservativeCollapse: true,
+              decodeEntities: true,
+              html5: true,
+              ignoreCustomComments: [/^\[/],
+              minifyCSS: true,
+              minifyJS: true,
+              removeAttributeQuotes: true,
+              removeComments: true,
+              removeOptionalTags: true,
+              removeRedundantAttributes: true,
+              removeScriptTypeAttributes: true,
+              removeStyleLinkTypeAttributes: true,
+              sortAttributes: true,
+              sortClassName: true,
+            })
+        : undefined,
+    });
 
-  return response;
+    return response;
+  });
 };
 
 export const handleError: HandleServerError = async ({
diff --git a/web/src/lib/api.server.ts b/web/src/lib/api.server.ts
index 00d962c..b7ea758 100644
--- a/web/src/lib/api.server.ts
+++ b/web/src/lib/api.server.ts
@@ -1,5 +1,6 @@
 import { getLogger } from "@logtape/logtape";
 import { env } from "$env/dynamic/private";
+import { requestContext } from "$lib/server/context";
 
 const logger = getLogger(["ssr", "lib", "api"]);
 
@@ -39,6 +40,15 @@ function createSmartFetch(upstreamUrl: string) {
     // Remove custom fetch property from options (not part of standard RequestInit)
     delete (fetchOptions as Record<string, unknown>).fetch;
 
+    // Forward request ID to Rust API
+    const ctx = requestContext.getStore();
+    if (ctx?.requestId) {
+      fetchOptions.headers = {
+        ...fetchOptions.headers,
+        "x-request-id": ctx.requestId,
+      };
+    }
+
     // Add Unix socket path if needed
     if (isUnixSocket) {
       fetchOptions.unix = upstreamUrl;
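Note: both hooks.server.ts and api.server.ts above import `requestContext` from `$lib/server/context`, but that module is not part of this diff. A minimal sketch of the shape the call sites imply, assuming a thin `AsyncLocalStorage` wrapper (Bun implements `node:async_hooks`):

```ts
// Hypothetical $lib/server/context.ts — not shown in this diff; the shape is
// inferred from requestContext.run(...) in hooks.server.ts and
// requestContext.getStore() in api.server.ts.
import { AsyncLocalStorage } from "node:async_hooks";

export interface RequestContext {
  requestId?: string;
}

// run(store, fn) makes `store` visible to every async callee via getStore(),
// which is how api.server.ts reads the request ID without it being threaded
// through function arguments.
export const requestContext = new AsyncLocalStorage<RequestContext>();
```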
diff --git a/web/src/lib/components/DiscordProfileModal.svelte b/web/src/lib/components/DiscordProfileModal.svelte
index 1193cea..c870815 100644
--- a/web/src/lib/components/DiscordProfileModal.svelte
+++ b/web/src/lib/components/DiscordProfileModal.svelte
@@ -122,7 +122,10 @@
-          {username}
+          {username}
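End to end: the Rust middleware mints the ID, proxy.rs forwards it to Bun, hooks.server.ts seeds the async context, and api.server.ts attaches it to calls back into the Rust API, so one ID spans the whole round trip. A hypothetical server `load` needs no extra plumbing (`apiFetch` is an illustrative name, not an export shown in this diff):

```ts
// Hypothetical web/src/routes/projects/+page.server.ts — assumes api.server.ts
// exposes a fetch wrapper built on createSmartFetch.
import { apiFetch } from "$lib/api.server";

export async function load() {
  // Runs inside requestContext.run(...) from hooks.server.ts, so this call
  // carries the same x-request-id the Rust proxy generated and lands in the
  // same tracing span on the Rust side.
  const res = await apiFetch("/api/projects");
  return { projects: await res.json() };
}
```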