feat: add Docker containerization with multi-stage build

Includes a .dockerignore, a Dockerfile with cargo-chef layer caching, and Justfile recipes for building and running the containerized app. Updates console-logger to emit either JSON or pretty-printed logs based on the LOG_JSON env var.
2026-01-04 20:06:56 -06:00
parent edf271bcc6
commit 9de3c84f00
15 changed files with 238 additions and 244 deletions
.dockerignore (+27)
@@ -0,0 +1,27 @@
# Ignore build artifacts and dev files
target/
web/node_modules/
web/.svelte-kit/
web/build/
# VCS
.git/
.gitignore
# IDE
.vscode/
.idea/
*.swp
*.swo
# Logs
*.log
# OS
.DS_Store
Thumbs.db
# Don't ignore these - we need them
!web/build/client
!web/build/server
!web/build/*.js
Dockerfile (+135)
@@ -0,0 +1,135 @@
# ========== Stage 1: Cargo Chef Base ==========
FROM rust:1.91-alpine AS chef
WORKDIR /build
RUN apk add --no-cache musl-dev pkgconfig openssl-dev openssl-libs-static && \
cargo install cargo-chef --locked
# ========== Stage 2: Recipe Planner ==========
FROM chef AS planner
COPY Cargo.toml Cargo.lock ./
COPY src/ ./src/
RUN cargo chef prepare --recipe-path recipe.json
# ========== Stage 3: Rust Builder ==========
FROM chef AS builder
# Cook dependencies (cached until Cargo.toml/Cargo.lock change)
COPY --from=planner /build/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
# Copy source and build
COPY Cargo.toml Cargo.lock ./
COPY src/ ./src/
# Create placeholder for embedded assets (will be replaced in final build)
RUN mkdir -p web/build/client && \
echo "placeholder" > web/build/client/.gitkeep
RUN cargo build --release && \
strip target/release/api
# ========== Stage 4: Frontend Builder ==========
FROM oven/bun:1 AS frontend
WORKDIR /build
# Install dependencies (cached until package.json/bun.lock change)
COPY web/package.json web/bun.lock ./
RUN bun install --frozen-lockfile
# Build frontend
COPY web/ ./
RUN bun run build
# ========== Stage 5: Final Rust Build (with embedded assets) ==========
FROM chef AS final-builder
# Cook dependencies (cached from earlier)
COPY --from=planner /build/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json
# Copy source
COPY Cargo.toml Cargo.lock ./
COPY src/ ./src/
# Copy frontend client assets for embedding
COPY --from=frontend /build/build/client ./web/build/client
# Build with real assets
RUN cargo build --release && \
strip target/release/api
# ========== Stage 6: Runtime ==========
FROM oven/bun:1-alpine AS runtime
WORKDIR /app
# Install runtime dependencies
RUN apk add --no-cache ca-certificates tzdata
# Copy Rust binary
COPY --from=final-builder /build/target/release/api ./api
# Copy Bun SSR server
COPY --from=frontend /build/build/server ./web/build/server
COPY --from=frontend /build/build/*.js ./web/build/
COPY web/console-logger.js ./web/
# Create inline entrypoint script
RUN cat > /entrypoint.sh << 'EOF'
#!/bin/sh
set -e
cleanup() {
kill "$BUN_PID" "$RUST_PID" 2>/dev/null || true
rm -f /tmp/api.sock /tmp/bun.sock
exit 0
}
trap cleanup SIGTERM SIGINT
# Start Bun SSR (propagate LOG_JSON to Bun process)
cd /app/web/build
SOCKET_PATH=/tmp/bun.sock LOG_JSON="${LOG_JSON}" bun --preload /app/web/console-logger.js index.js &
BUN_PID=$!
# Wait for Bun socket
timeout=50
while [ ! -S /tmp/bun.sock ] && [ $timeout -gt 0 ]; do
sleep 0.1
timeout=$((timeout - 1))
done
if [ ! -S /tmp/bun.sock ]; then
echo "ERROR: Bun failed to create socket within 5s"
exit 1
fi
# Start Rust server
# Note: [::] binds to both IPv4 and IPv6 on Linux
/app/api \
--listen "[::]:${PORT:-8080}" \
--listen /tmp/api.sock \
--downstream /tmp/bun.sock &
RUST_PID=$!
# Wait for a child to exit (wait -n where supported; the plain-wait fallback blocks until both exit)
wait -n "$BUN_PID" "$RUST_PID" 2>/dev/null || wait "$BUN_PID" "$RUST_PID"
cleanup
EOF
RUN chmod +x /entrypoint.sh
# Environment configuration
# RUST_LOG - optional, overrides LOG_LEVEL with full tracing filter syntax
# LOG_JSON - defaults to true in Docker, false outside
ENV PORT=8080 \
LOG_LEVEL=info \
LOG_JSON=true \
TZ=Etc/UTC
EXPOSE 8080
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD wget -q --spider http://localhost:${PORT}/api/health || exit 1
ENTRYPOINT ["/entrypoint.sh"]
Justfile (+25 -17)
@@ -1,26 +1,34 @@
default:
just --list
dev:
just dev-json | hl --config .hl.config.toml -P
dev-json:
LOG_JSON=true UPSTREAM_URL=/tmp/xevion-api.sock bunx concurrently --raw --prefix none "bun run --silent --cwd web dev --port 5173" "cargo watch --quiet --exec 'run --quiet -- --listen localhost:8080 --listen /tmp/xevion-api.sock --downstream http://localhost:5173'"
setup:
bun install --cwd web
cargo build
build:
bun run --cwd web build
cargo build --release
serve:
LOG_JSON=true bunx concurrently --raw --prefix none "SOCKET_PATH=/tmp/xevion-bun.sock bun --preload ../console-logger.js --silent --cwd web/build index.js" "target/release/api --listen localhost:8080 --listen /tmp/xevion-api.sock --downstream /tmp/xevion-bun.sock"
check:
bun run --cwd web format
bun run --cwd web lint
bun run --cwd web check
cargo clippy --all-targets
cargo fmt --check
build:
bun run --cwd web build
cargo build --release
dev:
just dev-json | hl --config .hl.config.toml -P
dev-json:
LOG_JSON=true UPSTREAM_URL=/tmp/xevion-api.sock bunx concurrently --raw --prefix none "bun run --silent --cwd web dev --port 5173" "cargo watch --quiet --exec 'run --quiet -- --listen localhost:8080 --listen /tmp/xevion-api.sock --downstream http://localhost:5173'"
serve:
just serve-json | hl --config .hl.config.toml -P
serve-json:
LOG_JSON=true bunx concurrently --raw --prefix none "SOCKET_PATH=/tmp/xevion-bun.sock bun --preload ../console-logger.js --silent --cwd web/build index.js" "target/release/api --listen localhost:8080 --listen /tmp/xevion-api.sock --downstream /tmp/xevion-bun.sock"
docker-image:
docker build -t xevion-dev .
docker-run port="8080":
just docker-run-json {{port}} | hl --config .hl.config.toml -P
docker-run-json port="8080":
docker run -p {{port}}:8080 xevion-dev
-7
@@ -4,16 +4,11 @@ use axum::{
};
use include_dir::{include_dir, Dir};
/// Embedded client assets from the SvelteKit build
/// These are the static JS/CSS bundles that get served to browsers
static CLIENT_ASSETS: Dir = include_dir!("$CARGO_MANIFEST_DIR/web/build/client");
/// Serves embedded client assets from the /_app path
/// Returns 404 if the asset doesn't exist
pub async fn serve_embedded_asset(uri: Uri) -> Response {
let path = uri.path();
// Strip leading slash for lookup
let asset_path = path.strip_prefix('/').unwrap_or(path);
match CLIENT_ASSETS.get_file(asset_path) {
@@ -31,14 +26,12 @@ pub async fn serve_embedded_asset(uri: Uri) -> Response {
}),
);
// Immutable assets can be cached forever (they're content-hashed)
if path.contains("/immutable/") {
headers.insert(
header::CACHE_CONTROL,
header::HeaderValue::from_static("public, max-age=31536000, immutable"),
);
} else {
// Version file and other assets get short cache
headers.insert(
header::CACHE_CONTROL,
header::HeaderValue::from_static("public, max-age=3600"),
+10 -25
@@ -3,28 +3,20 @@ use std::net::{SocketAddr, ToSocketAddrs};
use std::path::PathBuf;
use std::str::FromStr;
/// Server configuration parsed from CLI arguments and environment variables
#[derive(Parser, Debug)]
#[command(name = "api")]
#[command(about = "xevion.dev API server with ISR caching", long_about = None)]
pub struct Args {
/// Address(es) to listen on. Can be host:port, :port, or Unix socket path.
/// Can be specified multiple times.
/// Examples: :8080, 0.0.0.0:8080, [::]:8080, /tmp/api.sock
#[arg(long, env = "LISTEN_ADDR", value_delimiter = ',', required = true)]
pub listen: Vec<ListenAddr>,
/// Downstream Bun SSR server URL or Unix socket path
/// Examples: http://localhost:5173, /tmp/bun.sock
#[arg(long, env = "DOWNSTREAM_URL", required = true)]
pub downstream: String,
/// Optional header name to trust for request IDs (e.g., X-Railway-Request-Id)
#[arg(long, env = "TRUST_REQUEST_ID")]
pub trust_request_id: Option<String>,
}
/// Address to listen on - either TCP or Unix socket
#[derive(Debug, Clone)]
pub enum ListenAddr {
Tcp(SocketAddr),
@@ -35,12 +27,10 @@ impl FromStr for ListenAddr {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Unix socket: starts with / or ./
if s.starts_with('/') || s.starts_with("./") {
return Ok(ListenAddr::Unix(PathBuf::from(s)));
}
// Shorthand :port -> 127.0.0.1:port
if let Some(port_str) = s.strip_prefix(':') {
let port: u16 = port_str
.parse()
@@ -48,23 +38,18 @@ impl FromStr for ListenAddr {
return Ok(ListenAddr::Tcp(SocketAddr::from(([127, 0, 0, 1], port))));
}
// Try parsing as a socket address (handles both IPv4 and IPv6)
// This supports formats like: 0.0.0.0:8080, [::]:8080, 192.168.1.1:3000
match s.parse::<SocketAddr>() {
Ok(addr) => Ok(ListenAddr::Tcp(addr)),
Err(_) => {
// Try resolving as hostname:port
match s.to_socket_addrs() {
Ok(mut addrs) => addrs
.next()
.ok_or_else(|| format!("Could not resolve address: {}", s))
.map(ListenAddr::Tcp),
Err(_) => Err(format!(
"Invalid address '{}'. Expected host:port, :port, or Unix socket path",
s
)),
}
}
Err(_) => match s.to_socket_addrs() {
Ok(mut addrs) => addrs
.next()
.ok_or_else(|| format!("Could not resolve address: {}", s))
.map(ListenAddr::Tcp),
Err(_) => Err(format!(
"Invalid address '{}'. Expected host:port, :port, or Unix socket path",
s
)),
},
}
}
}
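A unit test pins down the accepted formats; this sketch is illustrative and not part of the commit:
#[cfg(test)]
mod listen_addr_tests {
    use super::ListenAddr;

    #[test]
    fn parses_documented_forms() {
        // Bare :port is shorthand for 127.0.0.1:port
        assert!(matches!(
            ":8080".parse::<ListenAddr>(),
            Ok(ListenAddr::Tcp(a)) if a.port() == 8080
        ));
        // Bracketed IPv6 with a port parses as a TCP address
        assert!(matches!("[::]:8080".parse::<ListenAddr>(), Ok(ListenAddr::Tcp(_))));
        // A leading / (or ./) selects a Unix socket path
        assert!(matches!("/tmp/api.sock".parse::<ListenAddr>(), Ok(ListenAddr::Unix(_))));
        // Unresolvable input is rejected with a descriptive error
        assert!("no-port-here".parse::<ListenAddr>().is_err());
    }
}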
-41
@@ -1,5 +1,3 @@
//! Custom tracing formatter for Railway-compatible structured logging
use nu_ansi_term::Color;
use serde::Serialize;
use serde_json::{Map, Value};
@@ -12,17 +10,9 @@ use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields};
use tracing_subscriber::registry::LookupSpan;
/// Cached format description for timestamps with 3 subsecond digits (milliseconds)
const TIMESTAMP_FORMAT: &[FormatItem<'static>] =
format_description!("[hour]:[minute]:[second].[subsecond digits:3]");
/// A custom formatter with enhanced timestamp formatting and colored output
///
/// Provides human-readable output for local development with:
/// - Colored log levels
/// - Timestamp with millisecond precision
/// - Span context with hierarchy
/// - Clean field formatting
pub struct CustomPrettyFormatter;
impl<S, N> FormatEvent<S, N> for CustomPrettyFormatter
@@ -38,7 +28,6 @@ where
) -> fmt::Result {
let meta = event.metadata();
// 1) Timestamp (dimmed when ANSI)
let now = OffsetDateTime::now_utc();
let formatted_time = now.format(&TIMESTAMP_FORMAT).map_err(|e| {
eprintln!("Failed to format timestamp: {}", e);
@@ -47,11 +36,9 @@ where
write_dimmed(&mut writer, formatted_time)?;
writer.write_char(' ')?;
// 2) Colored 5-char level
write_colored_level(&mut writer, meta.level())?;
writer.write_char(' ')?;
// 3) Span scope chain (bold names, fields in braces, dimmed ':')
if let Some(scope) = ctx.event_scope() {
let mut saw_any = false;
for span in scope.from_root() {
@@ -76,37 +63,18 @@ where
}
}
// 4) Target (dimmed), then a space
if writer.has_ansi_escapes() {
write!(writer, "{}: ", Color::DarkGray.paint(meta.target()))?;
} else {
write!(writer, "{}: ", meta.target())?;
}
// 5) Event fields
ctx.format_fields(writer.by_ref(), event)?;
// 6) Newline
writeln!(writer)
}
}
/// A custom JSON formatter that flattens fields to root level for Railway
///
/// Outputs logs in Railway-compatible format:
/// ```json
/// {
/// "message": "...",
/// "level": "...",
/// "target": "...",
/// "customAttribute": "..."
/// }
/// ```
///
/// This format allows Railway to:
/// - Parse the `message` field correctly
/// - Filter by `level` and custom attributes using `@attribute:value`
/// - Preserve multi-line logs like stack traces
pub struct CustomJsonFormatter;
impl<S, N> FormatEvent<S, N> for CustomJsonFormatter
@@ -196,21 +164,14 @@ where
};
event.record(&mut visitor);
// Collect span information from the span hierarchy
// Flatten all span fields directly into root level
if let Some(scope) = ctx.event_scope() {
for span in scope.from_root() {
// Extract span fields by parsing the stored extension data
// The fields are stored as a formatted string, so we need to parse them
let ext = span.extensions();
if let Some(formatted_fields) = ext.get::<FormattedFields<N>>() {
let field_str = formatted_fields.fields.as_str();
// Parse key=value pairs from the formatted string
// Format is typically: key=value key2=value2
for pair in field_str.split_whitespace() {
if let Some((key, value)) = pair.split_once('=') {
// Remove quotes if present
let value = value.trim_matches('"').trim_matches('\'');
fields.insert(key.to_string(), Value::String(value.to_string()));
}
@@ -240,7 +201,6 @@ where
}
}
/// Write the verbosity level with colored output
fn write_colored_level(writer: &mut Writer<'_>, level: &Level) -> fmt::Result {
if writer.has_ansi_escapes() {
let colored = match *level {
@@ -252,7 +212,6 @@ fn write_colored_level(writer: &mut Writer<'_>, level: &Level) -> fmt::Result {
};
write!(writer, "{}", colored)
} else {
// Right-align to width 5 (pad on the left) for alignment
match *level {
Level::TRACE => write!(writer, "{:>5}", "TRACE"),
Level::DEBUG => write!(writer, "{:>5}", "DEBUG"),
+4 -65
@@ -26,13 +26,9 @@ fn init_tracing() {
.map(|v| v == "true" || v == "1")
.unwrap_or(false);
// Build the EnvFilter
// Priority: RUST_LOG > LOG_LEVEL > default
let filter = if let Ok(rust_log) = std::env::var("RUST_LOG") {
// RUST_LOG overrides everything
EnvFilter::new(rust_log)
} else {
// Get LOG_LEVEL for our crate, default based on build profile
let our_level = std::env::var("LOG_LEVEL").unwrap_or_else(|_| {
if cfg!(debug_assertions) {
"debug".to_string()
@@ -41,7 +37,6 @@ fn init_tracing() {
}
});
// Default other crates to WARN, our crate to LOG_LEVEL
EnvFilter::new(format!("warn,api={}", our_level))
};
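The precedence here is RUST_LOG, then LOG_LEVEL, then a build-profile default. Factored out as a pure function (a hypothetical helper for illustration, not part of this commit), it reads:
// Sketch of the same precedence rule with the environment lookups lifted out.
fn filter_directives(rust_log: Option<&str>, log_level: Option<&str>, debug_build: bool) -> String {
    match rust_log {
        // RUST_LOG is taken verbatim and supports full tracing filter syntax
        Some(spec) => spec.to_string(),
        // Otherwise other crates stay at warn; only the api crate follows
        // LOG_LEVEL, defaulting by build profile
        None => {
            let level = log_level.unwrap_or(if debug_build { "debug" } else { "info" });
            format!("warn,api={level}")
        }
    }
}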
@@ -52,7 +47,7 @@ fn init_tracing() {
tracing_subscriber::fmt::layer()
.event_format(CustomJsonFormatter)
.fmt_fields(tracing_subscriber::fmt::format::DefaultFields::new())
.with_ansi(false), // Disable ANSI codes in JSON mode
.with_ansi(false),
)
.init();
} else {
@@ -65,26 +60,19 @@ fn init_tracing() {
#[tokio::main]
async fn main() {
// Initialize tracing with configurable format and levels
init_tracing();
// Parse CLI arguments and environment variables
let args = Args::parse();
// Validate we have at least one listen address
if args.listen.is_empty() {
eprintln!("Error: At least one --listen address is required");
std::process::exit(1);
}
// Create shared application state
let state = Arc::new(AppState {
downstream_url: args.downstream.clone(),
});
// Build router with shared state
// Note: Axum's nest() handles /api/* but not /api or /api/ at the parent level
// So we explicitly add those routes before nesting
let app = Router::new()
.nest("/api", api_routes())
.route("/api/", any(api_root_404_handler))
@@ -93,10 +81,9 @@ async fn main() {
.layer(TraceLayer::new_for_http())
.layer(RequestIdLayer::new(args.trust_request_id.clone()))
.layer(CorsLayer::permissive())
.layer(RequestBodyLimitLayer::new(1_048_576)) // 1MB request body limit
.layer(RequestBodyLimitLayer::new(1_048_576))
.with_state(state);
// Spawn a listener for each address
let mut tasks = Vec::new();
for listen_addr in &args.listen {
@@ -110,7 +97,6 @@ async fn main() {
.await
.expect("Failed to bind TCP listener");
// Format as clickable URL
let url = if addr.is_ipv6() {
format!("http://[{}]:{}", addr.ip(), addr.port())
} else {
@@ -123,7 +109,6 @@ async fn main() {
.expect("Server error on TCP listener");
}
ListenAddr::Unix(path) => {
// Remove existing socket file if it exists
let _ = std::fs::remove_file(&path);
let listener = tokio::net::UnixListener::bind(&path)
@@ -140,22 +125,18 @@ async fn main() {
tasks.push(task);
}
// Wait for all listeners (this will run forever unless interrupted)
for task in tasks {
task.await.expect("Listener task panicked");
}
}
/// Shared application state
#[derive(Clone)]
struct AppState {
downstream_url: String,
}
/// Custom error type for proxy operations
#[derive(Debug)]
enum ProxyError {
/// Network error (connection failed, timeout, etc.)
Network(reqwest::Error),
}
@@ -169,7 +150,6 @@ impl std::fmt::Display for ProxyError {
impl std::error::Error for ProxyError {}
/// Check if a path represents a static asset that should be logged at TRACE level
fn is_static_asset(path: &str) -> bool {
path.starts_with("/node_modules/")
|| path.starts_with("/@") // Vite internals like /@vite/client, /@fs/, /@id/
@@ -188,15 +168,13 @@ fn is_static_asset(path: &str) -> bool {
|| path.ends_with(".map")
}
/// Check if a path represents a page route (heuristic: no file extension)
fn is_page_route(path: &str) -> bool {
!path.starts_with("/node_modules/")
&& !path.starts_with("/@")
&& !path.starts_with("/.svelte-kit/")
&& !path.contains('.') // Simple heuristic: no extension = likely a page
&& !path.contains('.')
}
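These two heuristics drive the per-status log levels in isr_handler below; a small test sketch (illustrative, not in the commit) makes the classification concrete:
#[cfg(test)]
mod path_class_tests {
    use super::{is_page_route, is_static_asset};

    #[test]
    fn classifies_request_paths() {
        // Source maps end in .map, so they count as static assets
        assert!(is_static_asset("/assets/app.js.map"));
        // Extensionless paths outside the tooling prefixes look like pages
        assert!(is_page_route("/projects"));
        // A dot in the path disqualifies it as a page route
        assert!(!is_page_route("/favicon.ico"));
    }
}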
// API routes for data endpoints
fn api_routes() -> Router<Arc<AppState>> {
Router::new()
.route("/", any(api_root_404_handler))
@@ -205,30 +183,25 @@ fn api_routes() -> Router<Arc<AppState>> {
.fallback(api_404_and_method_handler)
}
// API root 404 handler - explicit 404 for /api and /api/ requests
async fn api_root_404_handler(uri: axum::http::Uri) -> impl IntoResponse {
api_404_handler(uri).await
}
// Health check endpoint
async fn health_handler() -> impl IntoResponse {
(StatusCode::OK, "OK")
}
// API 404 and method handler - catches unmatched /api/* routes and validates methods/content-type
async fn api_404_and_method_handler(req: Request) -> impl IntoResponse {
let method = req.method();
let uri = req.uri();
let path = uri.path();
// For non-GET/HEAD requests, validate Content-Type
if method != axum::http::Method::GET && method != axum::http::Method::HEAD && method != axum::http::Method::OPTIONS {
let content_type = req.headers()
.get(axum::http::header::CONTENT_TYPE)
.and_then(|v| v.to_str().ok());
if let Some(ct) = content_type {
// Only accept application/json for request bodies
if !ct.starts_with("application/json") {
return (
StatusCode::UNSUPPORTED_MEDIA_TYPE,
@@ -261,9 +234,7 @@ async fn api_404_and_method_handler(req: Request) -> impl IntoResponse {
).into_response()
}
// Simple 404 handler for /api and /api/ that delegates to the main handler
async fn api_404_handler(uri: axum::http::Uri) -> impl IntoResponse {
// Create a minimal request for the handler
let req = Request::builder()
.uri(uri)
.body(axum::body::Body::empty())
@@ -272,7 +243,6 @@ async fn api_404_handler(uri: axum::http::Uri) -> impl IntoResponse {
api_404_and_method_handler(req).await
}
// Project data structure
#[derive(Debug, Clone, Serialize, Deserialize)]
struct ProjectLink {
url: String,
@@ -291,7 +261,6 @@ struct Project {
links: Vec<ProjectLink>,
}
// Projects endpoint - returns hardcoded project data for now
async fn projects_handler() -> impl IntoResponse {
let projects = vec![
Project {
@@ -325,8 +294,6 @@ async fn projects_handler() -> impl IntoResponse {
Json(projects)
}
// ISR handler - proxies to Bun SSR server
// This is the fallback for all routes not matched by /api/*
#[tracing::instrument(skip(state, req), fields(path = %req.uri().path(), method = %req.method()))]
async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Response {
let method = req.method().clone();
@@ -334,7 +301,6 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
let path = uri.path();
let query = uri.query().unwrap_or("");
// Only allow GET and HEAD requests outside of /api routes
if method != axum::http::Method::GET && method != axum::http::Method::HEAD {
tracing::warn!(method = %method, path = %path, "Non-GET/HEAD request to non-API route");
let mut headers = HeaderMap::new();
@@ -350,10 +316,8 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
.into_response();
}
// For HEAD requests, we'll still proxy to Bun but strip the body later
let is_head = method == axum::http::Method::HEAD;
// Check if API route somehow reached ISR handler (shouldn't happen)
if path.starts_with("/api/") {
tracing::error!("API request reached ISR handler - routing bug!");
return (
@@ -363,18 +327,13 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
.into_response();
}
// Build URL for Bun server
// For unix sockets, use http://localhost + path (socket is configured in client)
// For TCP, use the actual downstream URL
let bun_url = if state.downstream_url.starts_with('/') || state.downstream_url.starts_with("./") {
// Unix socket - host is ignored, just need the path
if query.is_empty() {
format!("http://localhost{}", path)
} else {
format!("http://localhost{}?{}", path, query)
}
} else {
// TCP - use the actual downstream URL
if query.is_empty() {
format!("{}{}", state.downstream_url, path)
} else {
@@ -382,27 +341,20 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
}
};
// Track request timing
let start = std::time::Instant::now();
// TODO: Add ISR caching layer here (moka, singleflight, stale-while-revalidate)
// For now, just proxy directly to Bun
match proxy_to_bun(&bun_url, state.clone()).await {
Ok((status, headers, body)) => {
let duration_ms = start.elapsed().as_millis() as u64;
let cache = "miss"; // Hardcoded for now, will change when caching is implemented
let cache = "miss";
// Intelligent logging based on path type and status
let is_static = is_static_asset(path);
let is_page = is_page_route(path);
match (status.as_u16(), is_static, is_page) {
// Static assets - success at TRACE
(200..=299, true, _) => {
tracing::trace!(status = status.as_u16(), duration_ms, cache, "ISR request");
}
// Static assets - 404 at WARN
(404, true, _) => {
tracing::warn!(
status = status.as_u16(),
@@ -411,7 +363,6 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
"ISR request - missing asset"
);
}
// Static assets - server error at ERROR
(500..=599, true, _) => {
tracing::error!(
status = status.as_u16(),
@@ -420,13 +371,10 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
"ISR request - server error"
);
}
// Page routes - success at DEBUG
(200..=299, _, true) => {
tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
}
// Page routes - 404 silent (normal case for non-existent pages)
(404, _, true) => {}
// Page routes - server error at ERROR
(500..=599, _, _) => {
tracing::error!(
status = status.as_u16(),
@@ -435,13 +383,11 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
"ISR request - server error"
);
}
// Default fallback - DEBUG
_ => {
tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
}
}
// Forward response, but strip body for HEAD requests
if is_head {
(status, headers).into_response()
} else {
@@ -465,34 +411,27 @@ async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Respon
}
}
// Proxy a request to the Bun SSR server, returning status, headers and body
async fn proxy_to_bun(
url: &str,
state: Arc<AppState>,
) -> Result<(StatusCode, HeaderMap, String), ProxyError> {
// Build client - if downstream_url is a path, use unix socket, otherwise TCP
let client = if state.downstream_url.starts_with('/') || state.downstream_url.starts_with("./") {
// Unix socket - the host in the URL (localhost) is ignored
let path = PathBuf::from(&state.downstream_url);
reqwest::Client::builder()
.unix_socket(path)
.build()
.map_err(ProxyError::Network)?
} else {
// Regular TCP connection
reqwest::Client::new()
};
let response = client.get(url).send().await.map_err(ProxyError::Network)?;
// Extract status code
let status = StatusCode::from_u16(response.status().as_u16())
.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR);
// Convert reqwest headers to axum HeaderMap
let mut headers = HeaderMap::new();
for (name, value) in response.headers() {
// Skip hop-by-hop headers and content-length (axum will recalculate it)
let name_str = name.as_str();
if name_str == "transfer-encoding"
|| name_str == "connection"
-11
@@ -1,5 +1,3 @@
//! Request ID middleware for distributed tracing and correlation
use axum::{
body::Body,
extract::Request,
@@ -9,15 +7,12 @@ use axum::{
use std::task::{Context, Poll};
use tower::{Layer, Service};
/// Layer that creates request ID spans for all requests
#[derive(Clone)]
pub struct RequestIdLayer {
/// Optional header name to trust for request IDs
trust_header: Option<HeaderName>,
}
impl RequestIdLayer {
/// Create a new request ID layer
pub fn new(trust_header: Option<String>) -> Self {
Self {
trust_header: trust_header.and_then(|h| h.parse().ok()),
@@ -36,7 +31,6 @@ impl<S> Layer<S> for RequestIdLayer {
}
}
/// Service that extracts or generates request IDs and creates tracing spans
#[derive(Clone)]
pub struct RequestIdService<S> {
inner: S,
@@ -57,7 +51,6 @@ where
}
fn call(&mut self, req: Request) -> Self::Future {
// Extract or generate request ID
let req_id = self
.trust_header
.as_ref()
@@ -66,18 +59,14 @@ where
.map(|s| s.to_string())
.unwrap_or_else(|| ulid::Ulid::new().to_string());
// Create a tracing span for this request
let span = tracing::info_span!("request", req_id = %req_id);
let _enter = span.enter();
// Clone span for the future
let span_clone = span.clone();
// Call the inner service
let future = self.inner.call(req);
Box::pin(async move {
// Execute the future within the span
let _enter = span_clone.enter();
future.await
})
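For context, a sketch of the trusted-header knob (the layer is already applied in main.rs; the header name mirrors the X-Railway-Request-Id example from the CLI docs, and this wiring is illustrative only):
use axum::Router;
// Hypothetical wiring: trust Railway's request ID header when present;
// otherwise the service above generates a fresh ULID per request.
fn with_request_ids(router: Router) -> Router {
    router.layer(RequestIdLayer::new(Some("X-Railway-Request-Id".to_string())))
}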
web/console-logger.js (+33 -18)
@@ -1,6 +1,3 @@
// Patch console methods to output structured JSON logs
// This runs before the Bun server starts to ensure all console output is formatted
const originalConsole = {
log: console.log,
error: console.error,
@@ -9,23 +6,41 @@ const originalConsole = {
debug: console.debug,
};
const useJson = process.env.LOG_JSON === "true" || process.env.LOG_JSON === "1";
function formatLog(level, args) {
const message = args.map(arg =>
typeof arg === 'object' ? JSON.stringify(arg) : String(arg)
).join(' ');
const message = args
.map((arg) => (typeof arg === "object" ? JSON.stringify(arg) : String(arg)))
.join(" ");
const logEntry = {
timestamp: new Date().toISOString(),
level: level,
message: message,
target: 'bun',
};
if (useJson) {
const logEntry = {
timestamp: new Date().toISOString(),
level: level,
message: message,
target: "bun",
};
originalConsole.log(JSON.stringify(logEntry));
} else {
const timestamp = new Date().toISOString().split("T")[1].slice(0, 12);
const levelColors = {
debug: "\x1b[36m", // cyan
info: "\x1b[32m", // green
warn: "\x1b[33m", // yellow
error: "\x1b[31m", // red
};
const color = levelColors[level] || "";
const reset = "\x1b[0m";
const gray = "\x1b[90m";
originalConsole.log(JSON.stringify(logEntry));
originalConsole.log(
`${gray}${timestamp}${reset} ${color}${level.toUpperCase().padEnd(5)}${reset} ${gray}bun${reset}: ${message}`,
);
}
}
console.log = (...args) => formatLog('info', args);
console.info = (...args) => formatLog('info', args);
console.warn = (...args) => formatLog('warn', args);
console.error = (...args) => formatLog('error', args);
console.debug = (...args) => formatLog('debug', args);
console.log = (...args) => formatLog("info", args);
console.info = (...args) => formatLog("info", args);
console.warn = (...args) => formatLog("warn", args);
console.error = (...args) => formatLog("error", args);
console.debug = (...args) => formatLog("debug", args);
-3
@@ -3,13 +3,11 @@ import { dev } from "$app/environment";
import { initLogger } from "$lib/logger";
import { getLogger } from "@logtape/logtape";
// Initialize logger on server startup
await initLogger();
const logger = getLogger(["ssr", "error"]);
export const handle: Handle = async ({ event, resolve }) => {
// Handle DevTools request silently to prevent console.log spam
if (
dev &&
event.url.pathname === "/.well-known/appspecific/com.chrome.devtools.json"
@@ -26,7 +24,6 @@ export const handleError: HandleServerError = async ({
status,
message,
}) => {
// Use structured logging via LogTape instead of console.error
logger.error(message, {
status,
method: event.request.method,
-11
@@ -3,20 +3,11 @@ import { env } from "$env/dynamic/private";
const logger = getLogger(["ssr", "lib", "api"]);
// Compute upstream configuration once at module load
const upstreamUrl = env.UPSTREAM_URL;
const isUnixSocket =
upstreamUrl?.startsWith("/") || upstreamUrl?.startsWith("./");
const baseUrl = isUnixSocket ? "http://localhost" : upstreamUrl;
/**
* Fetch utility for calling the Rust backend API.
* Automatically prefixes requests with the upstream URL from environment.
* Supports both HTTP URLs and Unix socket paths.
*
* Connection pooling and keep-alive are handled automatically by Bun.
* Default timeout is 30 seconds unless overridden via init.signal.
*/
export async function apiFetch<T>(
path: string,
init?: RequestInit,
@@ -29,10 +20,8 @@ export async function apiFetch<T>(
const url = `${baseUrl}${path}`;
const method = init?.method ?? "GET";
// Build fetch options with 30s default timeout and unix socket support
const fetchOptions: RequestInit & { unix?: string } = {
...init,
// Respect caller-provided signal, otherwise default to 30s timeout
signal: init?.signal ?? AbortSignal.timeout(30_000),
};
-4
@@ -195,7 +195,6 @@
float closestRad = 0.0;
float pointOpacity = 0.0;
// Check 9 neighboring grid points
for (float dx = -1.0; dx <= 1.0; dx += 1.0) {
for (float dy = -1.0; dy <= 1.0; dy += 1.0) {
vec2 testGrid = gridCoord + vec2(dx * spacing, dy * spacing);
@@ -296,7 +295,6 @@
const { gl, program } = context;
// Setup fullscreen quad geometry
const positionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
gl.bufferData(
@@ -309,7 +307,6 @@
gl.enableVertexAttribArray(positionLocation);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, 0, 0);
// Setup uniform manager
const uniforms = new UniformManager(gl, program, [
"u_resolution",
"u_time",
@@ -335,7 +332,6 @@
const dpr = window.devicePixelRatio || 1;
// Set static uniforms
uniforms.setStatic({
u_seed: Math.random() * 1000,
u_dpr: dpr,
-20
@@ -8,15 +8,6 @@ interface RailwayLogEntry {
[key: string]: unknown;
}
/**
* Custom formatter that outputs Railway-compatible JSON logs.
* Format: { timestamp, level, message, target, ...attributes }
*
* The target field is constructed from the logger category:
* - ["ssr"] -> "ssr"
* - ["ssr", "routes"] -> "ssr:routes"
* - ["ssr", "api", "auth"] -> "ssr:api:auth"
*/
function railwayFormatter(record: LogRecord): string {
const entry: RailwayLogEntry = {
timestamp: new Date().toISOString(),
@@ -25,7 +16,6 @@ function railwayFormatter(record: LogRecord): string {
target: record.category.join(":"),
};
// Flatten properties to root level (custom attributes)
if (record.properties && Object.keys(record.properties).length > 0) {
Object.assign(entry, record.properties);
}
@@ -33,18 +23,12 @@ function railwayFormatter(record: LogRecord): string {
return JSON.stringify(entry) + "\n";
}
/**
* Initialize LogTape with Railway-compatible JSON logging.
* Only outputs logs when LOG_JSON=true or LOG_JSON=1 is set.
* Safe to call multiple times (idempotent - will silently skip if already configured).
*/
export async function initLogger() {
const useJsonLogs =
process.env.LOG_JSON === "true" || process.env.LOG_JSON === "1";
try {
if (!useJsonLogs) {
// In development, use default console logging with nice formatting
await configure({
sinks: {
console: getConsoleSink(),
@@ -66,7 +50,6 @@ export async function initLogger() {
return;
}
// In production/JSON mode, use Railway-compatible JSON formatter
await configure({
sinks: {
json: (record: LogRecord) => {
@@ -75,13 +58,11 @@ export async function initLogger() {
},
filters: {},
loggers: [
// Meta logger for LogTape's internal messages
{
category: ["logtape", "meta"],
lowestLevel: "warning",
sinks: ["json"],
},
// SSR application logs
{
category: ["ssr"],
lowestLevel: "info",
@@ -90,7 +71,6 @@ export async function initLogger() {
],
});
} catch (error) {
// Already configured (HMR in dev mode), silently ignore
if (
error instanceof Error &&
error.message.includes("Already configured")
+1 -6
@@ -8,17 +8,12 @@
</script>
<AppWrapper class="overflow-x-hidden font-schibsted">
<!-- Top Navigation Bar -->
<div class="flex w-full justify-end items-center pt-5 px-6 pb-9">
<!-- <div class="flex gap-4 items-center"></div> -->
</div>
<div class="flex w-full justify-end items-center pt-5 px-6 pb-9"></div>
<!-- Main Content -->
<div class="flex items-center flex-col">
<div
class="max-w-2xl mx-6 border-b border-zinc-700 divide-y divide-zinc-700"
>
<!-- Name & Occupation -->
<div class="flex flex-col pb-4">
<span class="text-3xl font-bold text-white">Ryan Walters,</span>
<span class="text-2xl font-normal text-zinc-400">
+1 -14
@@ -9,9 +9,6 @@ interface RailwayLogEntry {
[key: string]: unknown;
}
/**
* Railway-compatible JSON formatter for Vite logs
*/
function railwayFormatter(record: LogRecord): string {
const entry: RailwayLogEntry = {
timestamp: new Date().toISOString(),
@@ -20,7 +17,6 @@ function railwayFormatter(record: LogRecord): string {
target: "vite",
};
// Flatten properties to root level
if (record.properties && Object.keys(record.properties).length > 0) {
Object.assign(entry, record.properties);
}
@@ -28,7 +24,6 @@ function railwayFormatter(record: LogRecord): string {
return JSON.stringify(entry) + "\n";
}
// Strip ANSI escape codes from strings
function stripAnsi(str: string): string {
return str.replace(/\u001b\[[0-9;]*m/g, "").trim();
}
@@ -37,14 +32,12 @@ export function jsonLogger(): Plugin {
const useJsonLogs =
process.env.LOG_JSON === "true" || process.env.LOG_JSON === "1";
// If JSON logging is disabled, return a minimal plugin that does nothing
if (!useJsonLogs) {
return {
name: "vite-plugin-json-logger",
};
}
// Configure LogTape for Vite plugin logging
let loggerConfigured = false;
const configureLogger = async () => {
if (loggerConfigured) return;
@@ -56,7 +49,6 @@ export function jsonLogger(): Plugin {
},
filters: {},
loggers: [
// Suppress LogTape meta logger info messages
{
category: ["logtape", "meta"],
lowestLevel: "warning",
@@ -86,7 +78,6 @@ export function jsonLogger(): Plugin {
customLogger: {
info(msg: string) {
const cleaned = stripAnsi(msg);
// Filter out noise
if (
!cleaned ||
ignoredMessages.has(cleaned) ||
@@ -108,9 +99,7 @@ export function jsonLogger(): Plugin {
logger.error(cleaned);
}
},
clearScreen() {
// No-op since clearScreen is already false
},
clearScreen() {},
hasErrorLogged() {
return false;
},
@@ -126,7 +115,6 @@ export function jsonLogger(): Plugin {
server = s;
const logger = getLogger(["vite"]);
// Override the default URL printing
const originalPrintUrls = server.printUrls;
server.printUrls = () => {
const urls = server.resolvedUrls;
@@ -138,7 +126,6 @@ export function jsonLogger(): Plugin {
}
};
// Listen to server events
server.httpServer?.once("listening", () => {
logger.info("server listening");
});