mirror of
https://github.com/Xevion/xevion.dev.git
synced 2026-01-31 22:26:33 -06:00
feat: add ISR cache with stale-while-revalidate pattern
Implements in-memory caching for SSR pages using moka with: - Configurable fresh/stale TTLs (60s/300s defaults) - Background refresh for stale entries - Cache invalidation on project/tag mutations - Pre-cached icon collections on startup - Skips cache for authenticated requests
This commit is contained in:
+313
@@ -0,0 +1,313 @@
|
||||
//! ISR (Incremental Static Regeneration) cache implementation
|
||||
//!
|
||||
//! Provides in-memory caching for SSR pages with:
|
||||
//! - TTL-based expiration
|
||||
//! - Stale-while-revalidate pattern
|
||||
//! - Singleflight (via moka's built-in coalescing)
|
||||
//! - On-demand invalidation
|
||||
|
||||
use axum::http::{HeaderMap, StatusCode};
|
||||
use dashmap::DashSet;
|
||||
use moka::future::Cache;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
/// Cached response data
///
/// An immutable snapshot of an upstream SSR response stored in the ISR
/// cache, stamped with the monotonic time it was captured so fresh/stale
/// age checks can be made later.
#[derive(Clone)]
pub struct CachedResponse {
    /// HTTP status of the upstream response.
    pub status: StatusCode,
    /// Response headers as received from upstream.
    pub headers: HeaderMap,
    /// Full response body (`Bytes`, so clones are cheap reference bumps).
    pub body: axum::body::Bytes,
    /// Monotonic timestamp taken when the entry was created; basis for
    /// `is_fresh` / `is_stale_but_usable` / `age`.
    pub cached_at: Instant,
}
|
||||
|
||||
impl CachedResponse {
|
||||
pub fn new(status: StatusCode, headers: HeaderMap, body: axum::body::Bytes) -> Self {
|
||||
Self {
|
||||
status,
|
||||
headers,
|
||||
body,
|
||||
cached_at: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this response is still fresh (within fresh_duration)
|
||||
pub fn is_fresh(&self, fresh_duration: Duration) -> bool {
|
||||
self.cached_at.elapsed() < fresh_duration
|
||||
}
|
||||
|
||||
/// Check if this response is stale but still usable (within stale_duration)
|
||||
pub fn is_stale_but_usable(&self, fresh_duration: Duration, stale_duration: Duration) -> bool {
|
||||
let age = self.cached_at.elapsed();
|
||||
age >= fresh_duration && age < stale_duration
|
||||
}
|
||||
|
||||
/// Get the age of this cached response
|
||||
pub fn age(&self) -> Duration {
|
||||
self.cached_at.elapsed()
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for the ISR cache
///
/// Defaults come from [`Default`] (1000 entries, 60s fresh, 300s stale,
/// enabled); values may also be loaded from environment variables.
#[derive(Debug, Clone)]
pub struct IsrCacheConfig {
    /// Maximum number of cached entries
    pub max_entries: u64,
    /// Duration a response is considered fresh (served without refresh)
    pub fresh_duration: Duration,
    /// Total duration before entry is evicted (stale responses served during refresh)
    ///
    /// Should be >= `fresh_duration`, otherwise the stale window is empty.
    pub stale_duration: Duration,
    /// Whether caching is enabled
    pub enabled: bool,
}
|
||||
|
||||
impl Default for IsrCacheConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_entries: 1000,
|
||||
fresh_duration: Duration::from_secs(60),
|
||||
stale_duration: Duration::from_secs(300),
|
||||
enabled: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl IsrCacheConfig {
|
||||
/// Load configuration from environment variables
|
||||
pub fn from_env() -> Self {
|
||||
let max_entries = std::env::var("ISR_CACHE_MAX_ENTRIES")
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.unwrap_or(1000);
|
||||
|
||||
let fresh_sec = std::env::var("ISR_CACHE_FRESH_SEC")
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.unwrap_or(60);
|
||||
|
||||
let stale_sec = std::env::var("ISR_CACHE_STALE_SEC")
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.unwrap_or(300);
|
||||
|
||||
let enabled = std::env::var("ISR_CACHE_ENABLED")
|
||||
.map(|v| v != "false" && v != "0")
|
||||
.unwrap_or(true);
|
||||
|
||||
Self {
|
||||
max_entries,
|
||||
fresh_duration: Duration::from_secs(fresh_sec),
|
||||
stale_duration: Duration::from_secs(stale_sec),
|
||||
enabled,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// ISR cache for SSR page responses
///
/// Wraps a moka async cache keyed by path (including query string), plus a
/// set of keys whose background refresh is currently in flight so only one
/// refresh task per key is spawned at a time.
pub struct IsrCache {
    // Entries are Arc'd so cache hits can be served without copying the body.
    cache: Cache<String, Arc<CachedResponse>>,
    /// Tracks paths currently being refreshed in background
    refreshing: DashSet<String>,
    /// Cache tuning knobs; public so handlers can read the fresh/stale windows.
    pub config: IsrCacheConfig,
}
|
||||
|
||||
impl IsrCache {
|
||||
/// Create a new ISR cache with the given configuration
|
||||
pub fn new(config: IsrCacheConfig) -> Self {
|
||||
let cache = Cache::builder()
|
||||
.max_capacity(config.max_entries)
|
||||
// Use stale_duration as TTL - we handle fresh/stale logic ourselves
|
||||
.time_to_live(config.stale_duration)
|
||||
.name("isr_cache")
|
||||
.build();
|
||||
|
||||
Self {
|
||||
cache,
|
||||
refreshing: DashSet::new(),
|
||||
config,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a cached response if it exists
|
||||
pub async fn get(&self, path: &str) -> Option<Arc<CachedResponse>> {
|
||||
if !self.config.enabled {
|
||||
return None;
|
||||
}
|
||||
self.cache.get(path).await
|
||||
}
|
||||
|
||||
/// Insert a response into the cache
|
||||
pub async fn insert(&self, path: String, response: CachedResponse) {
|
||||
if !self.config.enabled {
|
||||
return;
|
||||
}
|
||||
self.cache.insert(path, Arc::new(response)).await;
|
||||
}
|
||||
|
||||
/// Check if a path is currently being refreshed
|
||||
pub fn is_refreshing(&self, path: &str) -> bool {
|
||||
self.refreshing.contains(path)
|
||||
}
|
||||
|
||||
/// Mark a path as being refreshed. Returns true if it wasn't already refreshing.
|
||||
pub fn start_refresh(&self, path: &str) -> bool {
|
||||
self.refreshing.insert(path.to_string())
|
||||
}
|
||||
|
||||
/// Mark a path refresh as complete
|
||||
pub fn end_refresh(&self, path: &str) {
|
||||
self.refreshing.remove(path);
|
||||
}
|
||||
|
||||
/// Invalidate a single cached path
|
||||
pub async fn invalidate(&self, path: &str) {
|
||||
self.cache.invalidate(path).await;
|
||||
tracing::debug!(path = %path, "Cache entry invalidated");
|
||||
}
|
||||
|
||||
/// Invalidate multiple cached paths
|
||||
pub async fn invalidate_many(&self, paths: &[&str]) {
|
||||
for path in paths {
|
||||
self.cache.invalidate(*path).await;
|
||||
}
|
||||
tracing::info!(paths = ?paths, "Cache entries invalidated");
|
||||
}
|
||||
|
||||
/// Invalidate all entries matching a prefix
|
||||
pub async fn invalidate_prefix(&self, prefix: &str) {
|
||||
// moka doesn't have prefix invalidation, so we need to iterate
|
||||
// This is O(n) but invalidation should be infrequent
|
||||
let prefix_owned = prefix.to_string();
|
||||
self.cache
|
||||
.invalidate_entries_if(move |key, _| key.starts_with(&prefix_owned))
|
||||
.ok();
|
||||
tracing::info!(prefix = %prefix, "Cache entries with prefix invalidated");
|
||||
}
|
||||
|
||||
/// Invalidate all cached entries
|
||||
pub async fn invalidate_all(&self) {
|
||||
self.cache.invalidate_all();
|
||||
tracing::info!("All cache entries invalidated");
|
||||
}
|
||||
|
||||
/// Get cache statistics
|
||||
pub fn stats(&self) -> CacheStats {
|
||||
CacheStats {
|
||||
entry_count: self.cache.entry_count(),
|
||||
weighted_size: self.cache.weighted_size(),
|
||||
refreshing_count: self.refreshing.len(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Cache statistics for observability
///
/// Serializable snapshot intended for exposure via an admin/metrics endpoint.
#[derive(Debug, Clone, serde::Serialize)]
pub struct CacheStats {
    /// Number of cached entries as reported by moka (may lag recent
    /// writes/evictions - see moka docs; confirm if exactness matters).
    pub entry_count: u64,
    /// Weighted size reported by moka; with no weigher configured this
    /// presumably tracks entry count - verify against the builder setup.
    pub weighted_size: u64,
    /// Number of keys with a background refresh currently in flight.
    pub refreshing_count: usize,
}
|
||||
|
||||
/// Determines if a path should be cached
///
/// Excludes:
/// - Admin pages (session-specific)
/// - API routes (handled separately)
/// - Internal routes
/// - Static assets (served directly from embedded files)
pub fn is_cacheable_path(path: &str) -> bool {
    // Prefixes that must never be cached:
    //   "/admin"     - session-specific pages
    //   "/api/"      - API routes handled separately
    //   "/internal/" - internal routes
    //   "/_app/", "/." - static assets served from embedded files
    const EXCLUDED_PREFIXES: [&str; 5] = ["/admin", "/api/", "/internal/", "/_app/", "/."];

    !EXCLUDED_PREFIXES
        .iter()
        .any(|prefix| path.starts_with(prefix))
}
|
||||
|
||||
/// Normalize a path into a cache key
///
/// Query strings are kept as part of the key since SSR pages may render
/// differently based on query params (e.g., ?tag=rust). An absent or empty
/// query yields the bare path.
pub fn cache_key(path: &str, query: Option<&str>) -> String {
    query
        .filter(|q| !q.is_empty())
        .map_or_else(|| path.to_string(), |q| format!("{path}?{q}"))
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Path classification: only public, non-admin/non-API/non-asset routes
    // are cacheable.
    #[test]
    fn test_is_cacheable_path() {
        // Should cache
        assert!(is_cacheable_path("/"));
        assert!(is_cacheable_path("/projects"));
        assert!(is_cacheable_path("/projects/my-project"));

        // Should not cache
        assert!(!is_cacheable_path("/admin"));
        assert!(!is_cacheable_path("/admin/projects"));
        assert!(!is_cacheable_path("/api/projects"));
        assert!(!is_cacheable_path("/internal/health"));
        assert!(!is_cacheable_path("/_app/immutable/foo.js"));
    }

    // A missing query and an empty query must normalize to the same key so
    // they share one cache entry.
    #[test]
    fn test_cache_key() {
        assert_eq!(cache_key("/projects", None), "/projects");
        assert_eq!(cache_key("/projects", Some("")), "/projects");
        assert_eq!(
            cache_key("/projects", Some("tag=rust")),
            "/projects?tag=rust"
        );
    }

    // Walks an entry through fresh -> stale-but-usable -> expired.
    // NOTE(review): timing-based test using real sleeps (~210ms total); may
    // be flaky on a heavily loaded CI runner since the windows are tight.
    #[tokio::test]
    async fn test_cached_response_freshness() {
        let response = CachedResponse::new(
            StatusCode::OK,
            HeaderMap::new(),
            axum::body::Bytes::from_static(b"test"),
        );

        let fresh = Duration::from_millis(100);
        let stale = Duration::from_millis(200);

        // Should be fresh immediately
        assert!(response.is_fresh(fresh));
        assert!(!response.is_stale_but_usable(fresh, stale));

        // Wait a bit
        tokio::time::sleep(Duration::from_millis(110)).await;

        // Should be stale but usable
        assert!(!response.is_fresh(fresh));
        assert!(response.is_stale_but_usable(fresh, stale));

        // Wait more
        tokio::time::sleep(Duration::from_millis(100)).await;

        // Should be neither fresh nor usable
        assert!(!response.is_fresh(fresh));
        assert!(!response.is_stale_but_usable(fresh, stale));
    }
}
|
||||
+43
-14
@@ -243,6 +243,9 @@ pub async fn create_project_handler(
|
||||
|
||||
tracing::info!(project_id = %project.id, project_name = %project.name, "Project created");
|
||||
|
||||
// Invalidate cached pages that display projects
|
||||
state.isr_cache.invalidate_many(&["/", "/projects"]).await;
|
||||
|
||||
(
|
||||
StatusCode::CREATED,
|
||||
Json(project.to_api_admin_project(tags)),
|
||||
@@ -410,6 +413,14 @@ pub async fn update_project_handler(
|
||||
|
||||
tracing::info!(project_id = %project.id, project_name = %project.name, "Project updated");
|
||||
|
||||
// Invalidate cached pages that display projects
|
||||
// Also invalidate slug-based path in case project detail pages exist
|
||||
let project_path = format!("/projects/{}", project.slug);
|
||||
state
|
||||
.isr_cache
|
||||
.invalidate_many(&["/", "/projects", &project_path])
|
||||
.await;
|
||||
|
||||
Json(project.to_api_admin_project(tags)).into_response()
|
||||
}
|
||||
|
||||
@@ -469,6 +480,14 @@ pub async fn delete_project_handler(
|
||||
match db::delete_project(&state.pool, project_id).await {
|
||||
Ok(()) => {
|
||||
tracing::info!(project_id = %project_id, project_name = %project.name, "Project deleted");
|
||||
|
||||
// Invalidate cached pages that display projects
|
||||
let project_path = format!("/projects/{}", project.slug);
|
||||
state
|
||||
.isr_cache
|
||||
.invalidate_many(&["/", "/projects", &project_path])
|
||||
.await;
|
||||
|
||||
Json(project.to_api_admin_project(tags)).into_response()
|
||||
}
|
||||
Err(err) => {
|
||||
@@ -588,13 +607,18 @@ pub async fn add_project_tag_handler(
|
||||
};
|
||||
|
||||
match db::add_tag_to_project(&state.pool, project_id, tag_id).await {
|
||||
Ok(()) => (
|
||||
StatusCode::CREATED,
|
||||
Json(serde_json::json!({
|
||||
"message": "Tag added to project"
|
||||
})),
|
||||
)
|
||||
.into_response(),
|
||||
Ok(()) => {
|
||||
// Invalidate cached pages - tags affect how projects are displayed
|
||||
state.isr_cache.invalidate_many(&["/", "/projects"]).await;
|
||||
|
||||
(
|
||||
StatusCode::CREATED,
|
||||
Json(serde_json::json!({
|
||||
"message": "Tag added to project"
|
||||
})),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
Err(sqlx::Error::Database(db_err)) if db_err.is_foreign_key_violation() => (
|
||||
StatusCode::NOT_FOUND,
|
||||
Json(serde_json::json!({
|
||||
@@ -655,13 +679,18 @@ pub async fn remove_project_tag_handler(
|
||||
};
|
||||
|
||||
match db::remove_tag_from_project(&state.pool, project_id, tag_id).await {
|
||||
Ok(()) => (
|
||||
StatusCode::OK,
|
||||
Json(serde_json::json!({
|
||||
"message": "Tag removed from project"
|
||||
})),
|
||||
)
|
||||
.into_response(),
|
||||
Ok(()) => {
|
||||
// Invalidate cached pages - tags affect how projects are displayed
|
||||
state.isr_cache.invalidate_many(&["/", "/projects"]).await;
|
||||
|
||||
(
|
||||
StatusCode::OK,
|
||||
Json(serde_json::json!({
|
||||
"message": "Tag removed from project"
|
||||
})),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::error!(error = %err, "Failed to remove tag from project");
|
||||
(
|
||||
|
||||
+12
-2
@@ -78,7 +78,12 @@ pub async fn create_tag_handler(
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(tag) => (StatusCode::CREATED, Json(tag.to_api_tag())).into_response(),
|
||||
Ok(tag) => {
|
||||
// Invalidate cached pages - tag list appears on project pages
|
||||
state.isr_cache.invalidate_many(&["/", "/projects"]).await;
|
||||
|
||||
(StatusCode::CREATED, Json(tag.to_api_tag())).into_response()
|
||||
}
|
||||
Err(sqlx::Error::Database(db_err)) if db_err.is_unique_violation() => (
|
||||
StatusCode::CONFLICT,
|
||||
Json(serde_json::json!({
|
||||
@@ -219,7 +224,12 @@ pub async fn update_tag_handler(
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(updated_tag) => Json(updated_tag.to_api_tag()).into_response(),
|
||||
Ok(updated_tag) => {
|
||||
// Invalidate cached pages - tag updates affect project displays
|
||||
state.isr_cache.invalidate_many(&["/", "/projects"]).await;
|
||||
|
||||
Json(updated_tag.to_api_tag()).into_response()
|
||||
}
|
||||
Err(sqlx::Error::Database(db_err)) if db_err.is_unique_violation() => (
|
||||
StatusCode::CONFLICT,
|
||||
Json(serde_json::json!({
|
||||
|
||||
+15
@@ -7,6 +7,7 @@ use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitEx
|
||||
|
||||
mod assets;
|
||||
mod auth;
|
||||
mod cache;
|
||||
mod config;
|
||||
mod db;
|
||||
mod formatter;
|
||||
@@ -22,6 +23,7 @@ mod state;
|
||||
mod tarpit;
|
||||
mod utils;
|
||||
|
||||
use cache::{IsrCache, IsrCacheConfig};
|
||||
use config::{Args, ListenAddr};
|
||||
use formatter::{CustomJsonFormatter, CustomPrettyFormatter};
|
||||
use health::HealthChecker;
|
||||
@@ -153,12 +155,25 @@ async fn main() {
|
||||
"Tarpit initialized"
|
||||
);
|
||||
|
||||
// Initialize ISR cache
|
||||
let isr_cache_config = IsrCacheConfig::from_env();
|
||||
let isr_cache = Arc::new(IsrCache::new(isr_cache_config.clone()));
|
||||
|
||||
tracing::info!(
|
||||
enabled = isr_cache_config.enabled,
|
||||
max_entries = isr_cache_config.max_entries,
|
||||
fresh_sec = isr_cache_config.fresh_duration.as_secs(),
|
||||
stale_sec = isr_cache_config.stale_duration.as_secs(),
|
||||
"ISR cache initialized"
|
||||
);
|
||||
|
||||
let state = Arc::new(AppState {
|
||||
client,
|
||||
health_checker,
|
||||
tarpit_state,
|
||||
pool: pool.clone(),
|
||||
session_manager: session_manager.clone(),
|
||||
isr_cache,
|
||||
});
|
||||
|
||||
// Regenerate common OGP images on startup
|
||||
|
||||
+147
-47
@@ -6,19 +6,21 @@ use axum::{
|
||||
use std::{net::SocketAddr, sync::Arc, time::Duration};
|
||||
|
||||
use crate::{
|
||||
assets, db,
|
||||
assets,
|
||||
cache::{self, CachedResponse},
|
||||
db,
|
||||
state::{AppState, ProxyError},
|
||||
tarpit::{self, TarpitState},
|
||||
utils,
|
||||
};
|
||||
|
||||
/// ISR handler - serves pages through Bun SSR with session validation
|
||||
/// ISR handler - serves pages through Bun SSR with caching and session validation
|
||||
#[tracing::instrument(skip(state, req), fields(path = %req.uri().path(), method = %req.method()))]
|
||||
pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Response {
|
||||
let method = req.method().clone();
|
||||
let uri = req.uri();
|
||||
let path = uri.path();
|
||||
let query = uri.query().unwrap_or("");
|
||||
let query = uri.query();
|
||||
|
||||
if method != axum::http::Method::GET && method != axum::http::Method::HEAD {
|
||||
tracing::warn!(method = %method, path = %path, "Non-GET/HEAD request to non-API route");
|
||||
@@ -72,14 +74,11 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Re
|
||||
return response;
|
||||
}
|
||||
|
||||
let path_with_query = if query.is_empty() {
|
||||
path.to_string()
|
||||
} else {
|
||||
format!("{path}?{query}")
|
||||
};
|
||||
let path_with_query = cache::cache_key(path, query);
|
||||
|
||||
// Build trusted headers to forward to downstream
|
||||
let mut forward_headers = HeaderMap::new();
|
||||
let mut is_authenticated = false;
|
||||
|
||||
// SECURITY: Strip any X-Session-User header from incoming request to prevent spoofing
|
||||
|
||||
@@ -101,6 +100,7 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Re
|
||||
axum::http::HeaderValue::from_str(&session.username)
|
||||
{
|
||||
forward_headers.insert("x-session-user", username_value);
|
||||
is_authenticated = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -111,51 +111,64 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Re
|
||||
}
|
||||
}
|
||||
|
||||
// Determine if this request can use the cache
|
||||
// Skip cache for authenticated requests (they see different content)
|
||||
let use_cache = !is_authenticated && cache::is_cacheable_path(path);
|
||||
|
||||
// Try to serve from cache for public requests
|
||||
if use_cache {
|
||||
if let Some(cached) = state.isr_cache.get(&path_with_query).await {
|
||||
let fresh_duration = state.isr_cache.config.fresh_duration;
|
||||
let stale_duration = state.isr_cache.config.stale_duration;
|
||||
|
||||
if cached.is_fresh(fresh_duration) {
|
||||
// Fresh cache hit - serve immediately
|
||||
let age_ms = cached.age().as_millis() as u64;
|
||||
tracing::debug!(cache = "hit", age_ms, "ISR cache hit (fresh)");
|
||||
|
||||
return serve_cached_response(&cached, is_head);
|
||||
} else if cached.is_stale_but_usable(fresh_duration, stale_duration) {
|
||||
// Stale cache hit - serve immediately and refresh in background
|
||||
let age_ms = cached.age().as_millis() as u64;
|
||||
tracing::debug!(cache = "stale", age_ms, "ISR cache hit (stale, refreshing)");
|
||||
|
||||
// Spawn background refresh if not already refreshing
|
||||
if state.isr_cache.start_refresh(&path_with_query) {
|
||||
let state_clone = state.clone();
|
||||
let path_clone = path_with_query.clone();
|
||||
tokio::spawn(async move {
|
||||
refresh_cache_entry(state_clone, path_clone).await;
|
||||
});
|
||||
}
|
||||
|
||||
return serve_cached_response(&cached, is_head);
|
||||
}
|
||||
// Cache entry is too old - fall through to fetch
|
||||
}
|
||||
}
|
||||
|
||||
// Cache miss or non-cacheable - fetch from Bun
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
match proxy_to_bun(&path_with_query, state.clone(), forward_headers).await {
|
||||
Ok((status, headers, body)) => {
|
||||
let duration_ms = start.elapsed().as_millis() as u64;
|
||||
let cache = "miss";
|
||||
|
||||
let is_static = utils::is_static_asset(path);
|
||||
let is_page = utils::is_page_route(path);
|
||||
|
||||
match (status.as_u16(), is_static, is_page) {
|
||||
(200..=299, true, _) => {
|
||||
tracing::trace!(status = status.as_u16(), duration_ms, cache, "ISR request");
|
||||
}
|
||||
(404, true, _) => {
|
||||
tracing::warn!(
|
||||
status = status.as_u16(),
|
||||
duration_ms,
|
||||
cache,
|
||||
"ISR request - missing asset"
|
||||
);
|
||||
}
|
||||
(500..=599, true, _) => {
|
||||
tracing::error!(
|
||||
status = status.as_u16(),
|
||||
duration_ms,
|
||||
cache,
|
||||
"ISR request - server error"
|
||||
);
|
||||
}
|
||||
(200..=299, _, true) => {
|
||||
tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
|
||||
}
|
||||
(404, _, true) => {}
|
||||
(500..=599, _, _) => {
|
||||
tracing::error!(
|
||||
status = status.as_u16(),
|
||||
duration_ms,
|
||||
cache,
|
||||
"ISR request - server error"
|
||||
);
|
||||
}
|
||||
_ => {
|
||||
tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
|
||||
}
|
||||
// Cache successful responses for public requests
|
||||
if use_cache && status.is_success() {
|
||||
let cached_response = CachedResponse::new(status, headers.clone(), body.clone());
|
||||
state
|
||||
.isr_cache
|
||||
.insert(path_with_query.clone(), cached_response)
|
||||
.await;
|
||||
tracing::debug!(
|
||||
cache = "miss",
|
||||
status = status.as_u16(),
|
||||
duration_ms,
|
||||
"ISR request (cached)"
|
||||
);
|
||||
} else {
|
||||
log_isr_request(path, status, duration_ms, "bypass");
|
||||
}
|
||||
|
||||
// Intercept error responses for HTML requests
|
||||
@@ -194,6 +207,93 @@ pub async fn isr_handler(State(state): State<Arc<AppState>>, req: Request) -> Re
|
||||
}
|
||||
}
|
||||
|
||||
/// Serve a cached response
|
||||
fn serve_cached_response(cached: &CachedResponse, is_head: bool) -> Response {
|
||||
if is_head {
|
||||
(cached.status, cached.headers.clone()).into_response()
|
||||
} else {
|
||||
(cached.status, cached.headers.clone(), cached.body.clone()).into_response()
|
||||
}
|
||||
}
|
||||
|
||||
/// Background task to refresh a stale cache entry
|
||||
async fn refresh_cache_entry(state: Arc<AppState>, cache_key: String) {
|
||||
// No auth headers for background refresh (public content only)
|
||||
let forward_headers = HeaderMap::new();
|
||||
|
||||
match proxy_to_bun(&cache_key, state.clone(), forward_headers).await {
|
||||
Ok((status, headers, body)) => {
|
||||
if status.is_success() {
|
||||
let cached_response = CachedResponse::new(status, headers, body);
|
||||
state
|
||||
.isr_cache
|
||||
.insert(cache_key.clone(), cached_response)
|
||||
.await;
|
||||
tracing::debug!(path = %cache_key, "Cache entry refreshed");
|
||||
} else {
|
||||
tracing::warn!(
|
||||
path = %cache_key,
|
||||
status = status.as_u16(),
|
||||
"Background refresh returned non-success status, keeping stale entry"
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
tracing::warn!(
|
||||
path = %cache_key,
|
||||
error = %err,
|
||||
"Background refresh failed, keeping stale entry"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Mark refresh as complete
|
||||
state.isr_cache.end_refresh(&cache_key);
|
||||
}
|
||||
|
||||
/// Log ISR request with appropriate level based on status
///
/// Level policy as encoded by the match below:
/// - static assets: 2xx -> trace, 404 -> warn (missing asset), 5xx -> error
/// - page routes:   2xx -> debug, 404 -> silent, 5xx -> error
/// - everything else: 5xx -> error, otherwise debug
///
/// `cache` is a short label (e.g. "miss"/"bypass") attached to each event.
fn log_isr_request(path: &str, status: StatusCode, duration_ms: u64, cache: &str) {
    let is_static = utils::is_static_asset(path);
    let is_page = utils::is_page_route(path);

    // NOTE(review): arm order matters - static-asset arms precede page arms,
    // so a path classified as both takes the static branch. Presumably the
    // two classifications are mutually exclusive; confirm in utils.
    match (status.as_u16(), is_static, is_page) {
        (200..=299, true, _) => {
            tracing::trace!(status = status.as_u16(), duration_ms, cache, "ISR request");
        }
        (404, true, _) => {
            tracing::warn!(
                status = status.as_u16(),
                duration_ms,
                cache,
                "ISR request - missing asset"
            );
        }
        (500..=599, true, _) => {
            tracing::error!(
                status = status.as_u16(),
                duration_ms,
                cache,
                "ISR request - server error"
            );
        }
        (200..=299, _, true) => {
            tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
        }
        // Page 404s are expected (probes, stale links) and intentionally silent.
        (404, _, true) => {}
        (500..=599, _, _) => {
            tracing::error!(
                status = status.as_u16(),
                duration_ms,
                cache,
                "ISR request - server error"
            );
        }
        _ => {
            tracing::debug!(status = status.as_u16(), duration_ms, cache, "ISR request");
        }
    }
}
|
||||
|
||||
/// Proxy a request to Bun SSR
|
||||
pub async fn proxy_to_bun(
|
||||
path: &str,
|
||||
|
||||
+5
-1
@@ -1,6 +1,9 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::{auth::SessionManager, health::HealthChecker, http::HttpClient, tarpit::TarpitState};
|
||||
use crate::{
|
||||
auth::SessionManager, cache::IsrCache, health::HealthChecker, http::HttpClient,
|
||||
tarpit::TarpitState,
|
||||
};
|
||||
|
||||
/// Application state shared across all handlers
|
||||
#[derive(Clone)]
|
||||
@@ -10,6 +13,7 @@ pub struct AppState {
|
||||
pub tarpit_state: Arc<TarpitState>,
|
||||
pub pool: sqlx::PgPool,
|
||||
pub session_manager: Arc<SessionManager>,
|
||||
pub isr_cache: Arc<IsrCache>,
|
||||
}
|
||||
|
||||
/// Errors that can occur during proxying to Bun
|
||||
|
||||
Reference in New Issue
Block a user