feat: implement unified deployment with Docker and Railway integration

This commit introduces a comprehensive deployment strategy that unifies the frontend and backend into a single Docker container, with the Rust backend serving the frontend's static files. This streamlines the deployment process and improves the production architecture.

Key changes:
- Split CI/CD workflows: separated build.yaml (for CI/PR checks) and deploy.yaml (for production deployment)
- Implemented unified Docker deployment where the Axum server serves both API routes (under /api) and frontend static files
- Added GitHub Container Registry integration for Docker image distribution
- Updated Railway configuration to use the new healthcheck path (/api/health)
- Enhanced postgres.ts script with named volumes and constants for better container management
- Added API client utilities (web/lib/api.ts) and environment configuration (web/.env.example) for frontend-backend communication
- Configured Vite proxy for local development while supporting same-origin requests in production
- Updated Dockerfile to include frontend static files and proper environment variable handling

This architecture eliminates the need for separate deployments and CORS configuration, as the frontend and API are served from the same origin.
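
Because the browser bundle is served by the same Axum server that hosts the API, the frontend can reach the backend with plain relative URLs. A minimal sketch in TypeScript using the standard fetch API (the /api/health path is the healthcheck route added in this commit; everything else is illustrative):

// Runs in the browser bundle served by the Axum server itself,
// so the request stays on the same origin and needs no CORS headers.
const response = await fetch("/api/health");
if (!response.ok) {
  throw new Error(`Health check failed: ${response.status}`);
}
console.log("backend reachable:", response.status);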
commit 45e6131121 (parent 4002729ef7)
Author: Ryan Walters
Date: 2025-11-02 19:31:22 -06:00
11 changed files with 276 additions and 116 deletions


@@ -10,7 +10,7 @@ on:
workflow_dispatch:
permissions:
-contents: write
+contents: read
jobs:
build:
@@ -100,96 +100,3 @@ jobs:
path: ./target/release/${{ matrix.artifact_name }}
retention-days: 7
if-no-files-found: error
wasm:
name: Build (wasm32-unknown-emscripten)
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
# concurrency group is used to prevent multiple page deployments from being attempted at the same time
concurrency:
group: ${{ github.workflow }}-wasm
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Setup Emscripten SDK
uses: pyodide/setup-emsdk@v15
with:
version: 3.1.43
actions-cache-folder: "emsdk-cache-b"
- name: Setup Rust (WASM32 Emscripten)
uses: dtolnay/rust-toolchain@master
with:
target: wasm32-unknown-emscripten
toolchain: 1.86.0
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- name: Build with Emscripten
shell: bash
run: |
# Retry mechanism for Emscripten build - only retry on specific hash errors
MAX_RETRIES=3
RETRY_DELAY=30
for attempt in $(seq 1 $MAX_RETRIES); do
echo "Build attempt $attempt of $MAX_RETRIES"
# Capture output and check for specific error while preserving real-time output
if bun run -i pacman/web.build.ts 2>&1 | tee /tmp/build_output.log; then
echo "Build successful on attempt $attempt"
break
else
echo "Build failed on attempt $attempt"
# Check if the failure was due to the specific hash error
if grep -q "emcc: error: Unexpected hash:" /tmp/build_output.log; then
echo "::warning::Detected 'emcc: error: Unexpected hash:' error - will retry (attempt $attempt of $MAX_RETRIES)"
if [ $attempt -eq $MAX_RETRIES ]; then
echo "::error::All retry attempts failed. Exiting with error."
exit 1
fi
echo "Waiting $RETRY_DELAY seconds before retry..."
sleep $RETRY_DELAY
# Exponential backoff: double the delay for next attempt
RETRY_DELAY=$((RETRY_DELAY * 2))
else
echo "Build failed but not due to hash error - not retrying"
exit 1
fi
fi
done
- name: Install web dependencies
shell: bash
run: bun install
working-directory: web
- name: Build web frontend
shell: bash
run: bun run build
working-directory: web
- name: Upload Artifact
uses: actions/upload-pages-artifact@v4
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
with:
path: "./web/dist/"
retention-days: 7
- name: Deploy
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
uses: actions/deploy-pages@v4

.github/workflows/deploy.yaml (new file)

@@ -0,0 +1,140 @@
name: Deploy to Railway
on:
push:
branches:
- master
workflow_dispatch:
permissions:
contents: read
packages: write
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
build-and-deploy:
name: Build and Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Setup Emscripten SDK
uses: pyodide/setup-emsdk@v15
with:
version: 3.1.43
actions-cache-folder: "emsdk-cache-b"
- name: Setup Rust (WASM32 Emscripten)
uses: dtolnay/rust-toolchain@master
with:
target: wasm32-unknown-emscripten
toolchain: 1.86.0
- name: Rust Cache
uses: Swatinem/rust-cache@v2
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
# ========== WASM Build ==========
- name: Build WASM with Emscripten
shell: bash
run: |
# Retry mechanism for Emscripten build - only retry on specific hash errors
MAX_RETRIES=3
RETRY_DELAY=30
for attempt in $(seq 1 $MAX_RETRIES); do
echo "Build attempt $attempt of $MAX_RETRIES"
# Capture output and check for specific error while preserving real-time output
if bun run -i pacman/web.build.ts 2>&1 | tee /tmp/build_output.log; then
echo "Build successful on attempt $attempt"
break
else
echo "Build failed on attempt $attempt"
# Check if the failure was due to the specific hash error
if grep -q "emcc: error: Unexpected hash:" /tmp/build_output.log; then
echo "::warning::Detected 'emcc: error: Unexpected hash:' error - will retry (attempt $attempt of $MAX_RETRIES)"
if [ $attempt -eq $MAX_RETRIES ]; then
echo "::error::All retry attempts failed. Exiting with error."
exit 1
fi
echo "Waiting $RETRY_DELAY seconds before retry..."
sleep $RETRY_DELAY
# Exponential backoff: double the delay for next attempt
RETRY_DELAY=$((RETRY_DELAY * 2))
else
echo "Build failed but not due to hash error - not retrying"
exit 1
fi
fi
done
# ========== Frontend Build ==========
- name: Install web dependencies
shell: bash
run: bun install
working-directory: web
- name: Build web frontend
shell: bash
run: bun run build
working-directory: web
env:
# API URL is relative (/api) since frontend and backend are on same domain
VITE_API_URL: /api
- name: Verify frontend build output
shell: bash
run: |
if [ ! -d "web/dist/client" ]; then
echo "::error::Frontend build output not found at web/dist/client"
exit 1
fi
echo "Frontend build successful, files ready for Docker image"
ls -la web/dist/client
# ========== Docker Build and Push ==========
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=sha,prefix={{branch}}-
type=raw,value=latest,enable={{is_default_branch}}
- name: Build and push Docker image
uses: docker/build-push-action@v6
with:
context: .
file: ./pacman-server/Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: |
GIT_COMMIT_SHA=${{ github.sha }}

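Once this workflow has pushed an image, it can be smoke-tested locally before Railway picks it up. A sketch using Bun's shell; the ghcr.io/OWNER/REPO path, host port, and container name are placeholders rather than values taken from the workflow:

// smoke-test.ts - pull the published image and hit the unified healthcheck path
import { $ } from "bun";

const image = "ghcr.io/OWNER/REPO:latest"; // placeholder: actual tags come from docker/metadata-action
await $`docker pull ${image}`;
// Note: the server may also need runtime env vars (e.g. DATABASE_URL) to start cleanly.
await $`docker run --detach --name pacman-smoke --publish 3000:3000 ${image}`;
await Bun.sleep(2000); // give the server a moment to bind to PORT (defaults to 3000)
const res = await fetch("http://localhost:3000/api/health");
console.log("health status:", res.status);
await $`docker rm --force pacman-smoke`;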

@@ -4,6 +4,10 @@ import { join, dirname } from "path";
import { fileURLToPath } from "url";
import { createInterface } from "readline";
+// Constants for container and volume names
+const CONTAINER_NAME = "pacman-server-postgres";
+const VOLUME_NAME = "pacman-postgres-data";
// Helper function to get user input
async function getUserChoice(
  prompt: string,
@@ -122,9 +126,9 @@ if (databaseUrlLine !== -1) {
// Check if container exists
console.log("Checking for existing container...");
const containerExists =
-  await $`docker ps -a --filter name=pacman-server-postgres --format "{{.Names}}"`
+  await $`docker ps -a --filter name=${CONTAINER_NAME} --format "{{.Names}}"`
    .text()
-    .then((names) => names.trim() === "pacman-server-postgres")
+    .then((names) => names.trim() === CONTAINER_NAME)
    .catch(() => false);
let shouldReplaceContainer = false;
@@ -142,7 +146,13 @@ if (containerExists) {
if (shouldReplaceContainer) {
  console.log("Removing existing container...");
-  await $`docker rm --force --volumes pacman-server-postgres`;
+  await $`docker rm --force --volumes ${CONTAINER_NAME}`;
+  // Explicitly remove the named volume to ensure clean state
+  console.log("Removing volume...");
+  await $`docker volume rm ${VOLUME_NAME}`.catch(() => {
+    console.log("Volume doesn't exist or already removed");
+  });
} else {
  console.log("Using existing container");
}
@@ -151,12 +161,12 @@ if (containerExists) {
// Create container if needed
if (!containerExists || shouldReplaceContainer) {
  console.log("Creating PostgreSQL container...");
-  await $`docker run --detach --name pacman-server-postgres --publish 5432:5432 --env POSTGRES_USER=postgres --env POSTGRES_PASSWORD=postgres --env POSTGRES_DB=pacman-server postgres:17`;
+  await $`docker run --detach --name ${CONTAINER_NAME} --publish 5432:5432 --volume ${VOLUME_NAME}:/var/lib/postgresql/data --env POSTGRES_USER=postgres --env POSTGRES_PASSWORD=postgres --env POSTGRES_DB=pacman-server postgres:17`;
}
// Format DATABASE_URL
const databaseUrl =
-  "postgresql://postgres:postgres@localhost:5432/pacman-server";
+  "postgresql://postgres:postgres@127.0.0.1:5432/pacman-server";
// Handle the final action based on user choice
if (userChoice === "2") {

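With the named volume in place, Postgres data lives in a Docker-managed volume rather than the container's writable layer, so it survives container removal and recreation unless the volume itself is deleted (which the script now does explicitly when the user chooses to replace the container). A quick check, as a sketch that reuses the names from the constants above:

// volume-check.ts - confirm the named volume exists
import { $ } from "bun";

const VOLUME_NAME = "pacman-postgres-data";
const volumes = await $`docker volume ls --format "{{.Name}}"`.text();
if (volumes.split("\n").includes(VOLUME_NAME)) {
  console.log(`Volume ${VOLUME_NAME} exists - database files persist outside the container`);
} else {
  console.log(`Volume ${VOLUME_NAME} not found - run postgres.ts first`);
}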
Cargo.lock (generated)

@@ -1995,6 +1995,12 @@ dependencies = [
 "pin-project-lite",
]
+[[package]]
+name = "http-range-header"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
[[package]]
name = "httparse"
version = "1.10.1"
@@ -2664,6 +2670,16 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
+[[package]]
+name = "mime_guess"
+version = "2.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
+dependencies = [
+ "mime",
+ "unicase",
+]
[[package]]
name = "minidom"
version = "0.16.0"
@@ -5467,11 +5483,20 @@ checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
dependencies = [
 "bitflags 2.9.4",
 "bytes 1.10.1",
+ "futures-core",
 "futures-util",
 "http",
 "http-body",
+ "http-body-util",
+ "http-range-header",
+ "httpdate",
 "iri-string",
+ "mime",
+ "mime_guess",
+ "percent-encoding",
 "pin-project-lite",
+ "tokio 1.47.1",
+ "tokio-util",
 "tower",
 "tower-layer",
 "tower-service",
@@ -5647,6 +5672,12 @@ dependencies = [
 "version_check",
]
+[[package]]
+name = "unicase"
+version = "2.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
[[package]]
name = "unicode-bidi"
version = "0.3.18"


@@ -44,7 +44,7 @@ jsonwebtoken = { version = "9.3", default-features = false }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "json"] }
tracing-futures = { version = "0.2.5", features = ["tokio"] }
-tower-http = { version = "0.6", features = ["trace"] }
+tower-http = { version = "0.6", features = ["trace", "fs"] }
time = { version = "0.3", features = ["macros", "formatting"] }
yansi = "1"
s3-tokio = { version = "0.39.6", default-features = false }


@@ -1,4 +1,5 @@
ARG RUST_VERSION=1.89.0
+ARG GIT_COMMIT_SHA
FROM lukemathwalker/cargo-chef:latest-rust-${RUST_VERSION} AS chef
WORKDIR /app
@@ -25,6 +26,10 @@ FROM debian:bookworm-slim AS runtime
WORKDIR /app
COPY --from=builder /app/target/release/pacman-server /usr/local/bin/pacman-server
+# Copy frontend static files (built by GitHub Actions)
+# These files should be in web/dist/client/ in the build context
+COPY web/dist/client /app/static
# Install runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
@@ -35,12 +40,15 @@ ARG TZ=Etc/UTC
ENV TZ=${TZ}
# Optional build-time environment variable for embedding the Git commit SHA
-ARG RAILWAY_GIT_COMMIT_SHA
-ENV RAILWAY_GIT_COMMIT_SHA=${RAILWAY_GIT_COMMIT_SHA}
+ARG GIT_COMMIT_SHA
+ENV GIT_COMMIT_SHA=${GIT_COMMIT_SHA}
# Specify PORT at build-time or run-time, default to 3000
ARG PORT=3000
ENV PORT=${PORT}
EXPOSE ${PORT}
+# Set static files directory for the server to serve
+ENV STATIC_FILES_DIR=/app/static
CMD ["sh", "-c", "exec /usr/local/bin/pacman-server"]

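For reference, the image can also be built locally the same way the deploy workflow does, assuming the frontend has already been built into web/dist/client. A sketch with Bun's shell; the local tag name is arbitrary and the short SHA stands in for the full github.sha used in CI:

// build-image.ts - mirror the CI docker build, run from the repository root
import { $ } from "bun";

const sha = (await $`git rev-parse --short HEAD`.text()).trim();
// The build context is the repo root so COPY web/dist/client can find the frontend assets
await $`docker build --file pacman-server/Dockerfile --build-arg GIT_COMMIT_SHA=${sha} --tag pacman-server:local .`;
console.log(`Built pacman-server:local with GIT_COMMIT_SHA=${sha}`);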

@@ -2,10 +2,12 @@ use axum::{routing::get, Router};
use axum_cookie::CookieLayer;
use dashmap::DashMap;
use jsonwebtoken::{DecodingKey, EncodingKey};
+use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{Notify, RwLock};
use tokio::task::JoinHandle;
+use tower_http::services::{ServeDir, ServeFile};
use tracing::info_span;
use crate::data::pool::PgPool;
@@ -159,8 +161,25 @@ pub fn make_span<B>(request: &axum::http::Request<B>) -> tracing::Span {
/// Create the application router with all routes and middleware
pub fn create_router(app_state: AppState) -> Router {
-    Router::new()
-        .route("/", get(|| async { "Hello, World! Visit /auth/github to start OAuth flow." }))
+    // Get static files directory from environment variable
+    // Default to /app/static for production (Docker), or web/dist/client for local dev
+    let static_dir = std::env::var("STATIC_FILES_DIR").unwrap_or_else(|_| {
+        if std::path::Path::new("/app/static").exists() {
+            "/app/static".to_string()
+        } else {
+            "web/dist/client".to_string()
+        }
+    });
+    let static_path = PathBuf::from(&static_dir);
+    let index_path = static_path.join("index.html");
+    // Create API router with all backend routes
+    let api_router = Router::new()
+        .route(
+            "/",
+            get(|| async { "Pac-Man API Server. Visit /api/auth/github to start OAuth flow." }),
+        )
        .route("/health", get(routes::health_handler))
        .route("/auth/providers", get(routes::list_providers_handler))
        .route("/auth/{provider}", get(routes::oauth_authorize_handler))
@@ -169,14 +188,28 @@ pub fn create_router(app_state: AppState) -> Router {
        .route("/profile", get(routes::profile_handler))
        .with_state(app_state)
        .layer(CookieLayer::default())
-        .layer(axum::middleware::from_fn(inject_server_header))
-        .layer(
-            tower_http::trace::TraceLayer::new_for_http()
-                .make_span_with(make_span)
-                .on_request(|_request: &axum::http::Request<axum::body::Body>, _span: &tracing::Span| {
-                    // Disable request logging by doing nothing
-                }),
-        )
+        .layer(axum::middleware::from_fn(inject_server_header));
+    // Create main router with API routes nested under /api
+    let router = Router::new().nest("/api", api_router);
+    // Add static file serving if the directory exists
+    let router = if static_path.exists() {
+        tracing::info!(path = %static_dir, "Serving static files from directory");
+        router.fallback_service(ServeDir::new(&static_path).not_found_service(ServeFile::new(&index_path)))
+    } else {
+        tracing::warn!(path = %static_dir, "Static files directory not found, serving API only");
+        router
+    };
+    // Add tracing layer to the entire router
+    router.layer(
+        tower_http::trace::TraceLayer::new_for_http()
+            .make_span_with(make_span)
+            .on_request(|_request: &axum::http::Request<axum::body::Body>, _span: &tracing::Span| {
+                // Disable request logging by doing nothing
+            }),
+    )
}
/// Inject the server header into responses

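The effect of this routing split can be checked against a running server: paths under /api reach the Axum handlers, while any other path falls back to the SPA's index.html via ServeDir. A TypeScript sketch, assuming a production-style container listening on port 3000:

// routing-check.ts
const base = "http://localhost:3000";

const api = await fetch(`${base}/api/health`);
console.log("API route status:", api.status);

// An arbitrary client-side route should be answered with index.html by the static fallback
const spa = await fetch(`${base}/some/client-route`);
console.log("SPA fallback content-type:", spa.headers.get("content-type")); // expected: text/html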

@@ -2,11 +2,8 @@
  "$schema": "https://railway.com/railway.schema.json",
  "deploy": {
    "drainingSeconds": 10,
-    "healthcheckPath": "/health",
+    "healthcheckPath": "/api/health",
    "healthcheckTimeout": 90,
    "restartPolicyMaxRetries": 3
-  },
-  "build": {
-    "dockerfilePath": "/pacman-server/Dockerfile"
  }
}

web/.env.example (new file)

@@ -0,0 +1,11 @@
# Frontend Environment Variables
# API URL (for production builds)
# In production with unified deployment, this should be "/api" (same-origin)
# For local development, this is handled by the Vite proxy
VITE_API_URL=/api
# API Proxy Target (for local development only)
# Point this to your local backend server
# Default: http://localhost:3001 (backend runs on 3001, frontend on 3000)
VITE_API_TARGET=http://localhost:3001

web/lib/api.ts (new file)

@@ -0,0 +1,13 @@
// Get API base URL from environment variable, or default to /api for same-origin requests
export const API_BASE_URL = import.meta.env.VITE_API_URL || "/api";
/**
* Helper function to construct full API URLs
* @param path - API endpoint path (without leading slash, e.g., "leaderboard/global")
* @returns Full API URL
*/
export function getApiUrl(path: string): string {
// Remove leading slash if present to avoid double slashes
const cleanPath = path.startsWith("/") ? path.slice(1) : path;
return `${API_BASE_URL}/${cleanPath}`;
}

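Typical usage of the helper from frontend code, as a sketch; the import path depends on where the caller lives, and the endpoint is the illustrative one from the doc comment above:

import { getApiUrl } from "./lib/api";

// Resolves to "/api/leaderboard/global": same-origin in production,
// proxied to the local backend by Vite during development.
const res = await fetch(getApiUrl("leaderboard/global"));
const scores = await res.json();
console.log(scores);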

@@ -8,4 +8,14 @@ export default defineConfig({
  build: {
    target: "es2022",
  },
+  server: {
+    // Proxy API requests to the backend server during local development
+    // In production, both frontend and API are served from the same origin
+    proxy: {
+      '/api': {
+        target: process.env.VITE_API_TARGET || 'http://localhost:3001',
+        changeOrigin: true,
+      },
+    },
+  },
});
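
One caveat worth noting: process.env inside vite.config.ts only sees variables exported in the shell, not values that live solely in web/.env. If VITE_API_TARGET should also be picked up from the .env file, Vite's loadEnv helper is one option. A sketch, abbreviated to the options shown above and not the full config:

// vite.config.ts (sketch) - read VITE_API_TARGET from web/.env as well as the shell
import { defineConfig, loadEnv } from "vite";

export default defineConfig(({ mode }) => {
  const env = loadEnv(mode, process.cwd(), "");
  return {
    build: { target: "es2022" },
    server: {
      proxy: {
        "/api": {
          target: env.VITE_API_TARGET || "http://localhost:3001",
          changeOrigin: true,
        },
      },
    },
  };
});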