Compare commits

71 commits: v0.80.3 ... 93e0895fb0

Commits (SHA1): 93e0895fb0, a60d309a66, 203a5c0e2e, cb50ade88f, 984a2e95ca, 50c0033f2f, 61a1590289, 7eb4705b7c, a98ad23348, 3e08720b43, c306e992c4, 9bf8d0428c, e00d209047, 8be95a20ba, ad6fd00197, 52dee3eee4, 83e389d789, 8f8f82630f, 45e6131121, 4002729ef7, fb98c077b5, ffc5b8d15b, 4f49355892, 07e0709c50, aeb03aaf52, 3f6126418a, 54ef292606, 13e592502f, d9519746b8, 55b31ba31e, c524fdb3e7, bdd3c74a2d, 655c3c68d5, 6134da1d49, 0b5aeceb51, e1b266f3b2, 76985f6390, 4a041a7695, 62b619a3cc, 67c9460c84, 7e98bc7488, 698f95ff32, 54eca9f447, f9e79eb6d6, 408b660490, cc06cd88a1, e2c725cb95, 350f92ab21, 3ad00bdcba, 7f9d3e9158, 56e02e7253, e2f3f6790f, 1be59f474d, 916428fe76, e02c2286bb, c12dc11d8f, 1cf3b901e8, ac1417aabc, 8e23fb66a4, 92acb07b04, 18e750fa61, 8d9c0621c9, 750b47b609, b1fae907ee, f3db44c48b, 264478bdaa, f69a5c7d52, 7ede82cc5d, d0ee7db2ef, a3c4c94d42, 841cf5b83e
```diff
@@ -1,9 +1,12 @@
 [target.'cfg(target_os = "emscripten")']
 rustflags = [
     # Stack size is required for this project, it will crash otherwise.
-    "-C", "link-args=-sASYNCIFY=1 -sASYNCIFY_STACK_SIZE=8192 -sALLOW_MEMORY_GROWTH=1",
-    "-C", "link-args=-sUSE_SDL=2 -sUSE_SDL_IMAGE=2 -sUSE_SDL_MIXER=2 -sUSE_OGG=1 -sUSE_SDL_GFX=2 -sUSE_SDL_TTF=2 -sSDL2_IMAGE_FORMATS=['png']",
-    "-C", "link-args=--preload-file assets/game/",
+    "-C",
+    "link-args=-sASYNCIFY=1 -sASYNCIFY_STACK_SIZE=8192 -sALLOW_MEMORY_GROWTH=1",
+    "-C",
+    "link-args=-sUSE_SDL=2 -sUSE_SDL_IMAGE=2 -sUSE_SDL_MIXER=2 -sUSE_OGG=1 -sUSE_SDL_GFX=2 -sUSE_SDL_TTF=2 -sSDL2_IMAGE_FORMATS=['png']",
+    "-C",
+    "link-args=--preload-file pacman/assets/game/",
 ]
 runner = "node"

@@ -15,5 +18,6 @@ rustflags = [
     # The `sdl2` crate's build script uses `libpng`, which requires `zlib`.
     # By adding `-lz` here, we ensure it's passed to the linker after `libpng`,
     # which is required for the linker to correctly resolve symbols.
-    "-C", "link-arg=-lz",
+    "-C",
+    "link-arg=-lz",
 ]
```
```diff
@@ -1,12 +1,28 @@
 [profile.default]
 fail-fast = false
 slow-timeout = { period = "5s", terminate-after = 3 } # max 15 seconds
 retries = 1

 # CI machines are pretty slow, so we need to increase the timeout
 [profile.ci]
 slow-timeout = { period = "30s", terminate-after = 4 } # max 2 minutes for slow tests

 # Coverage works even slower, so we need to increase the timeout
 [profile.coverage]
 slow-timeout = { period = "45s", terminate-after = 5 } # max 3.75 minutes for slow tests
 status-level = "none"

 # Integration tests in SDL2 run serially (may not be required)
 [[profile.default.overrides]]
 filter = 'test(pacman::game::)'
 test-group = 'serial'

 # Integration tests run max 4 at a time
 [[profile.default.overrides]]
 filter = 'test(pacman-server::tests::oauth)'
 test-group = 'integration'

 [test-groups]
 # Ensure serial tests don't run in parallel
 serial = { max-threads = 1 }
 integration = { max-threads = 4 }
```
.dockerignore (new file, 15 lines)

```gitignore
# Build artifacts
/target
/emsdk
*.exe

/pacman/assets
/assets

# Development files
/.git
/*.md
/Justfile
/bacon.toml
/rust-toolchain.toml
/rustfmt.toml
```
.github/dependabot.yml (80 lines changed)

```diff
@@ -1,20 +1,86 @@
 # Dependabot Configuration
 # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
 #
 # Strategy:
 # - Weekly checks for faster vulnerability detection
 # - Separate patch/minor/major updates to prevent blocking
 # - Auto-merge patches via GitHub branch protection rules
 # - Limit concurrent PRs to avoid spam

 version: 2
 updates:
   # Cargo workspace (all Rust crates)
   - package-ecosystem: "cargo"
     directory: "/"
     schedule:
-      interval: "monthly"
+      interval: "weekly"
+      day: "monday"
     open-pull-requests-limit: 5
     ignore:
       # Bevy ECS 0.17+ requires API migration
       - dependency-name: "bevy_ecs"
         versions: ["0.17.x", "0.18.x", "0.19.x"]
       # jsonwebtoken 10+ requires crypto backend feature flag
       - dependency-name: "jsonwebtoken"
         versions: ["10.x", "11.x"]
     groups:
-      dependencies:
+      rust-patches:
+        applies-to: "version-updates"
+        update-types: ["patch"]
+      rust-minor:
+        applies-to: "version-updates"
+        update-types: ["minor"]
+      rust-major:
+        applies-to: "version-updates"
+        update-types: ["major"]
     labels:
       - "dependencies"
       - "rust"

   # Frontend (web/)
   - package-ecosystem: "npm"
     directory: "/web"
     schedule:
       interval: "weekly"
       day: "monday"
     open-pull-requests-limit: 5
     groups:
       frontend-patches:
         applies-to: "version-updates"
         update-types: ["patch"]
       frontend-minor:
         applies-to: "version-updates"
         update-types: ["minor"]
       frontend-major-framework:
         applies-to: "version-updates"
         update-types: ["major"]
         patterns:
           - "*"
           - "react"
           - "react-dom"
           - "vike"
           - "vite"
       frontend-major-other:
         applies-to: "version-updates"
         update-types: ["major"]
         exclude-patterns:
           - "react"
           - "react-dom"
           - "vike"
           - "vite"
     labels:
       - "dependencies"
       - "frontend"

   # GitHub Actions
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
-      interval: "monthly"
+      interval: "weekly"
+      day: "monday"
     open-pull-requests-limit: 5
     groups:
-      dependencies:
-        patterns:
-          - "*"
+      github-actions:
+        patterns: ["*"]
     labels:
       - "dependencies"
       - "github-actions"
```
.github/workflows/build.yaml (114 lines changed)

```diff
@@ -10,7 +10,7 @@ on:
   workflow_dispatch:

 permissions:
-  contents: write
+  contents: read

 jobs:
   build:
@@ -23,11 +23,11 @@ jobs:
           target: x86_64-unknown-linux-gnu
           artifact_name: pacman
           toolchain: 1.86.0
-        - os: macos-13
+        - os: macos-15-intel
           target: x86_64-apple-darwin
           artifact_name: pacman
           toolchain: 1.86.0
-        - os: macos-latest
+        - os: macos-15
           target: aarch64-apple-darwin
           artifact_name: pacman
           toolchain: 1.86.0
@@ -49,13 +49,23 @@ jobs:
       - name: Rust Cache
         uses: Swatinem/rust-cache@v2

+      - name: Get vcpkg baseline
+        id: vcpkg_version
+        shell: bash
+        run: |
+          VCPKG_REV=$(cargo metadata --format-version 1 --no-deps | jq -r '.packages[] | select(.name == "pacman") | .metadata.vcpkg.rev // "unknown"')
+          echo "version=$VCPKG_REV" >> $GITHUB_OUTPUT
+        working-directory: pacman
+
       - name: Cache vcpkg
         uses: actions/cache@v4
         with:
-          path: target/vcpkg
-          key: A-vcpkg-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('Cargo.toml', 'Cargo.lock') }}
+          path: |
+            target/vcpkg
+            ~/.cargo/bin/cargo-vcpkg
+          key: vcpkg-${{ steps.vcpkg_version.outputs.version }}-${{ runner.os }}-${{ matrix.target }}
           restore-keys: |
-            A-vcpkg-${{ runner.os }}-${{ matrix.target }}-
+            vcpkg-${{ steps.vcpkg_version.outputs.version }}-${{ runner.os }}-

       - name: Vcpkg Linux Dependencies
         if: runner.os == 'Linux'
@@ -63,13 +73,17 @@ jobs:
           sudo apt-get update
           sudo apt-get install -y libltdl-dev

-      - name: Vcpkg
+      - name: Setup vcpkg
         shell: bash
         run: |
           cargo install cargo-vcpkg
           cargo vcpkg -v build
+          echo "VCPKG_ROOT=${{ github.workspace }}/target/vcpkg" >> $GITHUB_ENV
+        working-directory: pacman

       - name: Build
         run: cargo build --release
+        working-directory: pacman

       - name: Acquire Package Version
         id: get_version
@@ -77,94 +91,12 @@
         run: |
           set -euo pipefail # exit on error
           echo "version=$(cargo metadata --format-version 1 --no-deps | jq '.packages[0].version' -r)" >> $GITHUB_OUTPUT
+        working-directory: pacman

       - name: Upload Artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
         with:
           name: "pacman-${{ steps.get_version.outputs.version }}-${{ matrix.target }}"
           path: ./target/release/${{ matrix.artifact_name }}
           retention-days: 7
           if-no-files-found: error

-  wasm:
-    name: Build (wasm32-unknown-emscripten)
-    runs-on: ubuntu-latest
-    permissions:
-      pages: write
-      id-token: write
-    # concurrency group is used to prevent multiple page deployments from being attempted at the same time
-    concurrency:
-      group: ${{ github.workflow }}-wasm
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v5
-
-      - name: Setup Emscripten SDK
-        uses: pyodide/setup-emsdk@v15
-        with:
-          version: 3.1.43
-          actions-cache-folder: "emsdk-cache-b"
-
-      - name: Setup Rust (WASM32 Emscripten)
-        uses: dtolnay/rust-toolchain@master
-        with:
-          target: wasm32-unknown-emscripten
-          toolchain: 1.86.0
-
-      - name: Rust Cache
-        uses: Swatinem/rust-cache@v2
-
-      - name: Setup Bun
-        uses: oven-sh/setup-bun@v2
-        with:
-          bun-version: latest
-
-      - name: Build with Emscripten
-        shell: bash
-        run: |
-          # Retry mechanism for Emscripten build - only retry on specific hash errors
-          MAX_RETRIES=3
-          RETRY_DELAY=30
-
-          for attempt in $(seq 1 $MAX_RETRIES); do
-            echo "Build attempt $attempt of $MAX_RETRIES"
-
-            # Capture output and check for specific error while preserving real-time output
-            if bun run -i web.build.ts 2>&1 | tee /tmp/build_output.log; then
-              echo "Build successful on attempt $attempt"
-              break
-            else
-              echo "Build failed on attempt $attempt"
-
-              # Check if the failure was due to the specific hash error
-              if grep -q "emcc: error: Unexpected hash:" /tmp/build_output.log; then
-                echo "::warning::Detected 'emcc: error: Unexpected hash:' error - will retry (attempt $attempt of $MAX_RETRIES)"
-
-                if [ $attempt -eq $MAX_RETRIES ]; then
-                  echo "::error::All retry attempts failed. Exiting with error."
-                  exit 1
-                fi
-
-                echo "Waiting $RETRY_DELAY seconds before retry..."
-                sleep $RETRY_DELAY
-
-                # Exponential backoff: double the delay for next attempt
-                RETRY_DELAY=$((RETRY_DELAY * 2))
-              else
-                echo "Build failed but not due to hash error - not retrying"
-                exit 1
-              fi
-            fi
-          done
-
-      - name: Upload Artifact
-        uses: actions/upload-pages-artifact@v4
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        with:
-          path: "./dist/"
-          retention-days: 7
-
-      - name: Deploy
-        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
-        uses: actions/deploy-pages@v4
```
.github/workflows/checks.yaml (4 lines changed)

```diff
@@ -47,14 +47,18 @@ jobs:
         run: |
           cargo install cargo-vcpkg
           cargo vcpkg -v build
+        working-directory: pacman

       - name: Run clippy
         run: cargo clippy -- -D warnings
+        working-directory: pacman

       - name: Check formatting
         run: cargo fmt -- --check
+        working-directory: pacman

       - uses: taiki-e/install-action@cargo-audit

       - name: Run security audit
         run: cargo audit
+        working-directory: pacman
```
.github/workflows/coverage.yaml (3 lines changed)

```diff
@@ -45,6 +45,7 @@ jobs:
         run: |
           cargo install cargo-vcpkg
           cargo vcpkg -v build
+        working-directory: pacman

       - uses: taiki-e/install-action@cargo-llvm-cov
       - uses: taiki-e/install-action@nextest
@@ -53,8 +54,10 @@ jobs:
       - name: Generate coverage report
         run: |
           just coverage
+        working-directory: pacman

       - name: Coveralls upload
+        if: ${{ secrets.COVERALLS_REPO_TOKEN != '' }}
         uses: coverallsapp/github-action@v2
         with:
           github-token: ${{ secrets.COVERALLS_REPO_TOKEN }}
```
.github/workflows/deploy.yaml (new file, 139 lines)

```yaml
name: Deploy to Railway

on:
  push:
    branches:
      - master
  workflow_dispatch:

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build-and-deploy:
    name: Build and Deploy
    runs-on: ubuntu-latest
    outputs:
      digest: ${{ steps.docker_build.outputs.digest }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5

      - name: Setup Emscripten SDK
        uses: pyodide/setup-emsdk@v15
        with:
          version: 3.1.43
          actions-cache-folder: "emsdk-cache-b"

      - name: Setup Rust (WASM32 Emscripten)
        uses: dtolnay/rust-toolchain@master
        with:
          target: wasm32-unknown-emscripten
          toolchain: 1.86.0

      - name: Rust Cache
        uses: Swatinem/rust-cache@v2

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2
        with:
          bun-version: latest

      # ========== WASM Build ==========
      - name: Build WASM with Emscripten
        shell: bash
        run: |
          # Retry mechanism for Emscripten build - only retry on specific hash errors
          MAX_RETRIES=3
          RETRY_DELAY=30

          for attempt in $(seq 1 $MAX_RETRIES); do
            echo "Build attempt $attempt of $MAX_RETRIES"

            # Capture output and check for specific error while preserving real-time output
            if bun run -i pacman/web.build.ts 2>&1 | tee /tmp/build_output.log; then
              echo "Build successful on attempt $attempt"
              break
            else
              echo "Build failed on attempt $attempt"

              # Check if the failure was due to the specific hash error
              if grep -q "emcc: error: Unexpected hash:" /tmp/build_output.log; then
                echo "::warning::Detected 'emcc: error: Unexpected hash:' error - will retry (attempt $attempt of $MAX_RETRIES)"

                if [ $attempt -eq $MAX_RETRIES ]; then
                  echo "::error::All retry attempts failed. Exiting with error."
                  exit 1
                fi

                echo "Waiting $RETRY_DELAY seconds before retry..."
                sleep $RETRY_DELAY

                # Exponential backoff: double the delay for next attempt
                RETRY_DELAY=$((RETRY_DELAY * 2))
              else
                echo "Build failed but not due to hash error - not retrying"
                exit 1
              fi
            fi
          done

      # ========== Docker Build and Push ==========
      # Note: Frontend is now built inside Docker using multi-stage build
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract metadata for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=sha,prefix={{branch}}-
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./pacman-server/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
          build-args: |
            GIT_COMMIT_SHA=${{ github.sha }}

      # Wait for ghcr.io propagation (paranoid safety)
      - name: Wait for registry propagation
        run: sleep 5

  # Deploy to Railway - separate job to use container properly
  deploy:
    name: Deploy to Railway
    runs-on: ubuntu-latest
    needs: build-and-deploy
    container: ghcr.io/railwayapp/cli:latest
    env:
      RAILWAY_TOKEN: ${{ secrets.RAILWAY_TOKEN }}
    steps:
      - name: Generate proxy Dockerfile
        run: echo "FROM ghcr.io/xevion/pac-man@${{ needs.build-and-deploy.outputs.digest }}" > Dockerfile

      - name: Deploy to Railway
        run: railway up --service pac-man
```
.github/workflows/tests.yaml (4 lines changed)

```diff
@@ -46,8 +46,10 @@ jobs:
         run: |
           cargo install cargo-vcpkg
           cargo vcpkg -v build
+        working-directory: pacman

       - uses: taiki-e/install-action@nextest

       - name: Run nextest
-        run: cargo nextest run --workspace
+        run: cargo nextest run --workspace --profile ci
+        working-directory: pacman
```
.gitignore (13 lines changed)

```diff
@@ -1,16 +1,24 @@
 # IDE, Other files
 .vscode
 .idea
+.claude/
+rust-sdl2-emscripten/

 # Build files
 target/
 dist/
 emsdk/
+node_modules/
+
+# Emscripten build outputs (generated by cargo build)
+web/public/pacman.data
+web/public/pacman.js
+web/public/pacman.wasm
+web/public/pacman.wasm.map

 # Site build files
 tailwindcss-*
-assets/site/build.css
+pacman/assets/site/build.css

 # Coverage reports
 lcov.info
@@ -23,3 +31,6 @@ flamegraph.svg

 # Logs
 *.log
+
+# Sensitive
+*.env
```
```diff
@@ -30,14 +30,14 @@ repos:

   - id: cargo-check
     name: cargo check
-    entry: cargo check --all-targets
+    entry: cargo check --workspace --all-targets
     language: system
     types_or: [rust, cargo, cargo-lock]
     pass_filenames: false

   - id: cargo-check-wasm
     name: cargo check for wasm32-unknown-emscripten
-    entry: cargo check --all-targets --target=wasm32-unknown-emscripten
+    entry: cargo check -p pacman --all-targets --target=wasm32-unknown-emscripten
     language: system
     types_or: [rust, cargo, cargo-lock]
     pass_filenames: false
```
.scripts/postgres.ts (new file, 196 lines)

```ts
import { $ } from "bun";
import { readFileSync, writeFileSync, existsSync } from "fs";
import { join, dirname } from "path";
import { fileURLToPath } from "url";
import { createInterface } from "readline";

// Constants for container and volume names
const CONTAINER_NAME = "pacman-server-postgres";
const VOLUME_NAME = "pacman-postgres-data";

// Helper function to get user input
async function getUserChoice(
  prompt: string,
  choices: string[],
  defaultIndex: number = 1
): Promise<string> {
  // Check if we're in an interactive TTY
  if (!process.stdin.isTTY) {
    console.log(
      "Non-interactive environment detected; selecting default option " +
        defaultIndex
    );
    return String(defaultIndex);
  }

  console.log(prompt);
  choices.forEach((choice, index) => {
    console.log(`${index + 1}. ${choice}`);
  });

  // Use readline for interactive input
  const rl = createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  return new Promise((resolve) => {
    const askForChoice = () => {
      rl.question("Enter your choice (1-3): ", (answer) => {
        const choice = answer.trim();
        if (["1", "2", "3"].includes(choice)) {
          rl.close();
          resolve(choice);
        } else {
          console.log("Invalid choice. Please enter 1, 2, or 3.");
          askForChoice();
        }
      });
    };
    askForChoice();
  });
}

// Get repository root path from script location
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const repoRoot = join(__dirname, "..");
const envPath = join(repoRoot, "pacman-server", ".env");

console.log("Checking for .env file...");

// Check if .env file exists and read it
let envContent = "";
let envLines: string[] = [];
let databaseUrlLine = -1;
let databaseUrlValue = "";

if (existsSync(envPath)) {
  console.log("Found .env file, reading...");
  envContent = readFileSync(envPath, "utf-8");
  envLines = envContent.split("\n");

  // Parse .env file for DATABASE_URL
  for (let i = 0; i < envLines.length; i++) {
    const line = envLines[i].trim();
    if (line.match(/^[A-Z_][A-Z0-9_]*=.*$/)) {
      if (line.startsWith("DATABASE_URL=")) {
        databaseUrlLine = i;
        databaseUrlValue = line.substring(13); // Remove "DATABASE_URL="
        break;
      }
    }
  }
} else {
  console.log("No .env file found, will create one");
}

// Determine user's choice
let userChoice = "2"; // Default to print

if (databaseUrlLine !== -1) {
  console.log(`Found existing DATABASE_URL: ${databaseUrlValue}`);
  userChoice = await getUserChoice("\nChoose an action:", [
    "Quit",
    "Print (create container, print DATABASE_URL)",
    "Replace (update DATABASE_URL in .env)",
  ]);

  if (userChoice === "1") {
    console.log("Exiting...");
    process.exit(0);
  }
} else {
  console.log("No existing DATABASE_URL found");

  // Ask what to do when no .env file or DATABASE_URL exists
  if (!existsSync(envPath)) {
    userChoice = await getUserChoice(
      "\nNo .env file found. What would you like to do?",
      [
        "Print (create container, print DATABASE_URL)",
        "Create .env file and add DATABASE_URL",
        "Quit",
      ]
    );

    if (userChoice === "3") {
      console.log("Exiting...");
      process.exit(0);
    }
  } else {
    console.log("Will add DATABASE_URL to existing .env file");
  }
}

// Check if container exists
console.log("Checking for existing container...");
const containerExists =
  await $`docker ps -a --filter name=${CONTAINER_NAME} --format "{{.Names}}"`
    .text()
    .then((names) => names.trim() === CONTAINER_NAME)
    .catch(() => false);

let shouldReplaceContainer = false;

if (containerExists) {
  console.log("Container already exists");

  // Always ask what to do if container exists
  const replaceChoice = await getUserChoice(
    "\nContainer exists. What would you like to do?",
    ["Use existing container", "Replace container (remove and create new)"],
    1
  );
  shouldReplaceContainer = replaceChoice === "2";

  if (shouldReplaceContainer) {
    console.log("Removing existing container...");
    await $`docker rm --force --volumes ${CONTAINER_NAME}`;

    // Explicitly remove the named volume to ensure clean state
    console.log("Removing volume...");
    await $`docker volume rm ${VOLUME_NAME}`.catch(() => {
      console.log("Volume doesn't exist or already removed");
    });
  } else {
    console.log("Using existing container");
  }
}

// Create container if needed
if (!containerExists || shouldReplaceContainer) {
  console.log("Creating PostgreSQL container...");
  await $`docker run --detach --name ${CONTAINER_NAME} --publish 5432:5432 --volume ${VOLUME_NAME}:/var/lib/postgresql/data --env POSTGRES_USER=postgres --env POSTGRES_PASSWORD=postgres --env POSTGRES_DB=pacman-server postgres:17`;
}

// Format DATABASE_URL
const databaseUrl =
  "postgresql://postgres:postgres@127.0.0.1:5432/pacman-server";

// Handle the final action based on user choice
if (userChoice === "2") {
  // Print option
  console.log(`\nDATABASE_URL=${databaseUrl}`);
} else if (
  userChoice === "3" ||
  (databaseUrlLine === -1 && userChoice === "2")
) {
  // Replace or add to .env file
  if (databaseUrlLine !== -1) {
    // Replace existing line
    console.log("Updating DATABASE_URL in .env file...");
    envLines[databaseUrlLine] = `DATABASE_URL=${databaseUrl}`;
    writeFileSync(envPath, envLines.join("\n"));
    console.log("Updated .env file");
  } else {
    // Add new line
    console.log("Adding DATABASE_URL to .env file...");
    const newContent =
      envContent +
      (envContent.endsWith("\n") ? "" : "\n") +
      `DATABASE_URL=${databaseUrl}\n`;
    writeFileSync(envPath, newContent);
    console.log("Added to .env file");
  }
}
```
Cargo.lock (generated, 5,220 lines changed; contents collapsed)
Cargo.toml (96 lines changed)

```diff
@@ -1,10 +1,10 @@
-[package]
-name = "pacman"
-version = "0.80.3"
+[workspace]
+members = ["pacman", "pacman-common", "pacman-server"]
+resolver = "2"
+
+[workspace.package]
 authors = ["Xevion"]
 edition = "2021"
 rust-version = "1.86.0"
 description = "A cross-platform retro Pac-Man clone, written in Rust and supported by SDL2"
 readme = true
 homepage = "https://pacman.xevion.dev"
 repository = "https://github.com/Xevion/Pac-Man"
@@ -12,67 +12,13 @@ license = "GPL-3.0-or-later"
 keywords = ["game", "pacman", "arcade", "sdl2"]
 categories = ["games", "emulators"]
 publish = false
-exclude = ["/assets/unpacked/**", "/assets/site/**", "/bacon.toml", "/Justfile"]
-default-run = "pacman"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [profile.dev]
 incremental = true

-[dependencies]
-bevy_ecs = "0.16.1"
-glam = "0.30.5"
-pathfinding = "4.14"
-tracing = { version = "0.1.41", features = ["max_level_trace", "release_max_level_debug"]}
-tracing-error = "0.2.0"
-tracing-subscriber = {version = "0.3.20", features = ["env-filter"]}
-time = { version = "0.3.43", features = ["formatting", "macros"] }
-thiserror = "2.0.16"
-anyhow = "1.0"
-smallvec = "1.15.1"
-bitflags = "2.9.4"
-micromap = "0.1.0"
-circular-buffer = "1.1.0"
-parking_lot = "0.12.3"
-strum = "0.27.2"
-strum_macros = "0.27.2"
-thousands = "0.2.0"
-num-width = "0.1.0"
-# While not actively used in code, `build.rs` generates code that relies on this. Keep the versions synchronized.
-phf = { version = "0.13.1", features = ["macros"] }
-
-# Windows-specific dependencies
-[target.'cfg(windows)'.dependencies]
-# Used for customizing console output on Windows; both are required due to the `windows` crate having poor Result handling with `GetStdHandle`.
-windows = { version = "0.62.0", features = ["Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console"] }
-windows-sys = { version = "0.61.0", features = ["Win32_System_Console"] }
-
-# Desktop-specific dependencies
-[target.'cfg(not(target_os = "emscripten"))'.dependencies]
-# On desktop platforms, build SDL2 with cargo-vcpkg
-sdl2 = { version = "0.38", default-features = false, features = ["image", "ttf", "gfx", "mixer", "unsafe_textures", "static-link", "use-vcpkg"] }
-rand = { version = "0.9.2", default-features = false, features = ["thread_rng"] }
-rust-embed = "8.7.2"
-spin_sleep = "1.3.3"
-
-# Browser-specific dependencies
-[target.'cfg(target_os = "emscripten")'.dependencies]
-# On Emscripten, we don't use cargo-vcpkg
-sdl2 = { version = "0.38", default-features = false, features = ["image", "ttf", "gfx", "mixer", "unsafe_textures"] }
-# TODO: Document why Emscripten cannot use `os_rng`.
-rand = { version = "0.9.2", default-features = false, features = ["small_rng", "os_rng"] }
-libc = "0.2.175" # TODO: Describe why this is required.
-
-[dev-dependencies]
-pretty_assertions = "1.4.1"
-speculoos = "0.13.0"
-
-[build-dependencies]
-phf = { version = "0.13.1", features = ["macros"] }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0.143"
-
-# phf generates runtime code which machete will not detect
-[package.metadata.cargo-machete]
-ignored = ["phf"]
+# Improve build times by optimizing sqlx-macros
+[profile.dev.package.sqlx-macros]
+opt-level = 3

 # Release profile for profiling (essentially the default 'release' profile with debug enabled)
 [profile.profile]
@@ -81,7 +27,8 @@ debug = true
 # Undo the customizations for our release profile
 opt-level = 3
 lto = false
-panic = 'unwind'
+panic = "abort"
+strip = "symbols"

 # Optimized release profile for size
 [profile.release]
@@ -94,22 +41,3 @@ panic = "abort"
 [profile.dev-release]
 inherits = "dev"
 debug-assertions = false
-
-[package.metadata.vcpkg]
-dependencies = ["sdl2", "sdl2-image", "sdl2-ttf", "sdl2-gfx", "sdl2-mixer"]
-git = "https://github.com/microsoft/vcpkg"
-rev = "2024.05.24" # to check for a new one, check https://github.com/microsoft/vcpkg/releases
-
-[package.metadata.vcpkg.target]
-x86_64-pc-windows-msvc = { triplet = "x64-windows-static-md" }
-x86_64-unknown-linux-gnu = { triplet = "x64-linux" }
-x86_64-apple-darwin = { triplet = "x64-osx" }
-aarch64-apple-darwin = { triplet = "arm64-osx" }
-
-[features]
-# Windows-specific features
-force-console = []
-default = []
-
-[lints.rust]
-unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage,coverage_nightly)', 'cfg(use_console)'] }
```
Justfile (86 lines changed)

```diff
@@ -1,47 +1,79 @@
 set shell := ["bash", "-c"]
 set windows-shell := ["powershell.exe", "-NoLogo", "-Command"]

 binary_extension := if os() == "windows" { ".exe" } else { "" }

-# !!! --ignore-filename-regex should be used on both reports & coverage testing
-# !!! --remap-path-prefix prevents the absolute path from being used in the generated report
+# Display available recipes
+default:
+    just --list

-# Generate HTML report (for humans, source line inspection)
+# Open HTML coverage report
 html: coverage
-    cargo llvm-cov report \
-    --remap-path-prefix \
-    --html \
-    --open
+    cargo llvm-cov report \
+        # prevents the absolute path from being used in the generated report
+        --remap-path-prefix \
+        --html \
+        --open

-# Display report (for humans)
+# Display coverage report
 report-coverage: coverage
-    cargo llvm-cov report --remap-path-prefix
+    cargo llvm-cov report --remap-path-prefix

-# Run & generate LCOV report (as base report)
+# Generate baseline LCOV report
 coverage:
-    cargo +nightly llvm-cov \
-    --lcov \
-    --remap-path-prefix \
-    --workspace \
-    --output-path lcov.info \
-    --profile coverage \
-    --no-fail-fast nextest
+    cargo +nightly llvm-cov \
+        --lcov \
+        --remap-path-prefix \
+        --workspace \
+        --output-path lcov.info \
+        --profile coverage \
+        --no-fail-fast nextest

-# Profile the project using 'samply'
+# Profile the project using samply
 samply:
-    cargo build --profile profile
-    samply record ./target/profile/pacman{{ binary_extension }}
+    cargo build --profile profile
+    samply record ./target/profile/pacman{{ binary_extension }}

 # Build the project for Emscripten
 web *args:
-    bun run web.build.ts {{args}};
-    caddy file-server --root dist
+    bun run pacman/web.build.ts {{args}}
+    bun run --cwd web build
+    caddy file-server --root web/dist/client

-# Run cargo fix
+# Fix linting errors & formatting
 fix:
-    cargo fix --workspace --lib --allow-dirty
-    cargo fmt --all
+    cargo fix --workspace --lib --allow-dirty
+    cargo fmt --all

 # Push commits & tags
 push:
-    git push origin --tags && git push
+    git push origin --tags;
+    git push

+# Create a postgres container for the server
+server-postgres:
+    bun run .scripts/postgres.ts
+
+# Build the server image
+server-image:
+    # build the server image
+    docker build \
+        --platform linux/amd64 \
+        --file ./pacman-server/Dockerfile \
+        --tag pacman-server \
+        .
+
+# Build and run the server in a Docker container
+run-server: server-image
+    # remove the server container if it exists
+    docker rm --force --volumes pacman-server
+
+    # run the server container
+    docker run \
+        --rm \
+        --stop-timeout 2 \
+        --name pacman-server \
+        --publish 3000:3000 \
+        --env PORT=3000 \
+        --env-file pacman-server/.env \
+        pacman-server
```
README.md (13 lines changed)

```diff
@@ -2,7 +2,7 @@
 <!-- markdownlint-disable MD041 -->

 <div align="center">
-  <img src="assets/repo/banner.png" alt="Pac-Man Banner Screenshot">
+  <img src="assets/banner.png" alt="Pac-Man Banner Screenshot">
 </div>

 # Pac-Man
@@ -49,16 +49,16 @@ However, every commit has build artifacts, so you can grab the [latest build art
 ## Screenshots

 <div align="center">
-  <img src="assets/repo/screenshots/0.png" alt="Screenshot 0 - Starting Game">
+  <img src="assets/screenshots/0.png" alt="Screenshot 0 - Starting Game">
   <p><em>Starting a new game</em></p>

-  <img src="assets/repo/screenshots/1.png" alt="Screenshot 1 - Eating Dots">
+  <img src="assets/screenshots/1.png" alt="Screenshot 1 - Eating Dots">
   <p><em>Pac-Man collecting dots and avoiding ghosts</em></p>

-  <img src="assets/repo/screenshots/2.png" alt="Screenshot 2 - Game Over">
+  <img src="assets/screenshots/2.png" alt="Screenshot 2 - Game Over">
   <p><em>Game over screen after losing all lives</em></p>

-  <img src="assets/repo/screenshots/3.png" alt="Screenshot 3 - Debug Mode">
+  <img src="assets/screenshots/3.png" alt="Screenshot 3 - Debug Mode">
   <p><em>Debug mode showing hitboxes, node graph, and performance details.</em></p>
 </div>
@@ -85,7 +85,8 @@ You can read the [roadmap](ROADMAP.md) file for more details on the project's go

 Since this project is still in progress, I'm only going to cover non-obvious build details. By reading the code, build scripts, and copying the online build workflows, you should be able to replicate the build process.

-- Install `cargo-vcpkg` with `cargo install cargo-vcpkg`, then run `cargo vcpkg build` to build the requisite dependencies via vcpkg.
+- Install `cargo-vcpkg` with `cargo install cargo-vcpkg`, then run `cargo vcpkg build --manifest-path pacman/Cargo.toml` to build the requisite dependencies via vcpkg.
+  - `--manifest-path` is only required if you run it from the root directory; you can omit it if you `cd` into the `pacman` directory first.
   - This is only required for the desktop builds, not the web build.
 - We use rustc 1.86.0 for the build, due to bulk-memory-opt related issues on wasm32-unknown-emscripten.
   - Technically, we could probably use stable or even nightly on desktop targets, but using different versions for different targets is a pain, mainly because of clippy warnings changing between versions.
```
Five image assets renamed or moved without modification (sizes unchanged: 9.3 KiB, 28 KiB, 26 KiB, 27 KiB, 40 KiB).
build.rs (61 lines changed)

```diff
@@ -1,61 +1,4 @@
-use std::collections::HashMap;
-use std::env;
-use std::fs::File;
-use std::io::{BufWriter, Write};
-use std::path::Path;
-
-use serde::Deserialize;
-
-#[derive(Debug, Deserialize)]
-struct AtlasMapper {
-    frames: HashMap<String, MapperFrame>,
-}
-
-#[derive(Copy, Clone, Debug, Deserialize)]
-struct MapperFrame {
-    x: u16,
-    y: u16,
-    width: u16,
-    height: u16,
-}
-
-impl MapperFrame {
-    fn to_u16vec2_format(self) -> String {
-        format!(
-            "MapperFrame {{ pos: glam::U16Vec2::new({}, {}), size: glam::U16Vec2::new({}, {}) }}",
-            self.x, self.y, self.width, self.height
-        )
-    }
-}
-
 fn main() {
-    let path = Path::new(&env::var("OUT_DIR").unwrap()).join("atlas_data.rs");
-    let mut file = BufWriter::new(File::create(&path).unwrap());
-
-    let atlas_json = include_str!("./assets/game/atlas.json");
-    let atlas_mapper: AtlasMapper = serde_json::from_str(atlas_json).unwrap();
-
-    writeln!(&mut file, "use phf::phf_map;").unwrap();
-    writeln!(&mut file, "use crate::texture::sprite::MapperFrame;").unwrap();
-
-    writeln!(
-        &mut file,
-        "pub static ATLAS_FRAMES: phf::Map<&'static str, MapperFrame> = phf_map! {{"
-    )
-    .unwrap();
-
-    for (name, frame) in atlas_mapper.frames {
-        writeln!(&mut file, "    \"{}\" => {},", name, frame.to_u16vec2_format()).unwrap();
-    }
-
-    writeln!(&mut file, "}};").unwrap();
-    println!("cargo:rerun-if-changed=assets/game/atlas.json");
-
-    #[cfg(target_os = "windows")]
-    {
-        if cfg!(any(feature = "force-console", debug_assertions)) {
-            println!("cargo:rustc-cfg=use_console");
-        }
-    }
+    // trigger recompilation when a new migration is added
+    println!("cargo:rerun-if-changed=migrations");
 }
```
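For context, the removed build script emitted a compile-time `phf` map generated from `atlas.json`. Judging from the format strings above, the generated `atlas_data.rs` was shaped roughly like this sketch (the frame name and coordinates below are illustrative, not real atlas entries):

```rust
// Illustrative shape of the generated OUT_DIR/atlas_data.rs; the frame name
// and coordinates are made up, the real ones came from assets/game/atlas.json.
use phf::phf_map;

use crate::texture::sprite::MapperFrame;

pub static ATLAS_FRAMES: phf::Map<&'static str, MapperFrame> = phf_map! {
    "pacman_up_0" => MapperFrame { pos: glam::U16Vec2::new(0, 0), size: glam::U16Vec2::new(16, 16) },
};
```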
bun.lock (new file, 17 lines)

```jsonc
{
  "lockfileVersion": 1,
  "workspaces": {
    "": {
      "name": "pacman",
      "devDependencies": {
        "@logtape/logtape": "^0.8.0",
        "ts-pattern": "^5.5.0",
      },
    },
  },
  "packages": {
    "@logtape/logtape": ["@logtape/logtape@0.8.2", "", {}, "sha512-KikaMHi64p0BHYrYOE2Lom4dOE3R8PGT+21QJ5Ql/SWy0CNOp69dkAlG9RXzENQ6PAMWtiU+4kelJYNvfUvHOQ=="],

    "ts-pattern": ["ts-pattern@5.9.0", "", {}, "sha512-6s5V71mX8qBUmlgbrfL33xDUwO0fq48rxAu2LBE11WBeGdpCPOsXksQbZJHvHwhrd3QjUusd3mAOM5Gg0mFBLg=="],
  }
}
```
package.json (new file, 8 lines)

```json
{
  "name": "pacman",
  "type": "module",
  "devDependencies": {
    "ts-pattern": "^5.5.0",
    "@logtape/logtape": "^0.8.0"
  }
}
```
pacman-common/Cargo.toml (new file, 16 lines)

```toml
[package]
name = "pacman-common"
version = "0.1.1"
authors.workspace = true
edition.workspace = true
rust-version = "1.86.0"
description = "A meta crate for sharing common code between the `pacman` and `pacman-server` crates"
readme.workspace = true
homepage.workspace = true
repository.workspace = true
license.workspace = true
keywords.workspace = true
categories.workspace = true
publish.workspace = true

[dependencies]
```
pacman-common/README.md (new file, 9 lines)

```markdown
# pacman-common

This crate contains common code (mostly API request & response types) for the `pacman` and `pacman-server` (leaderboard API) crates.

You might be more interested in reading the README for the repository, the `pacman` crate, or the `pacman-server` crate.

- [README.md](../README.md)
- [pacman/README.md](../pacman/README.md)
- [pacman-server/README.md](../pacman-server/README.md)
```
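Note that the `[dependencies]` table in this crate's manifest is still empty. A shared request/response type would presumably look something like the following sketch, assuming `serde` is eventually added as a dependency; the type and field names here are hypothetical, since nothing in this diff defines the actual API shapes:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical shared type for a score-submission request; illustrative only.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScoreSubmission {
    pub player_name: String,
    pub score: u32,
    pub level_reached: u8,
}
```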
pacman-common/src/main.rs (new file, 3 lines)

```rust
fn main() {
    println!("Hello, world!");
}
```
pacman-server/Cargo.toml (new file, 74 lines)

```toml
[package]
name = "pacman-server"
version = "0.4.0"
authors.workspace = true
edition.workspace = true
rust-version = "1.85.0"
description = "A leaderboard API for the Pac-Man game"
readme.workspace = true
homepage.workspace = true
repository.workspace = true
license.workspace = true
keywords.workspace = true
categories.workspace = true
publish.workspace = true
default-run = "pacman-server"

[lib]
name = "pacman_server"
path = "src/lib.rs"

# I have no idea why s3-tokio isn't being detected as a dependency, but it's not.
[package.metadata.cargo-machete]
ignored = ["s3-tokio"]

[dependencies]
axum = { version = "0.8", features = ["macros"] }
tokio = { version = "1", features = ["full"] }
oauth2 = "5"
reqwest = { version = "0.12", features = ["json"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sqlx = { version = "0.8", features = [
    "runtime-tokio-rustls",
    "postgres",
    "chrono",
] }
chrono = { version = "0.4", features = ["serde", "clock"] }
figment = { version = "0.10", features = ["env"] }
dotenvy = "0.15"
dashmap = "6.1"
axum-cookie = "0.2"
async-trait = "0.1"
jsonwebtoken = { version = "9.3", default-features = false }
tracing = "0.1.41"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "json"] }
tracing-futures = { version = "0.2.5", features = ["tokio"] }
tower-http = { version = "0.6", features = ["trace", "fs", "normalize-path"] }
time = { version = "0.3", features = ["macros", "formatting"] }
yansi = "1"
s3-tokio = { version = "0.39.6", default-features = false }
rustls = { version = "0.23", features = ["ring"] }
image = { version = "0.25", features = ["png", "jpeg"] }
sha2 = "0.10"
mockall = "0.14.0"
# validator = { version = "0.16", features = ["derive"] }

[dev-dependencies]
tokio = { version = "1", features = ["full"] }
http = "1"
hyper = { version = "1", features = ["server", "http1"] }
hyper-util = { version = "0.1", features = ["server", "tokio", "http1"] }
bytes = "1"
anyhow = "1"
axum-test = "18.1.0"
pretty_assertions = "1.4.1"
testcontainers = "0.25.2"
bon = "3.7.2"
cookie = "0.18.1"

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = [
    'cfg(coverage,coverage_nightly)',
    'cfg(use_console)',
] }
```
pacman-server/Dockerfile (new file, 67 lines)

```dockerfile
ARG RUST_VERSION=1.89.0
ARG GIT_COMMIT_SHA

FROM lukemathwalker/cargo-chef:latest-rust-${RUST_VERSION} AS chef
WORKDIR /app

# -- Planner stage --
FROM chef AS planner
COPY . .
RUN cargo chef prepare --bin pacman-server --recipe-path recipe.json

# -- Frontend builder stage --
FROM oven/bun:1 AS frontend-builder
WORKDIR /app

# Copy frontend package files first for layer caching
COPY web/package.json web/bun.lock* ./
RUN bun install --frozen-lockfile

# Copy all frontend source including public directory (contains WASM files)
COPY web/ ./

# Build the frontend (Vite will copy public/ contents to dist/client/)
RUN bun run build

# -- Backend builder stage --
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
RUN cargo chef cook --release --bin pacman-server --recipe-path recipe.json

# Copy the source code AFTER, so that dependencies are already cached
COPY . .

# Install build dependencies, then build the server
RUN apt-get update && apt-get install -y pkg-config libssl-dev && rm -rf /var/lib/apt/lists/*
RUN cargo build --package pacman-server --release --bin pacman-server

# -- Runtime stage --
FROM debian:bookworm-slim AS runtime
WORKDIR /app
COPY --from=builder /app/target/release/pacman-server /usr/local/bin/pacman-server

# Copy frontend static files from frontend-builder stage
COPY --from=frontend-builder /app/dist/client /app/static

# Install runtime dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    tzdata \
    && rm -rf /var/lib/apt/lists/*

ARG TZ=Etc/UTC
ENV TZ=${TZ}

# Optional build-time environment variable for embedding the Git commit SHA
ARG GIT_COMMIT_SHA
ENV GIT_COMMIT_SHA=${GIT_COMMIT_SHA}

# Specify PORT at build-time or run-time, default to 3000
ARG PORT=3000
ENV PORT=${PORT}
EXPOSE ${PORT}

# Set static files directory for the server to serve
ENV STATIC_FILES_DIR=/app/static

CMD ["sh", "-c", "exec /usr/local/bin/pacman-server"]
```
pacman-server/README.md (new file, 63 lines)

```markdown
# pacman-server

Despite the naming of this crate, it's not a server for the Pac-Man game allowing multiplayer or anything super interesting.

This crate is a webserver that hosts an OAuth login and leaderboard API for the main `pacman` crate to hook into.

## Features

- [x] Axum Webserver
  - [x] Health Check
  - [ ] Inbound Rate Limiting
  - [ ] Outbound Rate Limiting
  - [ ] Provider Circuit Breaker
- [x] Database
- [x] OAuth
  - [x] Discord
  - [x] GitHub
  - [ ] Google
- [ ] Leaderboard
  - [ ] Score Submission
  - [ ] Score Listings
    - [ ] Pagination
    - [ ] Global / Daily
  - [ ] Name Restrictions & Flagging
- [ ] Avatars
  - [ ] 8-bit Conversion
  - [ ] Storage?
- [ ] Common Server/Client Crate
- [ ] CI/CD & Tests

## Todo

1. Refresh Token Handling (Encryption, Expiration & Refresh Timings)
2. Refresh Token Background Job
3. S3 Storage for Avatars
4. Common Server/Client Crate, Basics
5. Crate-level Log Level Configuration
6. Span Tracing
7. Avatar Pixelization
8. Leaderboard API
9. React-based Frontend
10. Name Restrictions & Flagging
11. Simple CI/CD Checks & Tests
12. API Rate Limiting (outbound provider requests)
13. API Rate Limiting (inbound requests, by IP, by User)
14. Provider Circuit Breaker
15. Merge migration files

## Notes

### Image Handling

Avatar images are stored in S3 as follows:

- `avatars/{user_public_id}/{avatar_hash}.original.png`
- `avatars/{user_public_id}/{avatar_hash}.mini.png`

- The original image is converted to PNG and resized to a maximum of 512x512 pixels.
  - Ideally, non-square images are fitted to a square.
- The mini image is converted to PNG and resized to a maximum of 16x16, 24x24, or 32x32 pixels. TBD.
- All images receive a Content-Type header of `image/png`.

Image processing begins immediately and runs asynchronously, so a valid presigned URL can be generated right away.
```
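A minimal sketch of the "original" half of that pipeline, using the `image` and `sha2` crates from the dependency list above. The helper name and the choice of resize filter are assumptions, not code from this diff:

```rust
use std::io::Cursor;

use image::{imageops::FilterType, ImageError, ImageFormat};
use sha2::{Digest, Sha256};

// Hypothetical helper: decode a provider avatar, fit it within 512x512,
// re-encode as PNG, and derive the content-addressed S3 key described above.
fn process_original_avatar(raw: &[u8], user_public_id: &str) -> Result<(String, Vec<u8>), ImageError> {
    // Decode whatever format the provider returned (PNG/JPEG features are enabled).
    let decoded = image::load_from_memory(raw)?;

    // `resize` preserves aspect ratio, fitting the image within the bound.
    let fitted = decoded.resize(512, 512, FilterType::Lanczos3);

    // Re-encode as PNG into an in-memory buffer.
    let mut png = Vec::new();
    fitted.write_to(&mut Cursor::new(&mut png), ImageFormat::Png)?;

    // Content-addressed key, matching the layout in the notes above.
    let hash = format!("{:x}", Sha256::digest(&png));
    let key = format!("avatars/{user_public_id}/{hash}.original.png");
    Ok((key, png))
}
```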
pacman-server/migrations/20250917120000_init_users.sql (new file, 15 lines)

```sql
-- users table
CREATE TABLE IF NOT EXISTS users (
    id BIGSERIAL PRIMARY KEY,
    provider TEXT NOT NULL,
    provider_user_id TEXT NOT NULL,
    username TEXT NOT NULL,
    display_name TEXT NULL,
    email TEXT NULL,
    avatar_url TEXT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE (provider, provider_user_id)
);

CREATE INDEX IF NOT EXISTS idx_users_provider ON users (provider, provider_user_id);
```
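For illustration, an upsert against this initial schema might look like the following `sqlx` sketch. This is a hypothetical helper, not repository code, and note that a later migration in this diff moves the provider columns onto `oauth_accounts`:

```rust
use sqlx::PgPool;

// Hypothetical helper: insert-or-update a user row keyed by the
// (provider, provider_user_id) unique constraint above, returning its id.
async fn upsert_user(
    pool: &PgPool,
    provider: &str,
    provider_user_id: &str,
    username: &str,
) -> sqlx::Result<i64> {
    let (id,): (i64,) = sqlx::query_as(
        "INSERT INTO users (provider, provider_user_id, username)
         VALUES ($1, $2, $3)
         ON CONFLICT (provider, provider_user_id)
         DO UPDATE SET username = EXCLUDED.username, updated_at = NOW()
         RETURNING id",
    )
    .bind(provider)
    .bind(provider_user_id)
    .bind(username)
    .fetch_one(pool)
    .await?;
    Ok(id)
}
```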
pacman-server/migrations/20250917121000_oauth_accounts.sql (new file, 18 lines)

```sql
-- OAuth accounts linked to a single user
CREATE TABLE IF NOT EXISTS oauth_accounts (
    id BIGSERIAL PRIMARY KEY,
    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    provider TEXT NOT NULL,
    provider_user_id TEXT NOT NULL,
    email TEXT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    UNIQUE (provider, provider_user_id)
);

-- Ensure we can look up by email efficiently
CREATE INDEX IF NOT EXISTS idx_oauth_accounts_email ON oauth_accounts (email);

-- Optional: ensure users email uniqueness if desired; keep NULLs allowed
ALTER TABLE users
    ADD CONSTRAINT users_email_unique UNIQUE (email);
```
```diff
@@ -0,0 +1,15 @@
+-- Move provider-specific profile fields from users to oauth_accounts
+
+-- Add provider profile fields to oauth_accounts
+ALTER TABLE oauth_accounts
+    ADD COLUMN IF NOT EXISTS username TEXT,
+    ADD COLUMN IF NOT EXISTS display_name TEXT NULL,
+    ADD COLUMN IF NOT EXISTS avatar_url TEXT NULL;
+
+-- Drop provider-specific fields from users (keep email as canonical)
+ALTER TABLE users
+    DROP COLUMN IF EXISTS provider,
+    DROP COLUMN IF EXISTS provider_user_id,
+    DROP COLUMN IF EXISTS username,
+    DROP COLUMN IF EXISTS display_name,
+    DROP COLUMN IF EXISTS avatar_url;
```
235
pacman-server/src/app.rs
Normal file
@@ -0,0 +1,235 @@
|
||||
use axum::{routing::get, Router};
|
||||
use axum_cookie::CookieLayer;
|
||||
use dashmap::DashMap;
|
||||
use jsonwebtoken::{DecodingKey, EncodingKey};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::{Notify, RwLock};
|
||||
use tokio::task::JoinHandle;
|
||||
use tower_http::normalize_path::NormalizePathLayer;
|
||||
use tower_http::services::{ServeDir, ServeFile};
|
||||
use tracing::info_span;
|
||||
|
||||
use crate::data::pool::PgPool;
|
||||
use crate::{auth::AuthRegistry, config::Config, image::ImageStorage, routes};
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct Health {
|
||||
migrations: bool,
|
||||
database: bool,
|
||||
}
|
||||
|
||||
impl Health {
|
||||
pub fn ok(&self) -> bool {
|
||||
self.migrations && self.database
|
||||
}
|
||||
|
||||
pub fn set_migrations(&mut self, done: bool) {
|
||||
self.migrations = done;
|
||||
}
|
||||
|
||||
pub fn set_database(&mut self, ok: bool) {
|
||||
self.database = ok;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
pub auth: Arc<AuthRegistry>,
|
||||
pub sessions: Arc<DashMap<String, crate::auth::provider::AuthUser>>,
|
||||
pub jwt_encoding_key: Arc<EncodingKey>,
|
||||
pub jwt_decoding_key: Arc<DecodingKey>,
|
||||
pub db: PgPool,
|
||||
pub health: Arc<RwLock<Health>>,
|
||||
pub image_storage: Arc<ImageStorage>,
|
||||
pub healthchecker_task: Arc<RwLock<Option<JoinHandle<()>>>>,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
pub async fn new(config: Config, auth: AuthRegistry, db: PgPool, shutdown_notify: Arc<Notify>) -> Self {
|
||||
Self::new_with_database(config, auth, db, shutdown_notify, true).await
|
||||
}
|
||||
|
||||
pub async fn new_with_database(
|
||||
config: Config,
|
||||
auth: AuthRegistry,
|
||||
db: PgPool,
|
||||
shutdown_notify: Arc<Notify>,
|
||||
use_database: bool,
|
||||
) -> Self {
|
||||
let jwt_secret = config.jwt_secret.clone();
|
||||
|
||||
// Initialize image storage
|
||||
let image_storage = match ImageStorage::from_config(&config) {
|
||||
Ok(storage) => Arc::new(storage),
|
||||
Err(e) => {
|
||||
tracing::warn!(error = %e, "Failed to initialize image storage, avatar processing will be disabled");
|
||||
// Create a dummy storage that will fail gracefully
|
||||
Arc::new(ImageStorage::new(&config, "dummy").unwrap_or_else(|_| panic!("Failed to create dummy image storage")))
|
||||
}
|
||||
};
|
||||
|
||||
let app_state = Self {
|
||||
auth: Arc::new(auth),
|
||||
sessions: Arc::new(DashMap::new()),
|
||||
jwt_encoding_key: Arc::new(EncodingKey::from_secret(jwt_secret.as_bytes())),
|
||||
jwt_decoding_key: Arc::new(DecodingKey::from_secret(jwt_secret.as_bytes())),
|
||||
db: db,
|
||||
health: Arc::new(RwLock::new(Health::default())),
|
||||
image_storage,
|
||||
healthchecker_task: Arc::new(RwLock::new(None)),
|
||||
};
|
||||
|
||||
// Start the healthchecker task only if database is being used
|
||||
if use_database {
|
||||
let health_state = app_state.health.clone();
|
||||
let db_pool = app_state.db.clone();
|
||||
let healthchecker_task = app_state.healthchecker_task.clone();
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
tracing::trace!("Health checker task started");
|
||||
let mut backoff: u32 = 1;
|
||||
let mut next_sleep = Duration::from_secs(0);
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = shutdown_notify.notified() => {
|
||||
tracing::trace!("Health checker received shutdown notification; exiting");
|
||||
break;
|
||||
}
|
||||
|
||||
_ = tokio::time::sleep(next_sleep) => {
|
||||
// Run health check
|
||||
                        }
                    }

                    // Run the actual health check
                    let ok = sqlx::query("SELECT 1").execute(&db_pool).await.is_ok();
                    {
                        let mut h = health_state.write().await;
                        h.set_database(ok);
                    }
                    if ok {
                        tracing::trace!(database_ok = true, "Health check succeeded; scheduling next run in 90s");
                        backoff = 1;
                        next_sleep = Duration::from_secs(90);
                    } else {
                        backoff = (backoff.saturating_mul(2)).min(60);
                        tracing::trace!(database_ok = false, backoff, "Health check failed; backing off");
                        next_sleep = Duration::from_secs(backoff as u64);
                    }
                }
            });

            // Store the task handle
            let mut task_handle = healthchecker_task.write().await;
            *task_handle = Some(task);
        }

        app_state
    }

    /// Force an immediate health check (debug mode only)
    pub async fn check_health(&self) -> bool {
        let ok = sqlx::query("SELECT 1").execute(&self.db).await.is_ok();
        let mut h = self.health.write().await;
        h.set_database(ok);
        ok
    }
}

/// Create a custom span for HTTP requests with reduced verbosity
pub fn make_span<B>(request: &axum::http::Request<B>) -> tracing::Span {
    let path = request
        .uri()
        .path_and_query()
        .map(|v| v.as_str())
        .unwrap_or_else(|| request.uri().path());

    if request.method() == axum::http::Method::GET {
        info_span!(
            "request",
            path = %path,
        )
    } else {
        info_span!(
            "request",
            method = %request.method(),
            path = %path,
        )
    }
}

/// Create the application router with all routes and middleware
pub fn create_router(app_state: AppState) -> Router {
    // Get static files directory from environment variable
    // Default to /app/static for production (Docker), or web/dist/client for local dev
    let static_dir = std::env::var("STATIC_FILES_DIR").unwrap_or_else(|_| {
        if std::path::Path::new("/app/static").exists() {
            "/app/static".to_string()
        } else {
            "web/dist/client".to_string()
        }
    });

    let static_path = PathBuf::from(&static_dir);
    let index_path = static_path.join("index.html");

    // Create API router with all backend routes
    let api_router = Router::new()
        .route(
            "/",
            get(|| async { "Pac-Man API Server. Visit /api/auth/github to start OAuth flow." }),
        )
        .route("/health", get(routes::health_handler))
        .route("/auth/providers", get(routes::list_providers_handler))
        .route("/auth/{provider}", get(routes::oauth_authorize_handler))
        .route("/auth/{provider}/callback", get(routes::oauth_callback_handler))
        .route("/logout", get(routes::logout_handler))
        .route("/profile", get(routes::profile_handler))
        .with_state(app_state)
        .layer(CookieLayer::default())
        .layer(axum::middleware::from_fn(inject_server_header));

    // Create main router with API routes nested under /api
    let router = Router::new()
        .route(
            "/api/",
            get(|| async { "Pac-Man API Server. Visit /api/auth/github to start OAuth flow." }),
        )
        .nest("/api", api_router);

    // Add static file serving if the directory exists
    let router = if static_path.exists() {
        tracing::info!(path = %static_dir, "Serving static files from directory");
        router.fallback_service(ServeDir::new(&static_path).not_found_service(ServeFile::new(&index_path)))
    } else {
        tracing::warn!(path = %static_dir, "Static files directory not found, serving API only");
        router
    };

    // Add tracing layer to the entire router
    router.layer(NormalizePathLayer::trim_trailing_slash()).layer(
        tower_http::trace::TraceLayer::new_for_http()
            .make_span_with(make_span)
            .on_request(|_request: &axum::http::Request<axum::body::Body>, _span: &tracing::Span| {
                // Disable request logging by doing nothing
            }),
    )
}

/// Inject the server header into responses
async fn inject_server_header(
    req: axum::http::Request<axum::body::Body>,
    next: axum::middleware::Next,
) -> Result<axum::response::Response, axum::http::StatusCode> {
    let mut res = next.run(req).await;
    res.headers_mut().insert(
        axum::http::header::SERVER,
        axum::http::HeaderValue::from_static(SERVER_HEADER_VALUE),
    );
    Ok(res)
}

// Constant value for the Server header: "<crate>/<version>"
const SERVER_HEADER_VALUE: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"));
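
The failure branch of the health checker doubles the retry delay and caps it at 60 seconds. A minimal sketch of that schedule in isolation (a hypothetical unit test, not part of this change):

// Sketch: the health checker's backoff progression, extracted for illustration.
fn next_backoff(backoff: u32) -> u32 {
    (backoff.saturating_mul(2)).min(60)
}

#[test]
fn backoff_doubles_then_caps_at_sixty() {
    let mut b = 1u32;
    let mut seen = Vec::new();
    for _ in 0..7 {
        b = next_backoff(b);
        seen.push(b);
    }
    // 64 and beyond are clamped to the 60-second ceiling
    assert_eq!(seen, vec![2, 4, 8, 16, 32, 60, 60]);
}
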
127
pacman-server/src/auth/discord.rs
Normal file
@@ -0,0 +1,127 @@
use jsonwebtoken::EncodingKey;
use oauth2::{AuthorizationCode, CsrfToken, PkceCodeVerifier, Scope, TokenResponse};
use serde::{Deserialize, Serialize};

use std::sync::Arc;
use tracing::{trace, warn};

use crate::auth::provider::{AuthUser, AuthorizeInfo, OAuthProvider};
use crate::errors::ErrorResponse;
use crate::session;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiscordUser {
    pub id: String,
    pub username: String,
    pub global_name: Option<String>,
    pub email: Option<String>,
    pub verified: Option<bool>,
    pub avatar: Option<String>,
}

pub async fn fetch_discord_user(
    http_client: &reqwest::Client,
    access_token: &str,
) -> Result<DiscordUser, Box<dyn std::error::Error + Send + Sync>> {
    let response = http_client
        .get("https://discord.com/api/users/@me")
        .header("Authorization", format!("Bearer {}", access_token))
        .header("User-Agent", crate::config::USER_AGENT)
        .send()
        .await?;

    if !response.status().is_success() {
        warn!(status = %response.status(), endpoint = "/users/@me", "Discord API returned an error");
        return Err(format!("Discord API error: {}", response.status()).into());
    }

    let user: DiscordUser = response.json().await?;
    Ok(user)
}

pub struct DiscordProvider {
    pub client: super::OAuthClient,
    pub http: reqwest::Client,
}

impl DiscordProvider {
    pub fn new(client: super::OAuthClient, http: reqwest::Client) -> Arc<Self> {
        Arc::new(Self { client, http })
    }

    fn avatar_url_for(user_id: &str, avatar_hash: &str) -> String {
        let ext = if avatar_hash.starts_with("a_") { "gif" } else { "png" };
        format!("https://cdn.discordapp.com/avatars/{}/{}.{}", user_id, avatar_hash, ext)
    }
}

#[async_trait::async_trait]
impl OAuthProvider for DiscordProvider {
    fn id(&self) -> &'static str {
        "discord"
    }
    fn label(&self) -> &'static str {
        "Discord"
    }

    async fn authorize(&self, encoding_key: &EncodingKey) -> Result<AuthorizeInfo, ErrorResponse> {
        let (pkce_challenge, pkce_verifier) = oauth2::PkceCodeChallenge::new_random_sha256();
        let (authorize_url, csrf_state) = self
            .client
            .authorize_url(CsrfToken::new_random)
            .set_pkce_challenge(pkce_challenge)
            .add_scope(Scope::new("identify".to_string()))
            .add_scope(Scope::new("email".to_string()))
            .url();

        // Store PKCE verifier and CSRF state in session
        let session_token = session::create_pkce_session(pkce_verifier.secret(), csrf_state.secret(), encoding_key);

        trace!(state = %csrf_state.secret(), "Generated OAuth authorization URL");
        Ok(AuthorizeInfo {
            authorize_url,
            session_token,
        })
    }

    async fn exchange_code_for_token(&self, code: &str, verifier: &str) -> Result<String, ErrorResponse> {
        let token = self
            .client
            .exchange_code(AuthorizationCode::new(code.to_string()))
            .set_pkce_verifier(PkceCodeVerifier::new(verifier.to_string()))
            .request_async(&self.http)
            .await
            .map_err(|e| {
                warn!(error = %e, "Token exchange with Discord failed");
                ErrorResponse::bad_gateway("token_exchange_failed", Some(e.to_string()))
            })?;

        Ok(token.access_token().secret().to_string())
    }

    async fn fetch_user_from_token(&self, access_token: &str) -> Result<AuthUser, ErrorResponse> {
        let user = fetch_discord_user(&self.http, access_token).await.map_err(|e| {
            warn!(error = %e, "Failed to fetch Discord user profile");
            ErrorResponse::bad_gateway("discord_api_error", Some(format!("failed to fetch user: {}", e)))
        })?;

        let avatar_url = match (&user.id, &user.avatar) {
            (id, Some(hash)) => Some(Self::avatar_url_for(id, hash)),
            _ => None,
        };

        let (email, email_verified) = match (&user.email, user.verified) {
            (Some(e), Some(true)) => (Some(e.clone()), true),
            _ => (None, false),
        };

        Ok(AuthUser {
            id: user.id,
            username: user.username,
            name: user.global_name,
            email,
            email_verified,
            avatar_url,
        })
    }
}
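
`avatar_url_for` follows Discord's CDN convention: an `a_`-prefixed hash denotes an animated avatar served as a GIF. Expected outputs, assuming the private helper were exposed to tests (the ID and hash values below are made up):

// Static hash -> .png
assert_eq!(
    DiscordProvider::avatar_url_for("80351110224678912", "8342729096ea3675442027381ff50dfe"),
    "https://cdn.discordapp.com/avatars/80351110224678912/8342729096ea3675442027381ff50dfe.png"
);
// Animated hash (a_ prefix) -> .gif
assert_eq!(
    DiscordProvider::avatar_url_for("80351110224678912", "a_8342729096ea3675442027381ff50dfe"),
    "https://cdn.discordapp.com/avatars/80351110224678912/a_8342729096ea3675442027381ff50dfe.gif"
);
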
158
pacman-server/src/auth/github.rs
Normal file
@@ -0,0 +1,158 @@
use jsonwebtoken::EncodingKey;
use oauth2::{AuthorizationCode, CsrfToken, PkceCodeVerifier, Scope, TokenResponse};
use serde::{Deserialize, Serialize};

use std::sync::Arc;
use tracing::{trace, warn};

use crate::{
    auth::provider::{AuthUser, AuthorizeInfo, OAuthProvider},
    errors::ErrorResponse,
    session,
};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GitHubUser {
    pub id: u64,
    pub login: String,
    pub name: Option<String>,
    pub email: Option<String>,
    pub avatar_url: String,
    pub html_url: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct GitHubEmail {
    pub email: String,
    pub primary: bool,
    pub verified: bool,
    pub visibility: Option<String>,
}

/// Fetch user information from GitHub API
pub async fn fetch_github_user(
    http_client: &reqwest::Client,
    access_token: &str,
) -> Result<GitHubUser, Box<dyn std::error::Error + Send + Sync>> {
    let response = http_client
        .get("https://api.github.com/user")
        .header("Authorization", format!("Bearer {}", access_token))
        .header("Accept", "application/vnd.github.v3+json")
        .header("User-Agent", crate::config::USER_AGENT)
        .send()
        .await?;

    if !response.status().is_success() {
        warn!(status = %response.status(), endpoint = "/user", "GitHub API returned an error");
        return Err(format!("GitHub API error: {}", response.status()).into());
    }

    let user: GitHubUser = response.json().await?;
    Ok(user)
}

/// Fetch user emails from GitHub API
pub async fn fetch_github_emails(
    http_client: &reqwest::Client,
    access_token: &str,
) -> Result<Vec<GitHubEmail>, Box<dyn std::error::Error + Send + Sync>> {
    let response = http_client
        .get("https://api.github.com/user/emails")
        .header("Authorization", format!("Bearer {}", access_token))
        .header("Accept", "application/vnd.github.v3+json")
        .header("User-Agent", crate::config::USER_AGENT)
        .send()
        .await?;

    if !response.status().is_success() {
        warn!(status = %response.status(), endpoint = "/user/emails", "GitHub API returned an error");
        return Err(format!("GitHub API error: {}", response.status()).into());
    }

    let emails: Vec<GitHubEmail> = response.json().await?;
    Ok(emails)
}

pub struct GitHubProvider {
    pub client: super::OAuthClient,
    pub http: reqwest::Client,
}

impl GitHubProvider {
    pub fn new(client: super::OAuthClient, http: reqwest::Client) -> Arc<Self> {
        Arc::new(Self { client, http })
    }
}

#[async_trait::async_trait]
impl OAuthProvider for GitHubProvider {
    fn id(&self) -> &'static str {
        "github"
    }
    fn label(&self) -> &'static str {
        "GitHub"
    }

    async fn authorize(&self, encoding_key: &EncodingKey) -> Result<AuthorizeInfo, ErrorResponse> {
        let (pkce_challenge, pkce_verifier) = oauth2::PkceCodeChallenge::new_random_sha256();
        let (authorize_url, csrf_state) = self
            .client
            .authorize_url(CsrfToken::new_random)
            .set_pkce_challenge(pkce_challenge)
            .add_scope(Scope::new("user:email".to_string()))
            .add_scope(Scope::new("read:user".to_string()))
            .url();

        // Store PKCE verifier and CSRF state in session
        let session_token = session::create_pkce_session(pkce_verifier.secret(), csrf_state.secret(), encoding_key);

        trace!(state = %csrf_state.secret(), "Generated OAuth authorization URL");
        Ok(AuthorizeInfo {
            authorize_url,
            session_token,
        })
    }

    async fn exchange_code_for_token(&self, code: &str, verifier: &str) -> Result<String, ErrorResponse> {
        let token = self
            .client
            .exchange_code(AuthorizationCode::new(code.to_string()))
            .set_pkce_verifier(PkceCodeVerifier::new(verifier.to_string()))
            .request_async(&self.http)
            .await
            .map_err(|e| {
                warn!(error = %e, "Token exchange with GitHub failed");
                ErrorResponse::bad_gateway("token_exchange_failed", Some(e.to_string()))
            })?;

        Ok(token.access_token().secret().to_string())
    }

    async fn fetch_user_from_token(&self, access_token: &str) -> Result<AuthUser, ErrorResponse> {
        let user = fetch_github_user(&self.http, access_token).await.map_err(|e| {
            warn!(error = %e, "Failed to fetch GitHub user profile");
            ErrorResponse::bad_gateway("github_api_error", Some(format!("failed to fetch user: {}", e)))
        })?;

        let emails = fetch_github_emails(&self.http, access_token).await.map_err(|e| {
            warn!(error = %e, "Failed to fetch GitHub user emails");
            ErrorResponse::bad_gateway("github_api_error", Some(format!("failed to fetch emails: {}", e)))
        })?;

        let primary_email = emails.into_iter().find(|e| e.primary && e.verified);

        let (email, email_verified) = match primary_email {
            Some(e) => (Some(e.email), true),
            None => (user.email, false),
        };

        Ok(AuthUser {
            id: user.id.to_string(),
            username: user.login,
            name: user.name,
            email,
            email_verified,
            avatar_url: Some(user.avatar_url),
        })
    }
}
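
The email selection above only reports a verified address when GitHub marks it both `primary` and `verified`; otherwise it falls back to the (unverified) profile email. A small sketch with sample data:

// Sketch: only a primary AND verified address is treated as verified (sample values).
let emails = vec![
    GitHubEmail { email: "old@example.com".into(), primary: false, verified: true, visibility: None },
    GitHubEmail { email: "main@example.com".into(), primary: true, verified: true, visibility: Some("public".into()) },
];
let primary = emails.into_iter().find(|e| e.primary && e.verified);
assert_eq!(primary.map(|e| e.email).as_deref(), Some("main@example.com"));
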
67
pacman-server/src/auth/mod.rs
Normal file
@@ -0,0 +1,67 @@
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]

use std::collections::HashMap;
use std::sync::Arc;

use oauth2::{basic::BasicClient, EndpointNotSet, EndpointSet};

use crate::config::Config;

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod discord;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod github;
pub mod provider;

type OAuthClient =
    BasicClient<oauth2::EndpointSet, oauth2::EndpointNotSet, oauth2::EndpointNotSet, oauth2::EndpointNotSet, oauth2::EndpointSet>;

pub struct AuthRegistry {
    pub providers: HashMap<&'static str, Arc<dyn provider::OAuthProvider>>,
}

impl AuthRegistry {
    pub fn new(config: &Config) -> Result<Self, oauth2::url::ParseError> {
        let http = reqwest::ClientBuilder::new()
            .redirect(reqwest::redirect::Policy::none())
            .build()
            .expect("HTTP client should build");

        let github_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
            BasicClient::new(oauth2::ClientId::new(config.github_client_id.clone()))
                .set_client_secret(oauth2::ClientSecret::new(config.github_client_secret.clone()))
                .set_auth_uri(oauth2::AuthUrl::new("https://github.com/login/oauth/authorize".to_string())?)
                .set_token_uri(oauth2::TokenUrl::new(
                    "https://github.com/login/oauth/access_token".to_string(),
                )?)
                .set_redirect_uri(
                    oauth2::RedirectUrl::new(format!("{}/auth/github/callback", config.public_base_url))
                        .expect("Invalid redirect URI"),
                );

        let mut providers: HashMap<&'static str, Arc<dyn provider::OAuthProvider>> = HashMap::new();
        providers.insert("github", github::GitHubProvider::new(github_client, http.clone()));

        // Discord OAuth client
        let discord_client: BasicClient<EndpointSet, EndpointNotSet, EndpointNotSet, EndpointNotSet, EndpointSet> =
            BasicClient::new(oauth2::ClientId::new(config.discord_client_id.clone()))
                .set_client_secret(oauth2::ClientSecret::new(config.discord_client_secret.clone()))
                .set_auth_uri(oauth2::AuthUrl::new("https://discord.com/api/oauth2/authorize".to_string())?)
                .set_token_uri(oauth2::TokenUrl::new("https://discord.com/api/oauth2/token".to_string())?)
                .set_redirect_uri(
                    oauth2::RedirectUrl::new(format!("{}/auth/discord/callback", config.public_base_url))
                        .expect("Invalid redirect URI"),
                );
        providers.insert("discord", discord::DiscordProvider::new(discord_client, http));

        Ok(Self { providers })
    }

    pub fn get(&self, id: &str) -> Option<&Arc<dyn provider::OAuthProvider>> {
        self.providers.get(id)
    }

    pub fn values(&self) -> impl Iterator<Item = &Arc<dyn provider::OAuthProvider>> {
        self.providers.values()
    }
}
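
Registry lookup keys match the `{provider}` path parameter used by the routes. A usage sketch (assumes a fully populated `Config` is already in scope):

let registry = AuthRegistry::new(&config).expect("OAuth endpoint URLs should parse");
if let Some(provider) = registry.get("github") {
    println!("{} ({}) active: {}", provider.label(), provider.id(), provider.active());
}
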
131
pacman-server/src/auth/provider.rs
Normal file
@@ -0,0 +1,131 @@
use async_trait::async_trait;
use axum_cookie::CookieManager;
use jsonwebtoken::{DecodingKey, EncodingKey};
use mockall::automock;
use serde::Serialize;
use tracing::warn;

use crate::errors::ErrorResponse;
use crate::session;

// A user object returned from the provider after authentication.
#[derive(Debug, Clone, Serialize)]
pub struct AuthUser {
    // A unique identifier for the user, from the provider.
    pub id: String,
    // A username from the provider. Generally unique, a handle for the user.
    pub username: String,

    // A display name for the user. Not always available.
    pub name: Option<String>,
    // An email address for the user. Not always available.
    pub email: Option<String>,
    // Whether the email address has been verified by the provider.
    pub email_verified: bool,
    // An avatar URL for the user. Not always available.
    pub avatar_url: Option<String>,
}

// Information required to begin an OAuth authorization flow.
#[derive(Debug)]
pub struct AuthorizeInfo {
    // The URL to redirect the user to for authorization.
    pub authorize_url: oauth2::url::Url,
    // A session token to be stored in the user's session cookie.
    pub session_token: String,
}

#[automock]
#[async_trait]
pub trait OAuthProvider: Send + Sync {
    // Builds the necessary information to redirect the user to the provider's authorization page.
    // This generally also includes beginning a PKCE flow (proof key for code exchange).
    // The returned session token should be stored in the user's session cookie.
    async fn authorize(&self, encoding_key: &EncodingKey) -> Result<AuthorizeInfo, ErrorResponse>;

    // Handles the callback from the provider after the user has authorized the app.
    // This generally also includes completing the PKCE flow (proof key for code exchange).
    // The cookie manager is used to retrieve the PKCE verifier from the session.
    async fn handle_callback(
        &self,
        code: &str,
        state: &str,
        cookie: &CookieManager,
        decoding_key: &DecodingKey,
    ) -> Result<AuthUser, ErrorResponse> {
        // Common PKCE session validation and token exchange logic
        let verifier = self.validate_pkce_session(cookie, state, decoding_key).await?;
        let access_token = self.exchange_code_for_token(code, &verifier).await?;
        let user = self.fetch_user_from_token(&access_token).await?;
        Ok(user)
    }

    // Validates the PKCE session and returns the verifier
    async fn validate_pkce_session(
        &self,
        cookie: &CookieManager,
        state: &str,
        decoding_key: &DecodingKey,
    ) -> Result<String, ErrorResponse> {
        // Get the session token and verify it's a PKCE session
        let Some(session_token) = session::get_session_token(cookie) else {
            warn!(%state, "Missing session cookie during OAuth callback");
            return Err(ErrorResponse::bad_request(
                "invalid_request",
                Some("missing session cookie".into()),
            ));
        };

        let Some(claims) = session::decode_jwt(&session_token, decoding_key) else {
            warn!(%state, "Invalid session token during OAuth callback");
            return Err(ErrorResponse::bad_request(
                "invalid_request",
                Some("invalid session token".into()),
            ));
        };

        // Verify this is a PKCE session and the state matches
        if !session::is_pkce_session(&claims) {
            warn!(%state, "Session is not a PKCE session");
            return Err(ErrorResponse::bad_request(
                "invalid_request",
                Some("invalid session type".into()),
            ));
        }

        if claims.csrf_state.as_deref() != Some(state) {
            warn!(%state, "CSRF state mismatch during OAuth callback");
            return Err(ErrorResponse::bad_request(
                "invalid_request",
                Some("state parameter mismatch".into()),
            ));
        }

        let Some(verifier) = claims.pkce_verifier else {
            warn!(%state, "Missing PKCE verifier in session");
            return Err(ErrorResponse::bad_request(
                "invalid_request",
                Some("missing pkce verifier".into()),
            ));
        };

        Ok(verifier)
    }

    // Exchanges the authorization code for an access token using PKCE
    async fn exchange_code_for_token(&self, code: &str, verifier: &str) -> Result<String, ErrorResponse>;

    // Fetches user information from the provider using the access token
    async fn fetch_user_from_token(&self, access_token: &str) -> Result<AuthUser, ErrorResponse>;

    // The provider's unique identifier (e.g. "discord")
    fn id(&self) -> &'static str;

    // The provider's display name (e.g. "Discord")
    fn label(&self) -> &'static str;

    // Whether the provider is active (defaults to true for now)
    fn active(&self) -> bool {
        true
    }
}
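
Because the trait carries `#[automock]`, mockall generates a `MockOAuthProvider` for tests, so the default `handle_callback` body can be exercised against stubbed required methods. A minimal sketch (names follow mockall's conventions; the expectations are illustrative):

let mut provider = MockOAuthProvider::new();
provider.expect_id().return_const("mock");
provider.expect_label().return_const("Mock");
assert_eq!(provider.id(), "mock");
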
79
pacman-server/src/config.rs
Normal file
@@ -0,0 +1,79 @@
use figment::{providers::Env, value::UncasedStr, Figment};
use serde::{Deserialize, Deserializer};

#[derive(Debug, Clone, Deserialize)]
pub struct Config {
    // Database URL
    pub database_url: String,
    // Discord Credentials
    #[serde(deserialize_with = "deserialize_string_from_any")]
    pub discord_client_id: String,
    pub discord_client_secret: String,
    // GitHub Credentials
    #[serde(deserialize_with = "deserialize_string_from_any")]
    pub github_client_id: String,
    pub github_client_secret: String,
    // S3 Credentials
    pub s3_access_key: String,
    pub s3_secret_access_key: String,
    pub s3_bucket_name: String,
    pub s3_public_base_url: String,
    // Server Details
    #[serde(default = "default_port")]
    pub port: u16,
    #[serde(default = "default_host")]
    pub host: std::net::IpAddr,
    #[serde(default = "default_shutdown_timeout")]
    pub shutdown_timeout_seconds: u32,
    // Public base URL used for OAuth redirect URIs
    pub public_base_url: String,
    // JWT
    pub jwt_secret: String,
}

// Standard User-Agent: name/version (+site)
pub const USER_AGENT: &str = concat!(
    env!("CARGO_PKG_NAME"),
    "/",
    env!("CARGO_PKG_VERSION"),
    " (+https://pacman.xevion.dev)"
);

fn default_host() -> std::net::IpAddr {
    "0.0.0.0".parse().unwrap()
}

fn default_port() -> u16 {
    3000
}

fn default_shutdown_timeout() -> u32 {
    5
}

fn deserialize_string_from_any<'de, D>(deserializer: D) -> Result<String, D::Error>
where
    D: Deserializer<'de>,
{
    use serde_json::Value;

    let value = Value::deserialize(deserializer)?;
    match value {
        Value::String(s) => Ok(s),
        Value::Number(n) => Ok(n.to_string()),
        _ => Err(serde::de::Error::custom("Expected string or number")),
    }
}

pub fn load_config() -> Config {
    Figment::new()
        .merge(Env::raw().map(|key| {
            if key == UncasedStr::new("RAILWAY_DEPLOYMENT_DRAINING_SECONDS") {
                "SHUTDOWN_TIMEOUT_SECONDS".into()
            } else {
                key.into()
            }
        }))
        .extract()
        .expect("Failed to load config")
}
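
`deserialize_string_from_any` exists because numeric-looking values (e.g. `DISCORD_CLIENT_ID=1234567890`) may arrive as numbers rather than strings; the helper coerces them. A sketch of the coercion, illustrated with serde_json directly rather than figment:

#[test]
fn client_id_accepts_numbers() {
    #[derive(serde::Deserialize)]
    struct ClientId {
        #[serde(deserialize_with = "deserialize_string_from_any")]
        id: String,
    }
    // A bare JSON number lands in the String field unchanged.
    let parsed: ClientId = serde_json::from_str(r#"{ "id": 1234567890 }"#).unwrap();
    assert_eq!(parsed.id, "1234567890");
}
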
2
pacman-server/src/data/mod.rs
Normal file
@@ -0,0 +1,2 @@
pub mod pool;
pub mod user;
21
pacman-server/src/data/pool.rs
Normal file
@@ -0,0 +1,21 @@
use sqlx::{postgres::PgPoolOptions, Pool, Postgres};
use tracing::{info, warn};

pub type PgPool = Pool<Postgres>;

pub async fn create_pool(immediate: bool, database_url: &str, max_connections: u32) -> PgPool {
    info!(immediate, "Connecting to PostgreSQL");

    let options = PgPoolOptions::new().max_connections(max_connections);

    if immediate {
        options.connect(database_url).await.unwrap_or_else(|e| {
            warn!(error = %e, "Failed to connect to PostgreSQL");
            panic!("database connect failed: {}", e);
        })
    } else {
        options
            .connect_lazy(database_url)
            .expect("Failed to create lazy database pool")
    }
}
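
Usage sketch: `immediate = true` fails fast at startup, while `false` defers the first real connection (and any error) to first use. The URL here is a placeholder:

let pool = create_pool(false, "postgres://user:pass@localhost/pacman", 10).await;
// With a lazy pool, this is where the first actual connection attempt happens:
let _ = sqlx::query("SELECT 1").execute(&pool).await;
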
126
pacman-server/src/data/user.rs
Normal file
@@ -0,0 +1,126 @@
use serde::Serialize;
use sqlx::FromRow;

#[derive(Debug, Clone, Serialize, FromRow)]
pub struct User {
    pub id: i64,
    pub email: Option<String>,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub updated_at: chrono::DateTime<chrono::Utc>,
}

#[derive(Debug, Clone, Serialize, FromRow)]
pub struct OAuthAccount {
    pub id: i64,
    pub user_id: i64,
    pub provider: String,
    pub provider_user_id: String,
    pub email: Option<String>,
    pub username: Option<String>,
    pub display_name: Option<String>,
    pub avatar_url: Option<String>,
    pub created_at: chrono::DateTime<chrono::Utc>,
    pub updated_at: chrono::DateTime<chrono::Utc>,
}

pub async fn find_user_by_email(pool: &sqlx::PgPool, email: &str) -> Result<Option<User>, sqlx::Error> {
    sqlx::query_as::<_, User>(
        r#"
        SELECT id, email, created_at, updated_at
        FROM users WHERE email = $1
        "#,
    )
    .bind(email)
    .fetch_optional(pool)
    .await
}

#[allow(clippy::too_many_arguments)]
pub async fn link_oauth_account(
    pool: &sqlx::PgPool,
    user_id: i64,
    provider: &str,
    provider_user_id: &str,
    email: Option<&str>,
    username: Option<&str>,
    display_name: Option<&str>,
    avatar_url: Option<&str>,
) -> Result<OAuthAccount, sqlx::Error> {
    sqlx::query_as::<_, OAuthAccount>(
        r#"
        INSERT INTO oauth_accounts (user_id, provider, provider_user_id, email, username, display_name, avatar_url)
        VALUES ($1, $2, $3, $4, $5, $6, $7)
        ON CONFLICT (provider, provider_user_id)
        DO UPDATE SET email = EXCLUDED.email, username = EXCLUDED.username, display_name = EXCLUDED.display_name, avatar_url = EXCLUDED.avatar_url, user_id = EXCLUDED.user_id, updated_at = NOW()
        RETURNING id, user_id, provider, provider_user_id, email, username, display_name, avatar_url, created_at, updated_at
        "#,
    )
    .bind(user_id)
    .bind(provider)
    .bind(provider_user_id)
    .bind(email)
    .bind(username)
    .bind(display_name)
    .bind(avatar_url)
    .fetch_one(pool)
    .await
}

pub async fn create_user(pool: &sqlx::PgPool, email: Option<&str>) -> Result<User, sqlx::Error> {
    sqlx::query_as::<_, User>(
        r#"
        INSERT INTO users (email)
        VALUES ($1)
        ON CONFLICT (email) DO UPDATE SET email = EXCLUDED.email
        RETURNING id, email, created_at, updated_at
        "#,
    )
    .bind(email)
    .fetch_one(pool)
    .await
}

pub async fn find_user_by_provider_id(
    pool: &sqlx::PgPool,
    provider: &str,
    provider_user_id: &str,
) -> Result<Option<User>, sqlx::Error> {
    let rec = sqlx::query_as::<_, User>(
        r#"
        SELECT u.id, u.email, u.created_at, u.updated_at
        FROM users u
        JOIN oauth_accounts oa ON oa.user_id = u.id
        WHERE oa.provider = $1 AND oa.provider_user_id = $2
        "#,
    )
    .bind(provider)
    .bind(provider_user_id)
    .fetch_optional(pool)
    .await?;
    Ok(rec)
}

#[derive(Debug, Clone, Serialize, FromRow)]
pub struct ProviderPublic {
    pub provider: String,
    pub provider_user_id: String,
    pub email: Option<String>,
    pub username: Option<String>,
    pub display_name: Option<String>,
    pub avatar_url: Option<String>,
}

pub async fn list_user_providers(pool: &sqlx::PgPool, user_id: i64) -> Result<Vec<ProviderPublic>, sqlx::Error> {
    let recs = sqlx::query_as::<_, ProviderPublic>(
        r#"
        SELECT provider, provider_user_id, email, username, display_name, avatar_url
        FROM oauth_accounts
        WHERE user_id = $1
        ORDER BY provider
        "#,
    )
    .bind(user_id)
    .fetch_all(pool)
    .await?;
    Ok(recs)
}
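
`link_oauth_account` is an upsert keyed on `(provider, provider_user_id)`, so re-linking the same provider account refreshes the profile columns rather than inserting a duplicate row. A sketch (inside an async context with `pool` and `user` in scope and `?` available; values illustrative):

let first = link_oauth_account(&pool, user.id, "github", "583231", None, Some("octocat"), None, None).await?;
let again = link_oauth_account(&pool, user.id, "github", "583231", None, Some("octocat"), None, None).await?;
assert_eq!(first.id, again.id); // same row; the ON CONFLICT branch bumped updated_at
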
55
pacman-server/src/errors.rs
Normal file
@@ -0,0 +1,55 @@
use axum::{http::StatusCode, response::IntoResponse, Json};
use serde::Serialize;

#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    #[serde(skip_serializing)]
    status_code: Option<StatusCode>,
    pub error: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}

impl ErrorResponse {
    pub fn status_code(&self) -> StatusCode {
        self.status_code.unwrap_or(StatusCode::INTERNAL_SERVER_ERROR)
    }

    pub fn unauthorized(description: impl Into<String>) -> Self {
        Self {
            status_code: Some(StatusCode::UNAUTHORIZED),
            error: "unauthorized".into(),
            description: Some(description.into()),
        }
    }

    pub fn bad_request(error: impl Into<String>, description: impl Into<Option<String>>) -> Self {
        Self {
            status_code: Some(StatusCode::BAD_REQUEST),
            error: error.into(),
            description: description.into(),
        }
    }

    pub fn bad_gateway(error: impl Into<String>, description: impl Into<Option<String>>) -> Self {
        Self {
            status_code: Some(StatusCode::BAD_GATEWAY),
            error: error.into(),
            description: description.into(),
        }
    }

    pub fn with_status(status: StatusCode, error: impl Into<String>, description: impl Into<Option<String>>) -> Self {
        Self {
            status_code: Some(status),
            error: error.into(),
            description: description.into(),
        }
    }
}

impl IntoResponse for ErrorResponse {
    fn into_response(self) -> axum::response::Response {
        (self.status_code(), Json(self)).into_response()
    }
}
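
Since the status code is skipped during serialization, a handler returning an `ErrorResponse` yields only the error payload, e.g. HTTP 400 with body {"error":"invalid_provider","description":"twitter"}. Usage sketch (a hypothetical handler, not part of this change):

async fn reject_unknown_provider() -> axum::response::Response {
    ErrorResponse::bad_request("invalid_provider", Some("twitter".to_string())).into_response()
}
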
243
pacman-server/src/formatter.rs
Normal file
@@ -0,0 +1,243 @@
//! Custom tracing formatter
use serde::Serialize;
use serde_json::{Map, Value};
use std::fmt;
use time::macros::format_description;
use time::{format_description::FormatItem, OffsetDateTime};
use tracing::field::{Field, Visit};
use tracing::{Event, Level, Subscriber};
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields};
use tracing_subscriber::registry::LookupSpan;
use yansi::Paint;

// Cached format description for timestamps
const TIMESTAMP_FORMAT: &[FormatItem<'static>] = format_description!("[hour]:[minute]:[second].[subsecond digits:5]");

/// A custom formatter with enhanced timestamp formatting
///
/// Re-implementation of the Full formatter with improved timestamp display.
pub struct CustomPrettyFormatter;

impl<S, N> FormatEvent<S, N> for CustomPrettyFormatter
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> FormatFields<'a> + 'static,
{
    fn format_event(&self, ctx: &FmtContext<'_, S, N>, mut writer: Writer<'_>, event: &Event<'_>) -> fmt::Result {
        let meta = event.metadata();

        // 1) Timestamp (dimmed when ANSI)
        let now = OffsetDateTime::now_utc();
        let formatted_time = now.format(&TIMESTAMP_FORMAT).map_err(|e| {
            eprintln!("Failed to format timestamp: {}", e);
            fmt::Error
        })?;
        write_dimmed(&mut writer, formatted_time)?;
        writer.write_char(' ')?;

        // 2) Colored 5-char level like Full
        write_colored_level(&mut writer, meta.level())?;
        writer.write_char(' ')?;

        // 3) Span scope chain (bold names, fields in braces, dimmed ':')
        if let Some(scope) = ctx.event_scope() {
            let mut saw_any = false;
            for span in scope.from_root() {
                write_bold(&mut writer, span.metadata().name())?;
                saw_any = true;
                write_dimmed(&mut writer, ":")?;

                let ext = span.extensions();
                if let Some(fields) = &ext.get::<FormattedFields<N>>() {
                    if !fields.fields.is_empty() {
                        write_bold(&mut writer, "{")?;
                        writer.write_str(fields.fields.as_str())?;
                        write_bold(&mut writer, "}")?;
                    }
                }
                write_dimmed(&mut writer, ":")?;
            }
            if saw_any {
                writer.write_char(' ')?;
            }
        }

        // 4) Target (dimmed), then a space
        if writer.has_ansi_escapes() {
            write!(writer, "{}: ", Paint::new(meta.target()).dim())?;
        } else {
            write!(writer, "{}: ", meta.target())?;
        }

        // 5) Event fields
        ctx.format_fields(writer.by_ref(), event)?;

        // 6) Newline
        writeln!(writer)
    }
}

/// A custom JSON formatter that flattens fields to root level
///
/// Outputs logs in the format:
/// { "message": "...", "level": "...", "customAttribute": "..." }
pub struct CustomJsonFormatter;

impl<S, N> FormatEvent<S, N> for CustomJsonFormatter
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> FormatFields<'a> + 'static,
{
    fn format_event(&self, ctx: &FmtContext<'_, S, N>, mut writer: Writer<'_>, event: &Event<'_>) -> fmt::Result {
        let meta = event.metadata();

        #[derive(Serialize)]
        struct EventFields {
            message: String,
            level: String,
            target: String,
            #[serde(flatten)]
            spans: Map<String, Value>,
            #[serde(flatten)]
            fields: Map<String, Value>,
        }

        let (message, fields, spans) = {
            let mut message: Option<String> = None;
            let mut fields: Map<String, Value> = Map::new();
            let mut spans: Map<String, Value> = Map::new();

            struct FieldVisitor<'a> {
                message: &'a mut Option<String>,
                fields: &'a mut Map<String, Value>,
            }
            impl Visit for FieldVisitor<'_> {
                fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
                    let key = field.name();
                    if key == "message" {
                        *self.message = Some(format!("{:?}", value));
                    } else {
                        self.fields.insert(key.to_string(), Value::String(format!("{:?}", value)));
                    }
                }

                fn record_str(&mut self, field: &Field, value: &str) {
                    let key = field.name();
                    if key == "message" {
                        *self.message = Some(value.to_string());
                    } else {
                        self.fields.insert(key.to_string(), Value::String(value.to_string()));
                    }
                }

                fn record_i64(&mut self, field: &Field, value: i64) {
                    let key = field.name();
                    if key != "message" {
                        self.fields
                            .insert(key.to_string(), Value::Number(serde_json::Number::from(value)));
                    }
                }

                fn record_u64(&mut self, field: &Field, value: u64) {
                    let key = field.name();
                    if key != "message" {
                        self.fields
                            .insert(key.to_string(), Value::Number(serde_json::Number::from(value)));
                    }
                }

                fn record_bool(&mut self, field: &Field, value: bool) {
                    let key = field.name();
                    if key != "message" {
                        self.fields.insert(key.to_string(), Value::Bool(value));
                    }
                }
            }

            let mut visitor = FieldVisitor {
                message: &mut message,
                fields: &mut fields,
            };
            event.record(&mut visitor);

            // Collect span information from the span hierarchy
            if let Some(scope) = ctx.event_scope() {
                for span in scope.from_root() {
                    let span_name = span.metadata().name().to_string();
                    let mut span_fields: Map<String, Value> = Map::new();

                    // Try to extract fields from FormattedFields
                    let ext = span.extensions();
                    if let Some(formatted_fields) = ext.get::<FormattedFields<N>>() {
                        // Try to parse as JSON first
                        if let Ok(json_fields) = serde_json::from_str::<Map<String, Value>>(formatted_fields.fields.as_str()) {
                            span_fields.extend(json_fields);
                        } else {
                            // If not valid JSON, treat the entire field string as a single field
                            span_fields.insert("raw".to_string(), Value::String(formatted_fields.fields.as_str().to_string()));
                        }
                    }

                    // Insert span as a nested object directly into the spans map
                    spans.insert(span_name, Value::Object(span_fields));
                }
            }

            (message, fields, spans)
        };

        let json = EventFields {
            message: message.unwrap_or_default(),
            level: meta.level().to_string(),
            target: meta.target().to_string(),
            spans,
            fields,
        };

        writeln!(
            writer,
            "{}",
            serde_json::to_string(&json).unwrap_or_else(|_| "{}".to_string())
        )
    }
}

/// Write the verbosity level with the same coloring/alignment as the Full formatter.
fn write_colored_level(writer: &mut Writer<'_>, level: &Level) -> fmt::Result {
    if writer.has_ansi_escapes() {
        let paint = match *level {
            Level::TRACE => Paint::new("TRACE").magenta(),
            Level::DEBUG => Paint::new("DEBUG").blue(),
            Level::INFO => Paint::new(" INFO").green(),
            Level::WARN => Paint::new(" WARN").yellow(),
            Level::ERROR => Paint::new("ERROR").red(),
        };
        write!(writer, "{}", paint)
    } else {
        // Right-align to a width of 5, like Full's non-ANSI mode
        match *level {
            Level::TRACE => write!(writer, "{:>5}", "TRACE"),
            Level::DEBUG => write!(writer, "{:>5}", "DEBUG"),
            Level::INFO => write!(writer, "{:>5}", " INFO"),
            Level::WARN => write!(writer, "{:>5}", " WARN"),
            Level::ERROR => write!(writer, "{:>5}", "ERROR"),
        }
    }
}

fn write_dimmed(writer: &mut Writer<'_>, s: impl fmt::Display) -> fmt::Result {
    if writer.has_ansi_escapes() {
        write!(writer, "{}", Paint::new(s).dim())
    } else {
        write!(writer, "{}", s)
    }
}

fn write_bold(writer: &mut Writer<'_>, s: impl fmt::Display) -> fmt::Result {
    if writer.has_ansi_escapes() {
        write!(writer, "{}", Paint::new(s).bold())
    } else {
        write!(writer, "{}", s)
    }
}
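
For orientation, the same event rendered by each formatter would look roughly like this (illustrative values; exact spacing and coloring follow the code above):

12:34:56.78901  INFO request:{path=/api/health}: pacman_server::routes: database_ok=true Health check succeeded
{"message":"Health check succeeded","level":"INFO","target":"pacman_server::routes","request":{"path":"/api/health"},"database_ok":true}
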
183
pacman-server/src/image.rs
Normal file
@@ -0,0 +1,183 @@
use std::sync::Arc;

use image::codecs::png::PngEncoder;
use s3::Bucket;
use sha2::Digest;
use tracing::trace;

use crate::config::Config;

/// Minimal S3-backed image storage. This keeps things intentionally simple for now:
/// - construct from existing `Config`
/// - upload raw bytes under a key
/// - upload a local file by path (reads whole file into memory)
/// - generate a simple presigned GET URL
/// - process avatars with resizing and upload
///
/// Backed by `s3-tokio` (hyper 1 + rustls) and compatible with S3/R2/MinIO endpoints.
#[derive(Clone)]
pub struct ImageStorage {
    bucket: Arc<s3::Bucket>,
    public_base_url: String,
}

impl ImageStorage {
    /// Create a new storage for a specific `bucket_name` using settings from `Config`.
    ///
    /// This uses a custom region + endpoint so it works across AWS S3 and compatible services
    /// such as Cloudflare R2 and MinIO.
    pub fn new(config: &Config, bucket_name: impl Into<String>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let credentials = s3::creds::Credentials::new(
            Some(&config.s3_access_key),
            Some(&config.s3_secret_access_key),
            None, // security token
            None, // session token
            None, // profile
        )?;

        let bucket = Bucket::new(
            &bucket_name.into(),
            s3::Region::R2 {
                account_id: "f188bf93079278e7bbc58de9b3d80693".to_string(),
            },
            credentials,
        )?
        .with_path_style();

        Ok(Self {
            bucket: Arc::new(bucket),
            public_base_url: config.s3_public_base_url.clone(),
        })
    }

    /// Upload a byte slice to `key` with optional content type.
    ///
    /// Returns the ETag (if present) from the server response.
    pub async fn upload_bytes(
        &self,
        key: &str,
        bytes: impl AsRef<[u8]>,
        content_type: Option<&str>,
    ) -> Result<Option<String>, Box<dyn std::error::Error + Send + Sync>> {
        let data = bytes.as_ref();
        let content_type = content_type.unwrap_or("application/octet-stream");

        // Prefer the content-type variant for correct metadata
        let status = {
            let response = self.bucket.put_object_with_content_type(key, data, content_type).await?;
            response.status_code()
        };

        if (200..300).contains(&status) {
            // s3-tokio returns headers separately; attempt to pull the ETag if available
            // Note: the current API returns (status, headers) where headers is `http::HeaderMap`.
            // Some providers omit ETag on PUT; we handle that by returning `None`.
            Ok(None)
        } else {
            Err(format!("upload failed with status {}", status).into())
        }
    }

    /// Generate a simple presigned GET URL valid for `expires_in_seconds`.
    #[allow(dead_code)]
    pub fn presign_get(&self, key: &str, expires_in_seconds: u32) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
        let url = self.bucket.presign_get(key, expires_in_seconds, None)?;
        Ok(url)
    }

    /// Process and upload an avatar from a URL.
    ///
    /// Downloads the image, resizes it to 512x512 (original) and 32x32 (mini),
    /// then uploads both versions to S3. Returns the public URLs for both images.
    pub async fn process_avatar(
        &self,
        user_public_id: &str,
        avatar_url: &str,
    ) -> Result<AvatarUrls, Box<dyn std::error::Error + Send + Sync>> {
        // Download the avatar image
        let response = reqwest::get(avatar_url).await?;
        if !response.status().is_success() {
            return Err(format!("Failed to download avatar: {}", response.status()).into());
        }

        let image_bytes = response.bytes().await?;
        trace!(bytes = image_bytes.len(), "Downloaded avatar");

        // Decode the image
        let img = image::load_from_memory(&image_bytes)?;
        let img_rgba = img.to_rgba8();

        // Generate a simple hash for the avatar (using the URL for now)
        let avatar_hash = format!("{:x}", sha2::Sha256::digest(avatar_url.as_bytes()));
        trace!(
            width = img_rgba.width(),
            height = img_rgba.height(),
            hash = avatar_hash,
            "Avatar image decoded"
        );

        // Process original (512x512 max, square)
        let original_key = format!("avatars/{}/{}.original.png", user_public_id, avatar_hash);
        let original_png = self.resize_to_square_png(&img_rgba, 512)?;
        self.upload_bytes(&original_key, &original_png, Some("image/png")).await?;
        trace!(key = original_key, "Uploaded original avatar");

        // Process mini (32x32)
        let mini_key = format!("avatars/{}/{}.mini.png", user_public_id, avatar_hash);
        let mini_png = self.resize_to_square_png(&img_rgba, 32)?;
        self.upload_bytes(&mini_key, &mini_png, Some("image/png")).await?;
        trace!(key = mini_key, "Uploaded mini avatar");

        Ok(AvatarUrls {
            original_url: format!("{}/{}", self.public_base_url, original_key),
            mini_url: format!("{}/{}", self.public_base_url, mini_key),
        })
    }

    /// Resize an RGBA image to a square of the specified size, maintaining aspect ratio.
    fn resize_to_square_png(
        &self,
        img: &image::RgbaImage,
        target_size: u32,
    ) -> Result<Vec<u8>, Box<dyn std::error::Error + Send + Sync>> {
        let (width, height) = img.dimensions();

        // Calculate dimensions for square crop (center crop)
        let size = width.min(height);
        let start_x = (width - size) / 2;
        let start_y = (height - size) / 2;

        // Crop to square
        let cropped = image::imageops::crop_imm(img, start_x, start_y, size, size).to_image();

        // Resize to target size
        let resized = image::imageops::resize(&cropped, target_size, target_size, image::imageops::FilterType::Lanczos3);

        // Encode as PNG
        let mut bytes: Vec<u8> = Vec::new();
        let cursor = std::io::Cursor::new(&mut bytes);

        // Write the resized image to the cursor
        resized.write_with_encoder(PngEncoder::new(cursor))?;

        Ok(bytes)
    }
}

/// URLs for processed avatar images
#[derive(Debug, Clone)]
pub struct AvatarUrls {
    pub original_url: String,
    pub mini_url: String,
}

impl ImageStorage {
    /// Create a new storage using the default bucket from `Config`.
    pub fn from_config(config: &Config) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        Self::new(config, &config.s3_bucket_name)
    }
}

// References:
// - Example (R2): https://github.com/FemLolStudio/s3-tokio/blob/master/examples/r2-tokio.rs
// - Crate docs: https://lib.rs/crates/s3-tokio
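
The center-crop arithmetic in `resize_to_square_png`, worked for a 640x480 input (pure calculation, no I/O):

let (width, height) = (640u32, 480u32);
let size = width.min(height);      // 480: the square edge is the shorter dimension
let start_x = (width - size) / 2;  // 80: trims 80px from the left and right
let start_y = (height - size) / 2; // 0: no vertical trim
assert_eq!((size, start_x, start_y), (480, 80, 0));
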
16
pacman-server/src/lib.rs
Normal file
@@ -0,0 +1,16 @@
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]

#[cfg_attr(coverage_nightly, coverage(off))]
pub mod config;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod errors;
#[cfg_attr(coverage_nightly, coverage(off))]
pub mod formatter;

pub mod app;
pub mod auth;
pub mod data;
pub mod image;
pub mod logging;
pub mod routes;
pub mod session;
38
pacman-server/src/logging.rs
Normal file
@@ -0,0 +1,38 @@
use tracing_subscriber::{fmt::format::JsonFields, EnvFilter, FmtSubscriber};

use crate::formatter;

static SUBSCRIBER_INIT: std::sync::Once = std::sync::Once::new();

/// Configure and initialize logging for the application
pub fn setup_logging() {
    SUBSCRIBER_INIT.call_once(|| {
        // Allow RUST_LOG to override levels; default to info for our crate and warn elsewhere
        let filter = EnvFilter::try_from_default_env()
            .unwrap_or_else(|_| EnvFilter::new(format!("warn,{name}=info,{name}::auth=info", name = env!("CARGO_CRATE_NAME"))));

        // Default to pretty for local dev; switchable later if we add CLI
        let use_pretty = cfg!(debug_assertions);

        let subscriber: Box<dyn tracing::Subscriber + Send + Sync> = if use_pretty {
            Box::new(
                FmtSubscriber::builder()
                    .with_target(true)
                    .event_format(formatter::CustomPrettyFormatter)
                    .with_env_filter(filter)
                    .finish(),
            )
        } else {
            Box::new(
                FmtSubscriber::builder()
                    .with_target(true)
                    .event_format(formatter::CustomJsonFormatter)
                    .fmt_fields(JsonFields::new())
                    .with_env_filter(filter)
                    .finish(),
            )
        };

        tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed");
    });
}
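
RUST_LOG takes precedence over the built-in default filter; for example running with RUST_LOG="pacman_server=trace,sqlx=warn" (crate name assumed) enables trace output for the server while keeping sqlx quiet. Repeated calls are harmless:

setup_logging(); // installs the global subscriber
setup_logging(); // no-op: the Once guard ensures set_global_default runs exactly once
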
144
pacman-server/src/main.rs
Normal file
@@ -0,0 +1,144 @@
#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
#![cfg_attr(coverage_nightly, coverage(off))]

use crate::{
    app::{create_router, AppState},
    auth::AuthRegistry,
    config::Config,
};
use std::sync::Arc;
use std::time::Instant;
use tracing::{info, trace, warn};

#[cfg(unix)]
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::{watch, Notify};

#[cfg_attr(coverage_nightly, coverage(off))]
mod config;
#[cfg_attr(coverage_nightly, coverage(off))]
mod errors;
#[cfg_attr(coverage_nightly, coverage(off))]
mod formatter;

mod app;
mod auth;
mod data;
mod image;
mod logging;
mod routes;
mod session;

#[tokio::main]
async fn main() {
    rustls::crypto::ring::default_provider()
        .install_default()
        .expect("Failed to install default crypto provider");

    // Load environment variables
    #[cfg(debug_assertions)]
    dotenvy::from_path(std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".env")).ok();
    #[cfg(not(debug_assertions))]
    dotenvy::dotenv().ok();

    // Load configuration
    let config: Config = config::load_config();

    // Initialize tracing subscriber
    logging::setup_logging();
    trace!(host = %config.host, port = config.port, shutdown_timeout_seconds = config.shutdown_timeout_seconds, "Loaded server configuration");

    let addr = std::net::SocketAddr::new(config.host, config.port);
    let shutdown_timeout = std::time::Duration::from_secs(config.shutdown_timeout_seconds as u64);
    let auth = AuthRegistry::new(&config).expect("auth initializer");
    let db = data::pool::create_pool(true, &config.database_url, 10).await;

    // Run database migrations at startup
    if let Err(e) = sqlx::migrate!("./migrations").run(&db).await {
        panic!("failed to run database migrations: {}", e);
    }

    // Create the shutdown notification before creating AppState
    let notify = Arc::new(Notify::new());

    let app_state = AppState::new(config, auth, db, notify.clone()).await;
    {
        // migrations succeeded
        let mut h = app_state.health.write().await;
        h.set_migrations(true);
    }

    let app = create_router(app_state);

    info!(%addr, "Starting HTTP server bind");
    let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
    info!(%addr, "HTTP server listening");

    // coordinated graceful shutdown with timeout
    let (tx_signal, rx_signal) = watch::channel::<Option<Instant>>(None);

    {
        let notify = notify.clone();
        let tx = tx_signal.clone();
        tokio::spawn(async move {
            let signaled_at = shutdown_signal().await;
            let _ = tx.send(Some(signaled_at));
            notify.notify_waiters();
        });
    }

    let mut rx_for_timeout = rx_signal.clone();
    let timeout_task = async move {
        // wait until first signal observed
        while rx_for_timeout.borrow().is_none() {
            if rx_for_timeout.changed().await.is_err() {
                return; // channel closed
            }
        }
        tokio::time::sleep(shutdown_timeout).await;
        warn!(timeout = ?shutdown_timeout, "Shutdown timeout elapsed; forcing exit");
        std::process::exit(1);
    };

    let server = axum::serve(listener, app).with_graceful_shutdown(async move {
        notify.notified().await;
    });

    tokio::select! {
        res = server => {
            // server finished; if we had a signal, print remaining time
            let now = Instant::now();
            if let Some(signaled_at) = *rx_signal.borrow() {
                let elapsed = now.duration_since(signaled_at);
                if elapsed < shutdown_timeout {
                    let remaining = format!("{:.2?}", shutdown_timeout - elapsed);
                    info!(remaining = remaining, "Graceful shutdown complete");
                }
            }
            res.unwrap();
        }
        _ = timeout_task => {}
    }
}

async fn shutdown_signal() -> Instant {
    let ctrl_c = async {
        tokio::signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
        warn!(signal = "ctrl_c", "Received Ctrl+C; shutting down");
    };

    #[cfg(unix)]
    let sigterm = async {
        let mut term_stream = signal(SignalKind::terminate()).expect("failed to install SIGTERM handler");
        term_stream.recv().await;
        warn!(signal = "sigterm", "Received SIGTERM; shutting down");
    };

    #[cfg(not(unix))]
    let sigterm = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => { Instant::now() }
        _ = sigterm => { Instant::now() }
    }
}
294
pacman-server/src/routes.rs
Normal file
@@ -0,0 +1,294 @@
use axum::{
    extract::{Path, Query, State},
    http::StatusCode,
    response::{IntoResponse, Redirect},
};
use axum_cookie::CookieManager;
use serde::Serialize;
use tracing::{debug, debug_span, info, instrument, trace, warn, Instrument};

use crate::data::user as user_repo;
use crate::{app::AppState, errors::ErrorResponse, session};

#[derive(Debug, serde::Deserialize)]
pub struct OAuthCallbackParams {
    pub code: Option<String>,
    pub state: Option<String>,
    pub error: Option<String>,
    pub error_description: Option<String>,
}

/// Handles the beginning of the OAuth authorization flow.
///
/// Requires the `provider` path parameter, which determines the OAuth provider to use.
#[instrument(skip_all, fields(provider = %provider))]
pub async fn oauth_authorize_handler(
    State(app_state): State<AppState>,
    Path(provider): Path<String>,
    cookie: CookieManager,
) -> axum::response::Response {
    let Some(prov) = app_state.auth.get(&provider) else {
        warn!(%provider, "Unknown OAuth provider");
        return ErrorResponse::bad_request("invalid_provider", Some(provider)).into_response();
    };
    trace!("Starting OAuth authorization");

    let auth_info = match prov.authorize(&app_state.jwt_encoding_key).await {
        Ok(info) => info,
        Err(e) => return e.into_response(),
    };

    session::set_session_cookie(&cookie, &auth_info.session_token);
    trace!("Redirecting to provider authorization page");
    Redirect::to(auth_info.authorize_url.as_str()).into_response()
}

/// Handles the callback from the OAuth provider after the user has authorized the app.
///
/// Requires the `provider` path parameter, which determines the OAuth provider to use for finishing the OAuth flow.
/// Requires the `code` and `state` query parameters, which are returned by the OAuth provider after the user has authorized the app.
pub async fn oauth_callback_handler(
    State(app_state): State<AppState>,
    Path(provider): Path<String>,
    Query(params): Query<OAuthCallbackParams>,
    cookie: CookieManager,
) -> axum::response::Response {
    // Validate provider
    let Some(prov) = app_state.auth.get(&provider) else {
        warn!(%provider, "Unknown OAuth provider");
        return ErrorResponse::bad_request("invalid_provider", Some(provider)).into_response();
    };

    // Process callback-returned errors from provider
    if let Some(error) = params.error {
        warn!(%provider, error = %error, desc = ?params.error_description, "OAuth callback returned an error");
        return ErrorResponse::bad_request(error, params.error_description).into_response();
    }

    // Acquire required parameters
    let Some(code) = params.code.as_deref() else {
        return ErrorResponse::bad_request("invalid_request", Some("missing code".into())).into_response();
    };
    let Some(state) = params.state.as_deref() else {
        return ErrorResponse::bad_request("invalid_request", Some("missing state".into())).into_response();
    };

    debug_span!("oauth_callback_handler", provider = %provider, code = %code, state = %state);

    // Handle callback from provider
    let user = match prov.handle_callback(code, state, &cookie, &app_state.jwt_decoding_key).await {
        Ok(u) => u,
        Err(e) => {
            warn!(%provider, "OAuth callback handling failed");
            return e.into_response();
        }
    };

    // --- Simplified Sign-in / Sign-up Flow ---
    let linking_span = debug_span!("account_linking", provider_user_id = %user.id, provider_email = ?user.email, email_verified = %user.email_verified);
    let db_user_result: Result<user_repo::User, sqlx::Error> = async {
        // 1. Check if we already have this specific provider account linked
        if let Some(user) = user_repo::find_user_by_provider_id(&app_state.db, &provider, &user.id).await? {
            debug!(user_id = %user.id, "Found existing user by provider ID");
            return Ok(user);
        }

        // 2. If not, try to find an existing user by verified email to link to
        let user_to_link = if user.email_verified {
            if let Some(email) = user.email.as_deref() {
                // Try to find a user with this email
                if let Some(existing_user) = user_repo::find_user_by_email(&app_state.db, email).await? {
                    debug!(user_id = %existing_user.id, "Found existing user by email, linking new provider");
                    existing_user
                } else {
                    // No user with this email, create a new one
                    debug!("No user found by email, creating a new one");
                    user_repo::create_user(&app_state.db, Some(email)).await?
                }
            } else {
                // Verified, but no email for some reason. Create a user without an email.
                user_repo::create_user(&app_state.db, None).await?
            }
        } else {
            // No verified email, so we must create a new user without an email.
            debug!("No verified email, creating a new user");
            user_repo::create_user(&app_state.db, None).await?
        };

        // 3. Link the new provider account to our user record (whether old or new)
        user_repo::link_oauth_account(
            &app_state.db,
            user_to_link.id,
            &provider,
            &user.id,
            user.email.as_deref(),
            Some(&user.username),
            user.name.as_deref(),
            user.avatar_url.as_deref(),
        )
        .await?;

        Ok(user_to_link)
    }
    .instrument(linking_span)
    .await;

    let _: user_repo::User = match db_user_result {
        Ok(u) => u,
        Err(e) => {
            warn!(error = %(&e as &dyn std::error::Error), "Failed to process user linking/creation");
            return ErrorResponse::with_status(StatusCode::INTERNAL_SERVER_ERROR, "database_error", None).into_response();
        }
    };

    // Create session token
    let session_token = session::create_jwt_for_user(&provider, &user, &app_state.jwt_encoding_key);
    session::set_session_cookie(&cookie, &session_token);
    info!(%provider, "Signed in successfully");

    // Process avatar asynchronously (don't block the response)
    if let Some(avatar_url) = user.avatar_url.as_deref() {
        let image_storage = app_state.image_storage.clone();
        let user_public_id = user.id.clone();
        let avatar_url = avatar_url.to_string();
        debug!(%user_public_id, %avatar_url, "Processing avatar");

        tokio::spawn(async move {
            match image_storage.process_avatar(&user_public_id, &avatar_url).await {
                Ok(avatar_urls) => {
                    info!(
                        user_id = %user_public_id,
                        original_url = %avatar_urls.original_url,
                        mini_url = %avatar_urls.mini_url,
                        "Avatar processed successfully"
                    );
                }
                Err(e) => {
                    warn!(
                        user_id = %user_public_id,
                        avatar_url = %avatar_url,
                        error = %e,
                        "Failed to process avatar"
                    );
                }
            }
        });
    }

    (StatusCode::FOUND, Redirect::to("/api/profile")).into_response()
}

/// Handles the request to the profile endpoint.
///
/// Requires the `session` cookie to be present.
pub async fn profile_handler(State(app_state): State<AppState>, cookie: CookieManager) -> axum::response::Response {
    let Some(token_str) = session::get_session_token(&cookie) else {
        debug!("Missing session cookie");
        return ErrorResponse::unauthorized("missing session cookie").into_response();
    };
    let Some(claims) = session::decode_jwt(&token_str, &app_state.jwt_decoding_key) else {
        debug!("Invalid session token");
        return ErrorResponse::unauthorized("invalid session token").into_response();
    };
    // sub format: provider:provider_user_id
    let (prov, prov_user_id) = match claims.subject.split_once(':') {
        Some((p, id)) => (p, id),
        None => {
            debug!("Malformed session token subject");
            return ErrorResponse::unauthorized("invalid session token").into_response();
        }
    };
    match user_repo::find_user_by_provider_id(&app_state.db, prov, prov_user_id).await {
        Ok(Some(db_user)) => {
            // Include linked providers in the profile payload
            match user_repo::list_user_providers(&app_state.db, db_user.id).await {
                Ok(providers) => {
                    #[derive(Serialize)]
                    struct ProfilePayload<T> {
                        id: i64,
                        email: Option<String>,
                        providers: Vec<T>,
                        created_at: chrono::DateTime<chrono::Utc>,
                        updated_at: chrono::DateTime<chrono::Utc>,
                    }
                    let body = ProfilePayload {
                        id: db_user.id,
                        email: db_user.email.clone(),
                        providers,
                        created_at: db_user.created_at,
                        updated_at: db_user.updated_at,
                    };
                    axum::Json(body).into_response()
                }
                Err(e) => {
                    warn!(error = %e, "Failed to list user providers");
                    ErrorResponse::with_status(
                        StatusCode::INTERNAL_SERVER_ERROR,
                        "database_error",
                        Some("could not fetch providers".into()),
                    )
                    .into_response()
                }
            }
        }
        Ok(None) => {
            debug!("User not found for session");
            ErrorResponse::unauthorized("session not found").into_response()
        }
        Err(e) => {
            warn!(error = %e, "Failed to fetch user for session");
            ErrorResponse::with_status(
                StatusCode::INTERNAL_SERVER_ERROR,
                "database_error",
                Some("could not fetch user".into()),
            )
            .into_response()
        }
    }
}

pub async fn logout_handler(State(app_state): State<AppState>, cookie: CookieManager) -> axum::response::Response {
    if let Some(token_str) = session::get_session_token(&cookie) {
        // Remove from in-memory sessions if present
        app_state.sessions.remove(&token_str);
    }
    session::clear_session_cookie(&cookie);
    info!("Signed out successfully");
    (StatusCode::FOUND, Redirect::to("/")).into_response()
}

#[derive(Serialize)]
struct ProviderInfo {
    id: &'static str,
    name: &'static str,
    active: bool,
}

pub async fn list_providers_handler(State(app_state): State<AppState>) -> axum::response::Response {
    let providers: Vec<ProviderInfo> = app_state
        .auth
        .values()
        .map(|provider| ProviderInfo {
            id: provider.id(),
            name: provider.label(),
            active: provider.active(),
        })
        .collect();
    axum::Json(providers).into_response()
}

pub async fn health_handler(
    State(app_state): State<AppState>,
    Query(params): Query<std::collections::HashMap<String, String>>,
) -> axum::response::Response {
    // Force health check in debug mode
    #[cfg(debug_assertions)]
    if params.contains_key("force") {
        app_state.check_health().await;
    }

    let ok = app_state.health.read().await.ok();
    let status = if ok { StatusCode::OK } else { StatusCode::SERVICE_UNAVAILABLE };
    let body = serde_json::json!({ "ok": ok });
    (status, axum::Json(body)).into_response()
}
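The route paths these handlers answer on are visible in the integration tests further down. A plausible wiring, assuming axum 0.8-style path parameters; the real `create_router` lives in app.rs, which is not shown in this diff:

use axum::{routing::get, Router};

// Hypothetical sketch; handler names are real, the router layout is
// inferred from the paths exercised by the tests below. axum_cookie's
// CookieManager extractor also needs its middleware installed (omitted).
pub fn sketch_router(state: AppState) -> Router {
    Router::new()
        .route("/api/health", get(health_handler))
        .route("/api/profile", get(profile_handler))
        .route("/api/logout", get(logout_handler))
        .route("/api/auth/providers", get(list_providers_handler))
        .route("/api/auth/{provider}", get(oauth_authorize_handler))
        .route("/api/auth/{provider}/callback", get(oauth_callback_handler))
        .with_state(state)
}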
99
pacman-server/src/session.rs
Normal file
@@ -0,0 +1,99 @@
use std::time::{SystemTime, UNIX_EPOCH};

use axum_cookie::{cookie::Cookie, prelude::SameSite, CookieManager};
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};

use crate::auth::provider::AuthUser;
use tracing::{trace, warn};

pub const SESSION_COOKIE_NAME: &str = "session";
pub const JWT_TTL_SECS: u64 = 60 * 60; // 1 hour

#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct Claims {
    #[serde(rename = "sub")]
    pub subject: String, // format: "{provider}:{provider_user_id}"
    pub name: Option<String>,
    #[serde(rename = "iat")]
    pub issued_at: usize,
    #[serde(rename = "exp")]
    pub expiration: usize,
    // PKCE flow fields - only present during OAuth flow
    #[serde(rename = "ver", skip_serializing_if = "Option::is_none")]
    pub pkce_verifier: Option<String>,
    #[serde(rename = "st", skip_serializing_if = "Option::is_none")]
    pub csrf_state: Option<String>,
}

pub fn create_jwt_for_user(provider: &str, user: &AuthUser, encoding_key: &EncodingKey) -> String {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("time went backwards")
        .as_secs() as usize;
    let claims = Claims {
        subject: format!("{}:{}", provider, user.id),
        name: user.name.clone(),
        issued_at: now,
        expiration: now + JWT_TTL_SECS as usize,
        pkce_verifier: None,
        csrf_state: None,
    };
    let token = encode(&Header::new(Algorithm::HS256), &claims, encoding_key).expect("jwt sign");
    trace!(sub = %claims.subject, exp = claims.expiration, "Created session JWT");
    token
}

/// Creates a temporary session for the PKCE flow, carrying the verifier and CSRF state.
pub fn create_pkce_session(pkce_verifier: &str, csrf_state: &str, encoding_key: &EncodingKey) -> String {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("time went backwards")
        .as_secs() as usize;
    let claims = Claims {
        subject: "pkce_flow".to_string(), // Special marker for PKCE flow
        name: None,
        issued_at: now,
        expiration: now + JWT_TTL_SECS as usize,
        pkce_verifier: Some(pkce_verifier.to_string()),
        csrf_state: Some(csrf_state.to_string()),
    };
    let token = encode(&Header::new(Algorithm::HS256), &claims, encoding_key).expect("jwt sign");
    trace!(csrf_state = %csrf_state, "Created PKCE session JWT");
    token
}

/// Checks if a session is a PKCE flow session
pub fn is_pkce_session(claims: &Claims) -> bool {
    claims.pkce_verifier.is_some() && claims.csrf_state.is_some()
}

pub fn decode_jwt(token: &str, decoding_key: &DecodingKey) -> Option<Claims> {
    let mut validation = Validation::new(Algorithm::HS256);
    validation.leeway = 30;
    match decode::<Claims>(token, decoding_key, &validation) {
        Ok(data) => Some(data.claims),
        Err(e) => {
            warn!(error = %e, "Session JWT verification failed");
            None
        }
    }
}

pub fn set_session_cookie(cookie: &CookieManager, token: &str) {
    cookie.add(
        Cookie::builder(SESSION_COOKIE_NAME, token.to_string())
            .http_only(true)
            .secure(!cfg!(debug_assertions))
            .path("/")
            .same_site(SameSite::Lax)
            .build(),
    );
}

pub fn clear_session_cookie(cookie: &CookieManager) {
    cookie.remove(SESSION_COOKIE_NAME);
}

pub fn get_session_token(cookie: &CookieManager) -> Option<String> {
    cookie.get(SESSION_COOKIE_NAME).map(|c| c.value().to_string())
}
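A quick round-trip of these helpers, as a sketch assuming both keys derive from one shared HS256 secret (which is how the test config's `jwt_secret` below suggests they are built):

use jsonwebtoken::{DecodingKey, EncodingKey};

// Sketch only; the secret here is hypothetical.
fn demo_roundtrip() {
    let secret = b"example-secret";
    let encoding_key = EncodingKey::from_secret(secret);
    let decoding_key = DecodingKey::from_secret(secret);

    let token = create_pkce_session("pkce-verifier", "csrf-state", &encoding_key);
    let claims = decode_jwt(&token, &decoding_key).expect("freshly issued token should verify");
    assert!(is_pkce_session(&claims));
    assert_eq!(claims.subject, "pkce_flow");
}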
17
pacman-server/tests/basics.rs
Normal file
@@ -0,0 +1,17 @@
mod common;

use pretty_assertions::assert_eq;

use crate::common::{test_context, TestContext};

// A basic test of all the server's routes that aren't covered by other tests.
#[tokio::test]
async fn test_basic_routes() {
    let routes = vec!["/api/", "/api/auth/providers"];

    for route in routes {
        let TestContext { server, .. } = test_context().use_database(false).call().await;
        let response = server.get(route).await;
        assert_eq!(response.status_code(), 200);
    }
}
129
pacman-server/tests/common/mod.rs
Normal file
@@ -0,0 +1,129 @@
use axum_test::TestServer;
use bon::builder;
use pacman_server::{
    app::{create_router, AppState},
    auth::AuthRegistry,
    config::Config,
};
use std::sync::{Arc, Once};
use testcontainers::{
    core::{IntoContainerPort, WaitFor},
    runners::AsyncRunner,
    ContainerAsync, GenericImage, ImageExt,
};
use tokio::sync::Notify;
use tracing::{debug, debug_span, Instrument};

static CRYPTO_INIT: Once = Once::new();

/// Test configuration for integration tests.
///
/// Do not destructure this struct if you need the database: the `container` field would be
/// dropped implicitly, which kills the database container prematurely.
#[allow(dead_code)]
pub struct TestContext {
    pub config: Config,
    pub server: TestServer,
    pub app_state: AppState,
    // Optional database
    pub container: Option<ContainerAsync<GenericImage>>,
}

#[builder]
pub async fn test_context(#[builder(default = false)] use_database: bool, auth_registry: Option<AuthRegistry>) -> TestContext {
    CRYPTO_INIT.call_once(|| {
        rustls::crypto::ring::default_provider()
            .install_default()
            .expect("Failed to install default crypto provider");
    });

    // Set up logging
    std::env::set_var("RUST_LOG", "debug,sqlx=info");
    pacman_server::logging::setup_logging();
    let (database_url, container) = if use_database {
        let db = "testdb";
        let user = "testuser";
        let password = "testpass";

        // Create container request
        let container_request = GenericImage::new("postgres", "15")
            .with_exposed_port(5432.tcp())
            .with_wait_for(WaitFor::message_on_stderr("database system is ready to accept connections"))
            .with_env_var("POSTGRES_DB", db)
            .with_env_var("POSTGRES_USER", user)
            .with_env_var("POSTGRES_PASSWORD", password);

        tracing::debug!(request_image = ?container_request.image(), "Acquiring postgres testcontainer");
        let start = std::time::Instant::now();
        let container = container_request.start().await.unwrap();
        let duration: std::time::Duration = start.elapsed();
        let host = container.get_host().await.unwrap();
        let port = container.get_host_port_ipv4(5432).await.unwrap();

        tracing::debug!(host = %host, port = %port, duration = ?duration, "Test database ready");
        (
            Some(format!("postgresql://{user}:{password}@{host}:{port}/{db}?sslmode=disable")),
            Some(container),
        )
    } else {
        (None, None)
    };

    let config = Config {
        database_url: database_url.clone().unwrap_or_default(),
        discord_client_id: "test_discord_client_id".to_string(),
        discord_client_secret: "test_discord_client_secret".to_string(),
        github_client_id: "test_github_client_id".to_string(),
        github_client_secret: "test_github_client_secret".to_string(),
        s3_access_key: "test_s3_access_key".to_string(),
        s3_secret_access_key: "test_s3_secret_access_key".to_string(),
        s3_bucket_name: "test_bucket".to_string(),
        s3_public_base_url: "https://test.example.com".to_string(),
        port: 0, // Will be set by test server
        host: "127.0.0.1".parse().unwrap(),
        shutdown_timeout_seconds: 5,
        public_base_url: "http://localhost:3000".to_string(),
        jwt_secret: "test_jwt_secret_key_for_testing_only".to_string(),
    };

    let db = if use_database {
        let db = pacman_server::data::pool::create_pool(use_database, &database_url.unwrap(), 5).await;

        // Run migrations
        sqlx::migrate!("./migrations")
            .run(&db)
            .instrument(debug_span!("running_migrations"))
            .await
            .expect("Failed to run database migrations");
        debug!("Database migrations ran successfully");

        db
    } else {
        // Create a dummy database pool that will fail gracefully
        let dummy_url = "postgresql://dummy:dummy@localhost:5432/dummy?sslmode=disable";
        pacman_server::data::pool::create_pool(false, dummy_url, 1).await
    };

    // Create auth registry
    let auth = auth_registry.unwrap_or_else(|| AuthRegistry::new(&config).expect("Failed to create auth registry"));

    // Create app state
    let notify = Arc::new(Notify::new());
    let app_state = AppState::new_with_database(config.clone(), auth, db, notify, use_database).await;

    // Set health status based on database usage
    {
        let mut health = app_state.health.write().await;
        health.set_migrations(use_database);
        health.set_database(use_database);
    }

    let router = create_router(app_state.clone());
    let mut server = TestServer::new(router).unwrap();
    server.save_cookies();

    TestContext {
        server,
        app_state,
        config,
        container,
    }
}
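The warning on `TestContext` matters in practice: binding the whole struct keeps the Postgres container alive, while destructuring without binding `container` stops it immediately. A short illustration:

// Keeps the container alive for the whole test:
let context = test_context().use_database(true).call().await;
let response = context.server.get("/api/health").await;

// By contrast, `let TestContext { server, .. } = ...` drops the unbound
// `container` field at once, stopping Postgres before the test runs.
// tests/health.rs binds `container` explicitly so it can `drop` it at
// a chosen point instead.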
26
pacman-server/tests/health.rs
Normal file
@@ -0,0 +1,26 @@
mod common;

use pretty_assertions::assert_eq;

use crate::common::{test_context, TestContext};

/// Test health endpoint functionality with real database connectivity
#[tokio::test]
async fn test_health_endpoint() {
    let TestContext { server, container, .. } = test_context().use_database(true).call().await;

    // First, verify health endpoint works when database is healthy
    let response = server.get("/api/health").await;
    assert_eq!(response.status_code(), 200);
    let health_json: serde_json::Value = response.json();
    assert_eq!(health_json["ok"], true);

    // Now kill the database container to simulate database failure
    drop(container);

    // Now verify health endpoint reports bad health
    let response = server.get("/api/health?force").await;
    assert_eq!(response.status_code(), 503); // SERVICE_UNAVAILABLE
    let health_json: serde_json::Value = response.json();
    assert_eq!(health_json["ok"], false);
}
272
pacman-server/tests/oauth.rs
Normal file
@@ -0,0 +1,272 @@
use std::{collections::HashMap, sync::Arc};

use pacman_server::{
    auth::{
        provider::{AuthUser, MockOAuthProvider, OAuthProvider},
        AuthRegistry,
    },
    data::user as user_repo,
    session,
};
use pretty_assertions::assert_eq;
use time::Duration;

mod common;
use crate::common::{test_context, TestContext};

/// Test the basic authorization redirect flow
#[tokio::test]
async fn test_oauth_authorization_redirect() {
    let mut mock = MockOAuthProvider::new();
    mock.expect_authorize().returning(|encoding_key| {
        Ok(pacman_server::auth::provider::AuthorizeInfo {
            authorize_url: "https://example.com/auth".parse().unwrap(),
            session_token: session::create_pkce_session("verifier", "state", encoding_key),
        })
    });

    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    };

    let TestContext { server, app_state, .. } = test_context().auth_registry(mock_registry).call().await;

    let response = server.get("/api/auth/mock").await;
    assert_eq!(response.status_code(), 303);
    assert_eq!(response.headers().get("location").unwrap(), "https://example.com/auth");

    let session_cookie = response.cookie("session");
    let claims = session::decode_jwt(session_cookie.value(), &app_state.jwt_decoding_key).unwrap();
    assert!(session::is_pkce_session(&claims), "A PKCE session should be set");
}

/// Test new user registration via OAuth callback
#[tokio::test]
async fn test_new_user_registration() {
    let mut mock = MockOAuthProvider::new();
    mock.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "newuser123".to_string(),
            username: "new_user".to_string(),
            name: None,
            email: Some("new@example.com".to_string()),
            email_verified: true,
            avatar_url: None,
        })
    });

    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    };

    let context = test_context().use_database(true).auth_registry(mock_registry).call().await;

    let response = context.server.get("/api/auth/mock/callback?code=a&state=b").await;
    assert_eq!(response.status_code(), 302);
    assert_eq!(response.headers().get("location").unwrap(), "/api/profile");

    // Verify user and oauth_account were created
    let user = user_repo::find_user_by_email(&context.app_state.db, "new@example.com")
        .await
        .unwrap()
        .expect("User should be created");
    assert_eq!(user.email, Some("new@example.com".to_string()));

    let providers = user_repo::list_user_providers(&context.app_state.db, user.id).await.unwrap();
    assert_eq!(providers.len(), 1);
    assert_eq!(providers[0].provider, "mock");
    assert_eq!(providers[0].provider_user_id, "newuser123");
}

/// Test sign-in for an existing user with an already-linked provider
#[tokio::test]
async fn test_existing_user_signin() {
    let mut mock = MockOAuthProvider::new();
    mock.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "existing123".to_string(),
            username: "existing_user".to_string(),
            name: None,
            email: Some("existing@example.com".to_string()),
            email_verified: true,
            avatar_url: None,
        })
    });

    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    };

    let context = test_context().use_database(true).auth_registry(mock_registry).call().await;

    // Pre-create the user and link
    let user = user_repo::create_user(&context.app_state.db, Some("existing@example.com"))
        .await
        .unwrap();
    user_repo::link_oauth_account(
        &context.app_state.db,
        user.id,
        "mock",
        "existing123",
        Some("existing@example.com"),
        Some("existing_user"),
        None,
        None,
    )
    .await
    .unwrap();

    let response = context.server.get("/api/auth/mock/callback?code=a&state=b").await;
    assert_eq!(response.status_code(), 302, "Should sign in successfully");
    assert_eq!(response.headers().get("location").unwrap(), "/api/profile");

    // Verify no new user was created
    let users = sqlx::query("SELECT * FROM users")
        .fetch_all(&context.app_state.db)
        .await
        .unwrap();
    assert_eq!(users.len(), 1, "No new user should be created");
}

/// Test implicit account linking via a shared verified email
#[tokio::test]
async fn test_implicit_account_linking() {
    // 1. User signs in with 'provider-a'
    let mut mock_a = MockOAuthProvider::new();
    mock_a.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "user_a_123".to_string(),
            username: "user_a".to_string(),
            name: None,
            email: Some("shared@example.com".to_string()),
            email_verified: true,
            avatar_url: None,
        })
    });

    // 2. Later, the same user signs in with 'provider-b'
    let mut mock_b = MockOAuthProvider::new();
    mock_b.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "user_b_456".to_string(),
            username: "user_b".to_string(),
            name: None,
            email: Some("shared@example.com".to_string()),
            email_verified: true,
            avatar_url: None,
        })
    });

    let provider_a: Arc<dyn OAuthProvider> = Arc::new(mock_a);
    let provider_b: Arc<dyn OAuthProvider> = Arc::new(mock_b);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("provider-a", provider_a), ("provider-b", provider_b)]),
    };

    let context = test_context().use_database(true).auth_registry(mock_registry).call().await;

    // Action 1: Sign in with provider-a, creating the initial user
    let response1 = context.server.get("/api/auth/provider-a/callback?code=a&state=b").await;
    assert_eq!(response1.status_code(), 302);

    let user = user_repo::find_user_by_email(&context.app_state.db, "shared@example.com")
        .await
        .unwrap()
        .unwrap();
    let providers1 = user_repo::list_user_providers(&context.app_state.db, user.id).await.unwrap();
    assert_eq!(providers1.len(), 1);
    assert_eq!(providers1[0].provider, "provider-a");

    // Action 2: Sign in with provider-b
    let response2 = context.server.get("/api/auth/provider-b/callback?code=a&state=b").await;
    assert_eq!(response2.status_code(), 302);

    // Assertions: No new user, but a new provider link
    let users = sqlx::query("SELECT * FROM users")
        .fetch_all(&context.app_state.db)
        .await
        .unwrap();
    assert_eq!(users.len(), 1, "A new user should NOT have been created");

    let providers2 = user_repo::list_user_providers(&context.app_state.db, user.id).await.unwrap();
    assert_eq!(providers2.len(), 2, "A new provider should have been linked");
    assert!(providers2.iter().any(|p| p.provider == "provider-a"));
    assert!(providers2.iter().any(|p| p.provider == "provider-b"));
}

/// Test that an unverified email does NOT link accounts
#[tokio::test]
async fn test_unverified_email_creates_new_account() {
    let mut mock = MockOAuthProvider::new();
    mock.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "unverified123".to_string(),
            username: "unverified_user".to_string(),
            name: None,
            email: Some("unverified@example.com".to_string()),
            email_verified: false,
            avatar_url: None,
        })
    });

    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    };

    let context = test_context().use_database(true).auth_registry(mock_registry).call().await;

    // Pre-create a user with the same email, but they will not be linked.
    user_repo::create_user(&context.app_state.db, Some("unverified@example.com"))
        .await
        .unwrap();

    let response = context.server.get("/api/auth/mock/callback?code=a&state=b").await;
    assert_eq!(response.status_code(), 302);

    // Should create a second user because the email wasn't trusted for linking
    let users = sqlx::query("SELECT * FROM users")
        .fetch_all(&context.app_state.db)
        .await
        .unwrap();
    assert_eq!(users.len(), 2, "A new user should be created for the unverified email");
}

/// Test logout functionality
#[tokio::test]
async fn test_logout_functionality() {
    let mut mock = MockOAuthProvider::new();
    mock.expect_handle_callback().returning(|_, _, _, _| {
        Ok(AuthUser {
            id: "123".to_string(),
            username: "testuser".to_string(),
            name: None,
            email: Some("test@example.com".to_string()),
            email_verified: true,
            avatar_url: None,
        })
    });
    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    let mock_registry = AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    };

    let context = test_context().use_database(true).auth_registry(mock_registry).call().await;

    // Sign in to establish a session
    let response = context.server.get("/api/auth/mock/callback?code=a&state=b").await;
    assert_eq!(response.status_code(), 302);

    // Test that the logout handler clears the session cookie and redirects
    let response = context.server.get("/api/logout").await;

    assert_eq!(response.status_code(), 302);
    assert!(response.headers().contains_key("location"));

    let cookie = response.cookie("session");
    assert_eq!(cookie.value(), "removed");
    assert_eq!(cookie.max_age(), Some(Duration::ZERO));
}
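Every test above repeats the same arrangement: build a `MockOAuthProvider`, wrap it in an `AuthRegistry`, and hand it to `test_context`. A condensed sketch of that shared setup, with a hypothetical helper name:

use std::{collections::HashMap, sync::Arc};

// Hypothetical helper; each test above inlines this pattern instead.
fn registry_with(mock: MockOAuthProvider) -> AuthRegistry {
    let provider: Arc<dyn OAuthProvider> = Arc::new(mock);
    AuthRegistry {
        providers: HashMap::from([("mock", provider)]),
    }
}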
55
pacman-server/tests/sessions.rs
Normal file
@@ -0,0 +1,55 @@
mod common;
use crate::common::test_context;
use cookie::Cookie;
use pacman_server::{data::user as user_repo, session};

use pretty_assertions::assert_eq;

#[tokio::test]
async fn test_session_management() {
    let context = test_context().use_database(true).call().await;

    // 1. Create a user and link a provider account
    let user = user_repo::create_user(&context.app_state.db, Some("test@example.com"))
        .await
        .unwrap();
    let provider_account = user_repo::link_oauth_account(
        &context.app_state.db,
        user.id,
        "test_provider",
        "123",
        Some("test@example.com"),
        Some("testuser"),
        None,
        None,
    )
    .await
    .unwrap();

    // 2. Create a session token for the user
    let auth_user = pacman_server::auth::provider::AuthUser {
        id: provider_account.provider_user_id,
        username: provider_account.username.unwrap(),
        name: provider_account.display_name,
        email: user.email,
        email_verified: true,
        avatar_url: provider_account.avatar_url,
    };
    let token = session::create_jwt_for_user("test_provider", &auth_user, &context.app_state.jwt_encoding_key);

    // 3. Make a request to the protected route WITH the session, expect success
    let response = context
        .server
        .get("/api/profile")
        .add_cookie(Cookie::new(session::SESSION_COOKIE_NAME, token))
        .await;
    assert_eq!(response.status_code(), 200);

    // 4. Sign out
    let response = context.server.get("/api/logout").await;
    assert_eq!(response.status_code(), 302); // Redirect after logout

    // 5. Make a request to the protected route without a session, expect failure
    let response = context.server.get("/api/profile").await;
    assert_eq!(response.status_code(), 401); // Unauthorized without session
}
1768
pacman/Cargo.lock
generated
Normal file
95
pacman/Cargo.toml
Normal file
@@ -0,0 +1,95 @@
[package]
name = "pacman"
version = "0.81.1"
authors.workspace = true
edition.workspace = true
rust-version = "1.86.0"
description = "A cross-platform retro Pac-Man clone, written in Rust and supported by SDL2"
readme.workspace = true
homepage.workspace = true
repository.workspace = true
license.workspace = true
keywords.workspace = true
categories.workspace = true
publish.workspace = true
exclude = ["/assets/unpacked/**", "/assets/site/**", "/bacon.toml", "/Justfile"]
default-run = "pacman"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
bevy_ecs = "0.16.1"
glam = "0.30.9"
pathfinding = "4.14"
tracing = { version = "0.1.41", features = ["max_level_trace", "release_max_level_debug"] }
tracing-error = "0.2.0"
tracing-subscriber = { version = "0.3.20", features = ["env-filter"] }
time = { version = "0.3.44", features = ["formatting", "macros"] }
thiserror = "2.0.17"
anyhow = "1.0"
smallvec = "1.15.1"
bitflags = "2.9.4"
micromap = "0.1.0"
circular-buffer = "=1.1.0"
parking_lot = "0.12.5"
strum = "0.27.2"
strum_macros = "0.27.2"
thousands = "0.2.0"
num-width = "0.1.0"
# While not actively used in code, `build.rs` generates code that relies on this. Keep the versions synchronized.
phf = { version = "0.13.1", features = ["macros"] }

# Windows-specific dependencies
[target.'cfg(windows)'.dependencies]
# Used for customizing console output on Windows; both are required due to the `windows` crate having poor Result handling with `GetStdHandle`.
windows = { version = "0.62.1", features = ["Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Console"] }
windows-sys = { version = "0.61.2", features = ["Win32_System_Console"] }

# Desktop-specific dependencies
[target.'cfg(not(target_os = "emscripten"))'.dependencies]
# On desktop platforms, build SDL2 with cargo-vcpkg
sdl2 = { version = "0.38", default-features = false, features = ["image", "ttf", "gfx", "mixer", "unsafe_textures", "static-link", "use-vcpkg"] }
rand = { version = "0.9.2", default-features = false, features = ["thread_rng"] }
rust-embed = "8.7.2"
spin_sleep = "1.3.3"

# Browser-specific dependencies
[target.'cfg(target_os = "emscripten")'.dependencies]
# On Emscripten, we don't use cargo-vcpkg
sdl2 = { version = "0.38", default-features = false, features = ["image", "ttf", "gfx", "mixer", "unsafe_textures"] }
# TODO: Document why Emscripten cannot use `os_rng`.
rand = { version = "0.9.2", default-features = false, features = ["small_rng", "os_rng"] }
libc = "0.2.177" # TODO: Describe why this is required.

[dev-dependencies]
pretty_assertions = "1.4.1"
speculoos = "0.13.0"

[build-dependencies]
phf = { version = "0.13.1", features = ["macros"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.145"

# phf generates runtime code which machete will not detect
[package.metadata.cargo-machete]
ignored = ["phf"]

[package.metadata.vcpkg]
dependencies = ["sdl2", "sdl2-image", "sdl2-ttf", "sdl2-gfx", "sdl2-mixer"]
git = "https://github.com/microsoft/vcpkg"
rev = "2025.10.17" # to check for a new one, check https://github.com/microsoft/vcpkg/releases

[package.metadata.vcpkg.target]
x86_64-pc-windows-msvc = { triplet = "x64-windows-static-md" }
x86_64-unknown-linux-gnu = { triplet = "x64-linux" }
x86_64-apple-darwin = { triplet = "x64-osx" }
aarch64-apple-darwin = { triplet = "arm64-osx" }

[features]
# Windows-specific features
force-console = []
default = []

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(coverage,coverage_nightly)', 'cfg(use_console)'] }
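The `phf` note above means the build script emits source that references `phf` at runtime even though no hand-written code does. A hedged sketch of what such generated code might look like; the actual build.rs output is not part of this diff, and these map entries are invented:

use phf::phf_map;

// Hypothetical generated table; real keys and values come from build.rs.
static EXAMPLE_TABLE: phf::Map<&'static str, u32> = phf_map! {
    "alpha" => 1,
    "beta" => 2,
};

fn main() {
    assert_eq!(EXAMPLE_TABLE.get("alpha"), Some(&1));
}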
27 binary image assets were also modified in this range; the diff viewer reports identical sizes before and after for each (16 KiB, 1.1 KiB, and 107 B to 215 B), with filenames not captured.