Compare commits

..

17 Commits

Author SHA1 Message Date
cfb847f2e5 feat: holiday exclusion logic for ICS command 2025-09-13 02:20:27 -05:00
e7d47f1f96 feat: implement ICS command 2025-09-13 01:50:18 -05:00
9a48587479 chore: drop redis 2025-09-13 01:49:47 -05:00
624247ee14 feat: basic activity status 2025-09-13 01:04:46 -05:00
430e2a255b fix: avoid crashing due to odd url parse 2025-09-13 01:01:49 -05:00
bbc78131ec feat: setup recoverable/unrecoverable job error distinction, delete unrecoverable jobs 2025-09-13 00:48:11 -05:00
77ab71d4d5 feat: map RAILWAY_DEPLOYMENT_DRAINING_SECONDS to SHUTDOWN_TIMEOUT 2025-09-13 00:36:11 -05:00
9d720bb0a7 feat: implement common job trait & better interface for scheduler & workers 2025-09-13 00:17:53 -05:00
dcc564dee6 fix: credit_hour_session is optional 2025-09-12 23:50:36 -05:00
4ca55a1fd4 feat: schedule & query jobs efficiently in batches 2025-09-12 23:41:27 -05:00
a6e7adcaef fix: improve json error handling, make email_address optional 2025-09-12 23:36:07 -05:00
752c855dec chore: drop env prefixed config vars 2025-09-12 22:39:32 -05:00
14b02df8f4 feat: much better JSON logging, project-wide logging improvements, better use of debug/trace levels, field attributes 2025-09-12 22:01:14 -05:00
00cb209052 fix: disable poor error snippet 2025-09-12 21:40:07 -05:00
dfc05a2789 feat: setup rate limiter middleware & config 2025-09-12 21:12:06 -05:00
fe798e1867 fix: avoid COPY of non existent dir, add .dockerignore 2025-09-12 20:57:33 -05:00
39688f800f chore: update Dockerfile rust to 1.89.0 2025-09-12 20:53:24 -05:00
31 changed files with 1628 additions and 338 deletions

73
.dockerignore Normal file

@@ -0,0 +1,73 @@
# Build artifacts
target/
**/target/
# Development files
.env
.env.local
.env.*.local
# IDE and editor files
.vscode/
.idea/
*.swp
*.swo
*~
# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Git
.git/
.gitignore
# Documentation
README.md
docs/
*.md
# Go files (since this is a Rust project)
go/
# Database migrations (if not needed at runtime)
migrations/
diesel_migrations/
diesel.toml
# Development configuration
bacon.toml
.cargo/config.toml
# Logs
*.log
logs/
# Temporary files
tmp/
temp/
*.tmp
# Test files
tests/
**/tests/
*_test.rs
*_tests.rs
# Coverage reports
coverage/
*.gcov
*.gcno
*.gcda
# Profiling
*.prof
# Backup files
*.bak
*.backup

88
Cargo.lock generated

@@ -168,7 +168,7 @@ dependencies = [
[[package]]
name = "banner"
version = "0.1.0"
version = "0.2.0"
dependencies = [
"anyhow",
"async-trait",
@@ -184,15 +184,16 @@ dependencies = [
"futures",
"governor",
"http 1.3.1",
"num-format",
"once_cell",
"poise",
"rand 0.9.2",
"redis",
"regex",
"reqwest 0.12.23",
"reqwest-middleware",
"serde",
"serde_json",
"serde_path_to_error",
"serenity",
"sqlx",
"thiserror 2.0.16",
@@ -336,20 +337,6 @@ dependencies = [
"windows-link 0.2.0",
]
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"futures-core",
"memchr",
"pin-project-lite",
"tokio",
"tokio-util",
]
[[package]]
name = "command_attr"
version = "0.5.3"
@@ -1665,16 +1652,6 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "num-bigint"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9"
dependencies = [
"num-integer",
"num-traits",
]
[[package]]
name = "num-bigint-dig"
version = "0.8.4"
@@ -1698,6 +1675,16 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "num-format"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3"
dependencies = [
"arrayvec",
"itoa",
]
[[package]]
name = "num-integer"
version = "0.1.46"
@@ -2031,17 +2018,6 @@ version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "r2d2"
version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93"
dependencies = [
"log",
"parking_lot",
"scheduled-thread-pool",
]
[[package]]
name = "rand"
version = "0.8.5"
@@ -2110,29 +2086,6 @@ dependencies = [
"bitflags 2.9.4",
]
[[package]]
name = "redis"
version = "0.32.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7cd3650deebc68526b304898b192fa4102a4ef0b9ada24da096559cb60e0eef8"
dependencies = [
"bytes",
"cfg-if",
"combine",
"futures-util",
"itoa",
"num-bigint",
"percent-encoding",
"pin-project-lite",
"r2d2",
"ryu",
"sha1_smol",
"socket2 0.6.0",
"tokio",
"tokio-util",
"url",
]
[[package]]
name = "redox_syscall"
version = "0.5.17"
@@ -2454,15 +2407,6 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "scheduled-thread-pool"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
dependencies = [
"parking_lot",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -2641,12 +2585,6 @@ dependencies = [
"digest",
]
[[package]]
name = "sha1_smol"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
[[package]]
name = "sha2"
version = "0.10.9"

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "banner"
version = "0.1.0"
version = "0.2.0"
edition = "2024"
default-run = "banner"
@@ -20,7 +20,6 @@ futures = "0.3"
http = "1.3.1"
poise = "0.6.1"
rand = "0.9.2"
redis = { version = "0.32.5", features = ["tokio-comp", "r2d2"] }
regex = "1.10"
reqwest = { version = "0.12.23", features = ["json", "cookies"] }
reqwest-middleware = { version = "0.4.2", features = ["json"] }
@@ -43,5 +42,7 @@ tracing-subscriber = { version = "0.3.20", features = ["env-filter", "json"] }
url = "2.5"
governor = "0.10.1"
once_cell = "1.21.3"
serde_path_to_error = "0.1.17"
num-format = "0.4.4"
[dev-dependencies]

Dockerfile

@@ -1,5 +1,5 @@
# Build Stage
ARG RUST_VERSION=1.86.0
ARG RUST_VERSION=1.89.0
FROM rust:${RUST_VERSION}-bookworm AS builder
# Install build dependencies
@@ -52,7 +52,6 @@ RUN addgroup --gid $GID $APP_USER \
# Copy application files
COPY --from=builder --chown=$APP_USER:$APP_USER /usr/src/banner/target/release/banner ${APP}/banner
COPY --from=builder --chown=$APP_USER:$APP_USER /usr/src/banner/src/fonts ${APP}/fonts
# Set proper permissions
RUN chmod +x ${APP}/banner

View File

@@ -7,8 +7,16 @@ use std::{
};
use crate::banner::{
BannerSession, SessionPool, errors::BannerApiError, json::parse_json_with_context,
middleware::TransparentMiddleware, models::*, nonce, query::SearchQuery, util::user_agent,
BannerSession, SessionPool, create_shared_rate_limiter_with_config,
errors::BannerApiError,
json::parse_json_with_context,
middleware::TransparentMiddleware,
models::*,
nonce,
query::SearchQuery,
rate_limit_middleware::RateLimitMiddleware,
rate_limiter::{RateLimitConfig, SharedRateLimiter, create_shared_rate_limiter},
util::user_agent,
};
use anyhow::{Context, Result, anyhow};
use cookie::Cookie;
@@ -30,6 +38,13 @@ pub struct BannerApi {
impl BannerApi {
/// Creates a new Banner API client.
pub fn new(base_url: String) -> Result<Self> {
Self::new_with_config(base_url, RateLimitConfig::default())
}
/// Creates a new Banner API client with custom rate limiting configuration.
pub fn new_with_config(base_url: String, rate_limit_config: RateLimitConfig) -> Result<Self> {
let rate_limiter = create_shared_rate_limiter_with_config(rate_limit_config);
let http = ClientBuilder::new(
Client::builder()
.cookie_store(false)
@@ -42,6 +57,7 @@ impl BannerApi {
.context("Failed to create HTTP client")?,
)
.with(TransparentMiddleware)
.with(RateLimitMiddleware::new(rate_limiter.clone()))
.build();
Ok(Self {
@@ -50,7 +66,6 @@ impl BannerApi {
base_url,
})
}
/// Validates offset parameter for search methods.
fn validate_offset(offset: i32) -> Result<()> {
if offset <= 0 {
@@ -160,10 +175,9 @@ impl BannerApi {
debug!(
term = term,
query = ?query,
sort = sort,
sort_descending = sort_descending,
"Searching for courses with params: {:?}", params
subject = query.get_subject().map(|s| s.as_str()).unwrap_or("all"),
max_results = query.get_max_results(),
"Searching for courses"
);
let response = self
@@ -184,7 +198,7 @@ impl BannerApi {
let search_result: SearchResult = parse_json_with_context(&body).map_err(|e| {
BannerApiError::RequestFailed(anyhow!(
"Failed to parse search response (status={status}, url={url}): {e}\nBody: {body}"
"Failed to parse search response (status={status}, url={url}): {e}"
))
})?;
@@ -303,6 +317,12 @@ impl BannerApi {
sort: &str,
sort_descending: bool,
) -> Result<SearchResult, BannerApiError> {
debug!(
term = term,
subject = query.get_subject().map(|s| s.as_str()).unwrap_or("all"),
max_results = query.get_max_results(),
"Starting course search"
);
self.perform_search(term, query, sort, sort_descending)
.await
}
@@ -313,6 +333,8 @@ impl BannerApi {
term: &str,
crn: &str,
) -> Result<Option<Course>, BannerApiError> {
debug!(term = term, crn = crn, "Looking up course by CRN");
let query = SearchQuery::new()
.course_reference_number(crn)
.max_results(1);

View File

@@ -1,18 +1,34 @@
//! JSON parsing utilities for the Banner API client.
use anyhow::Result;
use serde_json;
/// Attempt to parse JSON and, on failure, include a contextual snippet of the
/// line where the error occurred. This prevents dumping huge JSON bodies to logs.
pub fn parse_json_with_context<T: serde::de::DeserializeOwned>(body: &str) -> Result<T> {
match serde_json::from_str::<T>(body) {
let jd = &mut serde_json::Deserializer::from_str(body);
match serde_path_to_error::deserialize(jd) {
Ok(value) => Ok(value),
Err(err) => {
let (line, column) = (err.line(), err.column());
let snippet = build_error_snippet(body, line, column, 80);
Err(anyhow::anyhow!(
"{err} at line {line}, column {column}\nSnippet:\n{snippet}",
))
let inner_err = err.inner();
let (line, column) = (inner_err.line(), inner_err.column());
let snippet = build_error_snippet(body, line, column, 20);
let path = err.path().to_string();
let msg = inner_err.to_string();
let loc = format!(" at line {line} column {column}");
let msg_without_loc = msg.strip_suffix(&loc).unwrap_or(&msg).to_string();
let mut final_err = String::new();
if !path.is_empty() && path != "." {
final_err.push_str(&format!("for path '{}' ", path));
}
final_err.push_str(&format!(
"({msg_without_loc}) at line {line} column {column}"
));
final_err.push_str(&format!("\n{snippet}"));
Err(anyhow::anyhow!(final_err))
}
}
}
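
For reference, a minimal sketch of the behavior this enables: serde_path_to_error wraps the serde_json deserializer so a failure carries the JSON path as well as the line/column. The struct and body below are hypothetical, not Banner API shapes.

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Sample {
    count: u32,
}

fn demo() {
    // "count" is a string here, so deserialization fails at a known path.
    let body = r#"{ "count": "oops" }"#;
    let jd = &mut serde_json::Deserializer::from_str(body);
    let err = serde_path_to_error::deserialize::<_, Sample>(jd).unwrap_err();
    // err.path() renders as "count"; err.inner() keeps serde_json's line/column,
    // which parse_json_with_context folds into one message plus a short snippet
    // of the offending line instead of dumping the whole body.
    println!("path={} error={}", err.path(), err.inner());
}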

View File

@@ -41,7 +41,7 @@ impl Middleware for TransparentMiddleware {
}
}
Err(error) => {
warn!(?error, "Request failed (middleware)");
warn!(error = ?error, "Request failed (middleware)");
Err(error)
}
}

View File

@@ -14,6 +14,8 @@ pub mod json;
pub mod middleware;
pub mod models;
pub mod query;
pub mod rate_limiter;
pub mod rate_limit_middleware;
pub mod session;
pub mod util;
@@ -21,4 +23,5 @@ pub use api::*;
pub use errors::*;
pub use models::*;
pub use query::*;
pub use rate_limiter::*;
pub use session::*;

View File

@@ -33,7 +33,7 @@ pub struct FacultyItem {
#[serde(deserialize_with = "deserialize_string_to_u32")]
pub course_reference_number: u32, // CRN, e.g 27294
pub display_name: String, // "LastName, FirstName"
pub email_address: String, // e.g. FirstName.LastName@utsaedu
pub email_address: Option<String>, // e.g. FirstName.LastName@utsaedu
pub primary_indicator: bool,
pub term: String, // e.g "202420"
}
@@ -63,7 +63,7 @@ pub struct MeetingTime {
pub campus: Option<String>, // campus code, e.g 11
pub campus_description: Option<String>, // name of campus, e.g Main Campus
pub course_reference_number: String, // CRN, e.g 27294
pub credit_hour_session: f64, // e.g. 30
pub credit_hour_session: Option<f64>, // e.g. 30
pub hours_week: f64, // e.g. 30
pub meeting_schedule_type: String, // e.g AFF
pub meeting_type: String, // e.g HB, H2, H1, OS, OA, OH, ID, FF
@@ -148,6 +148,8 @@ pub enum DayOfWeek {
impl DayOfWeek {
/// Convert to short string representation
///
/// Do not change these, these are used for ICS generation. Casing does not matter though.
pub fn to_short_string(self) -> &'static str {
match self {
DayOfWeek::Monday => "Mo",

View File

@@ -160,6 +160,16 @@ impl SearchQuery {
self
}
/// Gets the subject field
pub fn get_subject(&self) -> Option<&String> {
self.subject.as_ref()
}
/// Gets the max_results field
pub fn get_max_results(&self) -> i32 {
self.max_results
}
/// Converts the query into URL parameters for the Banner API
pub fn to_params(&self) -> HashMap<String, String> {
let mut params = HashMap::new();
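
A quick sketch of how the new accessors read back builder state (assuming the builder stores values verbatim); these are what the search logging in the API client uses:

let query = SearchQuery::new().subject("CS").max_results(500);
assert_eq!(query.get_subject().map(|s| s.as_str()), Some("CS"));
assert_eq!(query.get_max_results(), 500);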

View File

@@ -0,0 +1,101 @@
//! HTTP middleware that enforces rate limiting for Banner API requests.
use crate::banner::rate_limiter::{RequestType, SharedRateLimiter};
use http::Extensions;
use reqwest::{Request, Response};
use reqwest_middleware::{Middleware, Next};
use tracing::{debug, trace, warn};
use url::Url;
/// Middleware that enforces rate limiting based on request URL patterns
pub struct RateLimitMiddleware {
rate_limiter: SharedRateLimiter,
}
impl RateLimitMiddleware {
/// Creates a new rate limiting middleware
pub fn new(rate_limiter: SharedRateLimiter) -> Self {
Self { rate_limiter }
}
/// Determines the request type based on the URL path
fn get_request_type(url: &Url) -> RequestType {
let path = url.path();
if path.contains("/registration")
|| path.contains("/selfServiceMenu")
|| path.contains("/term/termSelection")
{
RequestType::Session
} else if path.contains("/searchResults") || path.contains("/classSearch") {
RequestType::Search
} else if path.contains("/getTerms")
|| path.contains("/getSubjects")
|| path.contains("/getCampuses")
{
RequestType::Metadata
} else if path.contains("/resetDataForm") {
RequestType::Reset
} else {
// Default to search for unknown endpoints
RequestType::Search
}
}
}
#[async_trait::async_trait]
impl Middleware for RateLimitMiddleware {
async fn handle(
&self,
req: Request,
extensions: &mut Extensions,
next: Next<'_>,
) -> std::result::Result<Response, reqwest_middleware::Error> {
let request_type = Self::get_request_type(req.url());
trace!(
url = %req.url(),
request_type = ?request_type,
"Rate limiting request"
);
// Wait for permission to make the request
self.rate_limiter.wait_for_permission(request_type).await;
trace!(
url = %req.url(),
request_type = ?request_type,
"Rate limit permission granted, making request"
);
// Make the actual request
let response_result = next.run(req, extensions).await;
match response_result {
Ok(response) => {
if response.status().is_success() {
trace!(
url = %response.url(),
status = response.status().as_u16(),
"Request completed successfully"
);
} else {
warn!(
url = %response.url(),
status = response.status().as_u16(),
"Request completed with error status"
);
}
Ok(response)
}
Err(error) => {
warn!(
url = ?error.url(),
error = ?error,
"Request failed"
);
Err(error)
}
}
}
}
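
A small, module-internal sketch of how URLs get bucketed by get_request_type; the hostname is illustrative, only the path substrings matched above matter:

#[cfg(test)]
mod tests {
    use super::*;
    use url::Url;

    #[test]
    fn classifies_by_path() {
        let search = Url::parse("https://banner.example.edu/ssb/searchResults/searchResults").unwrap();
        assert_eq!(RateLimitMiddleware::get_request_type(&search), RequestType::Search);

        let terms = Url::parse("https://banner.example.edu/ssb/getTerms").unwrap();
        assert_eq!(RateLimitMiddleware::get_request_type(&terms), RequestType::Metadata);

        // Anything unrecognized falls back to the search bucket.
        let other = Url::parse("https://banner.example.edu/ssb/unknown").unwrap();
        assert_eq!(RateLimitMiddleware::get_request_type(&other), RequestType::Search);
    }
}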

169
src/banner/rate_limiter.rs Normal file

@@ -0,0 +1,169 @@
//! Rate limiting for Banner API requests to prevent overwhelming the server.
use governor::{
Quota, RateLimiter,
clock::DefaultClock,
state::{InMemoryState, NotKeyed},
};
use std::num::NonZeroU32;
use std::sync::Arc;
use std::time::Duration;
use tracing::{debug, trace, warn};
/// Different types of Banner API requests with different rate limits
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum RequestType {
/// Session creation and management (very conservative)
Session,
/// Course search requests (moderate)
Search,
/// Term and metadata requests (moderate)
Metadata,
/// Data form resets (low priority)
Reset,
}
/// Rate limiter configuration for different request types
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
/// Requests per minute for session operations
pub session_rpm: u32,
/// Requests per minute for search operations
pub search_rpm: u32,
/// Requests per minute for metadata operations
pub metadata_rpm: u32,
/// Requests per minute for reset operations
pub reset_rpm: u32,
/// Burst allowance (extra requests allowed in short bursts)
pub burst_allowance: u32,
}
impl Default for RateLimitConfig {
fn default() -> Self {
Self {
// Very conservative for session creation
session_rpm: 6, // 1 every 10 seconds
// Moderate for search operations
search_rpm: 30, // 1 every 2 seconds
// Moderate for metadata
metadata_rpm: 20, // 1 every 3 seconds
// Low for resets
reset_rpm: 10, // 1 every 6 seconds
// Allow small bursts
burst_allowance: 3,
}
}
}
/// A rate limiter that manages different request types with different limits
pub struct BannerRateLimiter {
session_limiter: RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
search_limiter: RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
metadata_limiter: RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
reset_limiter: RateLimiter<NotKeyed, InMemoryState, DefaultClock>,
config: RateLimitConfig,
}
impl BannerRateLimiter {
/// Creates a new rate limiter with the given configuration
pub fn new(config: RateLimitConfig) -> Self {
let session_quota = Quota::with_period(Duration::from_secs(60) / config.session_rpm)
.unwrap()
.allow_burst(NonZeroU32::new(config.burst_allowance).unwrap());
let search_quota = Quota::with_period(Duration::from_secs(60) / config.search_rpm)
.unwrap()
.allow_burst(NonZeroU32::new(config.burst_allowance).unwrap());
let metadata_quota = Quota::with_period(Duration::from_secs(60) / config.metadata_rpm)
.unwrap()
.allow_burst(NonZeroU32::new(config.burst_allowance).unwrap());
let reset_quota = Quota::with_period(Duration::from_secs(60) / config.reset_rpm)
.unwrap()
.allow_burst(NonZeroU32::new(config.burst_allowance).unwrap());
Self {
session_limiter: RateLimiter::direct(session_quota),
search_limiter: RateLimiter::direct(search_quota),
metadata_limiter: RateLimiter::direct(metadata_quota),
reset_limiter: RateLimiter::direct(reset_quota),
config,
}
}
/// Waits for permission to make a request of the given type
pub async fn wait_for_permission(&self, request_type: RequestType) {
let limiter = match request_type {
RequestType::Session => &self.session_limiter,
RequestType::Search => &self.search_limiter,
RequestType::Metadata => &self.metadata_limiter,
RequestType::Reset => &self.reset_limiter,
};
trace!(request_type = ?request_type, "Waiting for rate limit permission");
// Wait until we can make the request
limiter.until_ready().await;
trace!(request_type = ?request_type, "Rate limit permission granted");
}
/// Checks if a request of the given type would be allowed immediately
pub fn check_permission(&self, request_type: RequestType) -> bool {
let limiter = match request_type {
RequestType::Session => &self.session_limiter,
RequestType::Search => &self.search_limiter,
RequestType::Metadata => &self.metadata_limiter,
RequestType::Reset => &self.reset_limiter,
};
limiter.check().is_ok()
}
/// Gets the current configuration
pub fn config(&self) -> &RateLimitConfig {
&self.config
}
/// Updates the rate limit configuration
pub fn update_config(&mut self, config: RateLimitConfig) {
self.config = config;
// Note: In a production system, you'd want to recreate the limiters
// with the new configuration, but for simplicity we'll just update
// the config field here.
warn!("Rate limit configuration updated - restart required for full effect");
}
}
impl Default for BannerRateLimiter {
fn default() -> Self {
Self::new(RateLimitConfig::default())
}
}
/// A shared rate limiter instance
pub type SharedRateLimiter = Arc<BannerRateLimiter>;
/// Creates a new shared rate limiter with default configuration
pub fn create_shared_rate_limiter() -> SharedRateLimiter {
Arc::new(BannerRateLimiter::default())
}
/// Creates a new shared rate limiter with custom configuration
pub fn create_shared_rate_limiter_with_config(config: RateLimitConfig) -> SharedRateLimiter {
Arc::new(BannerRateLimiter::new(config))
}
/// Conversion from config module's RateLimitingConfig to this module's RateLimitConfig
impl From<crate::config::RateLimitingConfig> for RateLimitConfig {
fn from(config: crate::config::RateLimitingConfig) -> Self {
Self {
session_rpm: config.session_rpm,
search_rpm: config.search_rpm,
metadata_rpm: config.metadata_rpm,
reset_rpm: config.reset_rpm,
burst_allowance: config.burst_allowance,
}
}
}
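
For orientation, a minimal sketch of how the limiter and middleware compose. This mirrors what BannerApi::new_with_config wires up, with a bare reqwest client standing in for the configured one:

use crate::banner::rate_limit_middleware::RateLimitMiddleware;
use crate::banner::{RateLimitConfig, RequestType, create_shared_rate_limiter_with_config};

async fn example() {
    let limiter = create_shared_rate_limiter_with_config(RateLimitConfig::default());

    // Direct use: block until the search bucket has capacity.
    limiter.wait_for_permission(RequestType::Search).await;

    // Or attach it to an HTTP client so every request is classified and throttled.
    let http = reqwest_middleware::ClientBuilder::new(reqwest::Client::new())
        .with(RateLimitMiddleware::new(limiter.clone()))
        .build();
    let _ = http; // ready for Banner API calls
}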

View File

@@ -16,7 +16,7 @@ use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::sync::{Mutex, Notify};
use tracing::{debug, info};
use tracing::{debug, info, trace};
use url::Url;
const SESSION_EXPIRY: Duration = Duration::from_secs(25 * 60); // 25 minutes
@@ -82,7 +82,7 @@ impl BannerSession {
/// Updates the last activity timestamp
pub fn touch(&mut self) {
debug!(id = self.unique_session_id, "Session was used");
trace!(id = self.unique_session_id, "Session was used");
self.last_activity = Some(Instant::now());
}
@@ -162,7 +162,7 @@ impl TermPool {
async fn release(&self, session: BannerSession) {
let id = session.unique_session_id.clone();
if session.is_expired() {
debug!(id = id, "Session is now expired, dropping.");
trace!(id = id, "Session is now expired, dropping.");
// Wake up a waiter, as it might need to create a new session
// if this was the last one.
self.notifier.notify_one();
@@ -174,10 +174,7 @@ impl TermPool {
let queue_size = queue.len();
drop(queue); // Release lock before notifying
debug!(
id = id,
"Session returned to pool. Queue size is now {queue_size}."
);
trace!(id = id, queue_size, "Session returned to pool");
self.notifier.notify_one();
}
}
@@ -213,13 +210,13 @@ impl SessionPool {
let mut queue = term_pool.sessions.lock().await;
if let Some(session) = queue.pop_front() {
if !session.is_expired() {
debug!(id = session.unique_session_id, "Reusing session from pool");
trace!(id = session.unique_session_id, "Reusing session from pool");
return Ok(PooledSession {
session: Some(session),
pool: Arc::clone(&term_pool),
});
} else {
debug!(
trace!(
id = session.unique_session_id,
"Popped an expired session, discarding."
);
@@ -232,7 +229,7 @@ impl SessionPool {
if *is_creating_guard {
// Another task is already creating a session. Release the lock and wait.
drop(is_creating_guard);
debug!("Another task is creating a session, waiting for notification...");
trace!("Another task is creating a session, waiting for notification...");
term_pool.notifier.notified().await;
// Loop back to the top to try the fast path again.
continue;
@@ -243,12 +240,12 @@ impl SessionPool {
drop(is_creating_guard);
// Race: wait for a session to be returned OR for the rate limiter to allow a new one.
debug!("Pool empty, racing notifier vs rate limiter...");
trace!("Pool empty, racing notifier vs rate limiter...");
tokio::select! {
_ = term_pool.notifier.notified() => {
// A session was returned while we were waiting!
// We are no longer the creator. Reset the flag and loop to race for the new session.
debug!("Notified that a session was returned. Looping to retry.");
trace!("Notified that a session was returned. Looping to retry.");
let mut guard = term_pool.is_creating.lock().await;
*guard = false;
drop(guard);
@@ -256,7 +253,7 @@ impl SessionPool {
}
_ = SESSION_CREATION_RATE_LIMITER.until_ready() => {
// The rate limit has elapsed. It's our job to create the session.
debug!("Rate limiter ready. Proceeding to create a new session.");
trace!("Rate limiter ready. Proceeding to create a new session.");
let new_session_result = self.create_session(&term).await;
// After creation, we are no longer the creator. Reset the flag
@@ -286,7 +283,7 @@ impl SessionPool {
/// Sets up initial session cookies by making required Banner API requests
pub async fn create_session(&self, term: &Term) -> Result<BannerSession> {
info!("setting up banner session for term {term}");
info!(term = %term, "setting up banner session");
// The 'register' or 'search' registration page
let initial_registration = self
@@ -317,7 +314,7 @@ impl SessionPool {
let ssb_cookie = cookies.get("SSB_COOKIE").unwrap();
let cookie_header = format!("JSESSIONID={}; SSB_COOKIE={}", jsessionid, ssb_cookie);
debug!(
trace!(
jsessionid = jsessionid,
ssb_cookie = ssb_cookie,
"New session cookies acquired"
@@ -457,7 +454,7 @@ impl SessionPool {
));
}
debug!(term = term, "successfully selected term");
trace!(term = term, "successfully selected term");
Ok(())
}
}

View File

@@ -23,8 +23,7 @@ async fn main() -> Result<()> {
// Load configuration
let config: Config = Figment::new()
.merge(Env::raw().only(&["DATABASE_URL"]))
.merge(Env::prefixed("APP_"))
.merge(Env::raw())
.extract()
.expect("Failed to load config");
@@ -34,7 +33,9 @@ async fn main() -> Result<()> {
);
// Create Banner API client
let banner_api = BannerApi::new(config.banner_base_url).expect("Failed to create BannerApi");
let banner_api =
BannerApi::new_with_config(config.banner_base_url, config.rate_limiting.into())
.expect("Failed to create BannerApi");
// Get current term
let term = Term::get_current().inner().to_string();
@@ -67,11 +68,11 @@ async fn main() -> Result<()> {
),
];
info!("Executing {} concurrent searches", queries.len());
info!(query_count = queries.len(), "Executing concurrent searches");
// Execute all searches concurrently
let search_futures = queries.into_iter().map(|(label, query)| {
info!("Starting search: {}", label);
info!(label = %label, "Starting search");
let banner_api = &banner_api;
let term = &term;
async move {

View File

@@ -77,7 +77,7 @@ pub async fn gcal(
)
.await?;
info!("gcal command completed for CRN: {}", crn);
info!(crn = %crn, "gcal command completed");
Ok(())
}

View File

@@ -1,8 +1,111 @@
//! ICS command implementation for generating calendar files.
use crate::banner::{Course, MeetingScheduleInfo};
use crate::bot::{Context, Error, utils};
use chrono::{Datelike, NaiveDate, Utc};
use serenity::all::CreateAttachment;
use tracing::info;
/// Represents a holiday or special day that should be excluded from class schedules
#[derive(Debug, Clone)]
enum Holiday {
/// A single-day holiday
Single { month: u32, day: u32 },
/// A multi-day holiday range
Range {
month: u32,
start_day: u32,
end_day: u32,
},
}
impl Holiday {
/// Check if a specific date falls within this holiday
fn contains_date(&self, date: NaiveDate) -> bool {
match self {
Holiday::Single { month, day, .. } => date.month() == *month && date.day() == *day,
Holiday::Range {
month,
start_day,
end_day,
..
} => date.month() == *month && date.day() >= *start_day && date.day() <= *end_day,
}
}
/// Get all dates in this holiday for a given year
fn get_dates_for_year(&self, year: i32) -> Vec<NaiveDate> {
match self {
Holiday::Single { month, day, .. } => {
if let Some(date) = NaiveDate::from_ymd_opt(year, *month, *day) {
vec![date]
} else {
Vec::new()
}
}
Holiday::Range {
month,
start_day,
end_day,
..
} => {
let mut dates = Vec::new();
for day in *start_day..=*end_day {
if let Some(date) = NaiveDate::from_ymd_opt(year, *month, day) {
dates.push(date);
}
}
dates
}
}
}
}
/// University holidays that should be excluded from class schedules
const UNIVERSITY_HOLIDAYS: &[(&'static str, Holiday)] = &[
("Labor Day", Holiday::Single { month: 9, day: 1 }),
(
"Fall Break",
Holiday::Range {
month: 10,
start_day: 13,
end_day: 14,
},
),
(
"Unspecified Holiday",
Holiday::Single { month: 11, day: 26 },
),
(
"Thanksgiving",
Holiday::Range {
month: 11,
start_day: 28,
end_day: 29,
},
),
("Student Study Day", Holiday::Single { month: 12, day: 5 }),
(
"Winter Holiday",
Holiday::Range {
month: 12,
start_day: 23,
end_day: 31,
},
),
("New Year's Day", Holiday::Single { month: 1, day: 1 }),
("MLK Day", Holiday::Single { month: 1, day: 20 }),
(
"Spring Break",
Holiday::Range {
month: 3,
start_day: 10,
end_day: 15,
},
),
("Student Study Day", Holiday::Single { month: 5, day: 9 }),
];
/// Generate an ICS file for a course
#[poise::command(slash_command, prefix_command)]
pub async fn ics(
@@ -12,14 +115,322 @@ pub async fn ics(
ctx.defer().await?;
let course = utils::get_course_by_crn(&ctx, crn).await?;
let term = course.term.clone();
// TODO: Implement actual ICS file generation
ctx.say(format!(
"ICS generation for '{}' is not yet implemented.",
course.display_title()
))
// Get meeting times
let meeting_times = ctx
.data()
.app_state
.banner_api
.get_course_meeting_time(&term, &crn.to_string())
.await?;
if meeting_times.is_empty() {
ctx.say("No meeting times found for this course.").await?;
return Ok(());
}
// Sort meeting times by start time
let mut sorted_meeting_times = meeting_times.to_vec();
sorted_meeting_times.sort_unstable_by(|a, b| match (&a.time_range, &b.time_range) {
(Some(a_time), Some(b_time)) => a_time.start.cmp(&b_time.start),
(Some(_), None) => std::cmp::Ordering::Less,
(None, Some(_)) => std::cmp::Ordering::Greater,
(None, None) => a.days.bits().cmp(&b.days.bits()),
});
// Generate ICS content
let (ics_content, excluded_holidays) =
generate_ics_content(&course, &term, &sorted_meeting_times)?;
// Create file attachment
let filename = format!(
"{subject}_{number}_{section}.ics",
subject = course.subject.replace(" ", "_"),
number = course.course_number,
section = course.sequence_number,
);
let file = CreateAttachment::bytes(ics_content.into_bytes(), filename.clone());
// Build response content
let mut response_content = format!(
"📅 Generated ICS calendar for **{}**\n\n**Meeting Times:**\n{}",
course.display_title(),
sorted_meeting_times
.iter()
.enumerate()
.map(|(i, m)| {
let time_info = match &m.time_range {
Some(range) => format!(
"{} {}",
m.days_string().unwrap_or("TBA".to_string()),
range.format_12hr()
),
None => m.days_string().unwrap_or("TBA".to_string()),
};
format!("{}. {}", i + 1, time_info)
})
.collect::<Vec<_>>()
.join("\n")
);
// Add holiday exclusion information
if !excluded_holidays.is_empty() {
let count = excluded_holidays.len();
let count_text = if count == 1 {
"1 date was".to_string()
} else {
format!("{} dates were", count)
};
response_content.push_str(&format!("\n\n{} excluded from the ICS file:\n", count_text));
response_content.push_str(
&excluded_holidays
.iter()
.map(|s| format!("- {}", s))
.collect::<Vec<_>>()
.join("\n"),
);
}
ctx.send(
poise::CreateReply::default()
.content(response_content)
.attachment(file),
)
.await?;
info!("ics command completed for CRN: {}", crn);
info!(crn = %crn, "ics command completed");
Ok(())
}
/// Generate ICS content for a course and its meeting times
fn generate_ics_content(
course: &Course,
term: &str,
meeting_times: &[MeetingScheduleInfo],
) -> Result<(String, Vec<String>), anyhow::Error> {
let mut ics_content = String::new();
let mut excluded_holidays = Vec::new();
// ICS header
ics_content.push_str("BEGIN:VCALENDAR\r\n");
ics_content.push_str("VERSION:2.0\r\n");
ics_content.push_str("PRODID:-//Banner Bot//Course Calendar//EN\r\n");
ics_content.push_str("CALSCALE:GREGORIAN\r\n");
ics_content.push_str("METHOD:PUBLISH\r\n");
// Calendar name
ics_content.push_str(&format!(
"X-WR-CALNAME:{} - {}\r\n",
course.display_title(),
term
));
// Generate events for each meeting time
for (index, meeting_time) in meeting_times.iter().enumerate() {
let (event_content, holidays) = generate_event_content(course, meeting_time, index)?;
ics_content.push_str(&event_content);
excluded_holidays.extend(holidays);
}
// ICS footer
ics_content.push_str("END:VCALENDAR\r\n");
Ok((ics_content, excluded_holidays))
}
/// Generate ICS event content for a single meeting time
fn generate_event_content(
course: &Course,
meeting_time: &MeetingScheduleInfo,
index: usize,
) -> Result<(String, Vec<String>), anyhow::Error> {
let course_title = course.display_title();
let instructor_name = course.primary_instructor_name();
let location = meeting_time.place_string();
// Create event title with meeting index if multiple meetings
let event_title = if index > 0 {
format!("{} (Meeting {})", course_title, index + 1)
} else {
course_title
};
// Create event description
let description = format!(
"CRN: {}\\nInstructor: {}\\nDays: {}\\nMeeting Type: {}",
course.course_reference_number,
instructor_name,
meeting_time.days_string().unwrap_or("TBA".to_string()),
meeting_time.meeting_type.description()
);
// Get start and end times
let (start_dt, end_dt) = meeting_time.datetime_range();
// Format datetimes for ICS (UTC format)
let start_utc = start_dt.with_timezone(&Utc);
let end_utc = end_dt.with_timezone(&Utc);
let start_str = start_utc.format("%Y%m%dT%H%M%SZ").to_string();
let end_str = end_utc.format("%Y%m%dT%H%M%SZ").to_string();
// Generate unique ID for the event
let uid = format!(
"{}-{}-{}@banner-bot.local",
course.course_reference_number,
index,
start_utc.timestamp()
);
let mut event_content = String::new();
// Event header
event_content.push_str("BEGIN:VEVENT\r\n");
event_content.push_str(&format!("UID:{}\r\n", uid));
event_content.push_str(&format!("DTSTART:{}\r\n", start_str));
event_content.push_str(&format!("DTEND:{}\r\n", end_str));
event_content.push_str(&format!("SUMMARY:{}\r\n", escape_ics_text(&event_title)));
event_content.push_str(&format!(
"DESCRIPTION:{}\r\n",
escape_ics_text(&description)
));
event_content.push_str(&format!("LOCATION:{}\r\n", escape_ics_text(&location)));
// Add recurrence rule if there are specific days and times
if !meeting_time.days.is_empty() && meeting_time.time_range.is_some() {
let days_of_week = meeting_time.days_of_week();
let by_day: Vec<String> = days_of_week
.iter()
.map(|day| day.to_short_string().to_uppercase())
.collect();
if !by_day.is_empty() {
let until_date = meeting_time
.date_range
.end
.format("%Y%m%dT000000Z")
.to_string();
event_content.push_str(&format!(
"RRULE:FREQ=WEEKLY;BYDAY={};UNTIL={}\r\n",
by_day.join(","),
until_date
));
// Add holiday exceptions (EXDATE) if the class would meet on holiday dates
let holiday_exceptions = get_holiday_exceptions(meeting_time);
if let Some(exdate_property) = generate_exdate_property(&holiday_exceptions, start_utc)
{
event_content.push_str(&format!("{}\r\n", exdate_property));
}
// Collect holiday names for reporting
let mut holiday_names = Vec::new();
for (holiday_name, holiday) in UNIVERSITY_HOLIDAYS {
for &exception_date in &holiday_exceptions {
if holiday.contains_date(exception_date) {
holiday_names.push(format!(
"{} ({})",
holiday_name,
exception_date.format("%a, %b %d")
));
}
}
}
holiday_names.sort();
holiday_names.dedup();
return Ok((event_content, holiday_names));
}
}
// Event footer
event_content.push_str("END:VEVENT\r\n");
Ok((event_content, Vec::new()))
}
/// Convert chrono::Weekday to the custom DayOfWeek enum
fn chrono_weekday_to_day_of_week(weekday: chrono::Weekday) -> crate::banner::meetings::DayOfWeek {
use crate::banner::meetings::DayOfWeek;
match weekday {
chrono::Weekday::Mon => DayOfWeek::Monday,
chrono::Weekday::Tue => DayOfWeek::Tuesday,
chrono::Weekday::Wed => DayOfWeek::Wednesday,
chrono::Weekday::Thu => DayOfWeek::Thursday,
chrono::Weekday::Fri => DayOfWeek::Friday,
chrono::Weekday::Sat => DayOfWeek::Saturday,
chrono::Weekday::Sun => DayOfWeek::Sunday,
}
}
/// Check if a class meets on a specific date based on its meeting days
fn class_meets_on_date(meeting_time: &MeetingScheduleInfo, date: NaiveDate) -> bool {
let weekday = chrono_weekday_to_day_of_week(date.weekday());
let meeting_days = meeting_time.days_of_week();
meeting_days.contains(&weekday)
}
/// Get holiday dates that fall within the course date range and would conflict with class meetings
fn get_holiday_exceptions(meeting_time: &MeetingScheduleInfo) -> Vec<NaiveDate> {
let mut exceptions = Vec::new();
// Get the year range from the course date range
let start_year = meeting_time.date_range.start.year();
let end_year = meeting_time.date_range.end.year();
for (_, holiday) in UNIVERSITY_HOLIDAYS {
// Check for the holiday in each year of the course
for year in start_year..=end_year {
let holiday_dates = holiday.get_dates_for_year(year);
for holiday_date in holiday_dates {
// Check if the holiday falls within the course date range
if holiday_date >= meeting_time.date_range.start
&& holiday_date <= meeting_time.date_range.end
{
// Check if the class would actually meet on this day
if class_meets_on_date(meeting_time, holiday_date) {
exceptions.push(holiday_date);
}
}
}
}
}
exceptions
}
/// Generate EXDATE property for holiday exceptions
fn generate_exdate_property(
exceptions: &[NaiveDate],
start_time: chrono::DateTime<Utc>,
) -> Option<String> {
if exceptions.is_empty() {
return None;
}
let mut exdate_values = Vec::new();
for &exception_date in exceptions {
// Create a datetime for the exception using the same time as the start time
let exception_datetime = exception_date.and_time(start_time.time()).and_utc();
let exdate_str = exception_datetime.format("%Y%m%dT%H%M%SZ").to_string();
exdate_values.push(exdate_str);
}
Some(format!("EXDATE:{}", exdate_values.join(",")))
}
/// Escape text for ICS format
fn escape_ics_text(text: &str) -> String {
text.replace("\\", "\\\\")
.replace(";", "\\;")
.replace(",", "\\,")
.replace("\n", "\\n")
.replace("\r", "")
}
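
As a small sanity sketch of the holiday exclusion above (Holiday is private to this module, so this would live alongside it): contains_date and get_dates_for_year are what get_holiday_exceptions leans on when deciding which meeting dates become EXDATE entries.

use chrono::NaiveDate;

fn holiday_demo() {
    let thanksgiving = Holiday::Range { month: 11, start_day: 28, end_day: 29 };

    // 2025-11-28 falls inside the range, so a class meeting that day would be
    // excluded via get_holiday_exceptions + generate_exdate_property.
    let date = NaiveDate::from_ymd_opt(2025, 11, 28).unwrap();
    assert!(thanksgiving.contains_date(date));

    // Two concrete dates for 2025: Nov 28 and Nov 29.
    assert_eq!(thanksgiving.get_dates_for_year(2025).len(), 2);
}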

View File

@@ -20,6 +20,6 @@ pub async fn time(
))
.await?;
info!("time command completed for CRN: {}", crn);
info!(crn = %crn, "time command completed");
Ok(())
}

View File

@@ -18,7 +18,7 @@ pub async fn get_course_by_crn(ctx: &Context<'_>, crn: i32) -> Result<Course> {
.get_course_or_fetch(&term.to_string(), &crn.to_string())
.await
.map_err(|e| {
error!(%e, crn, "failed to fetch course data");
error!(error = %e, crn = %crn, "failed to fetch course data");
e
})
}

View File

@@ -8,7 +8,7 @@ use fundu::{DurationParser, TimeUnit};
use serde::{Deserialize, Deserializer};
use std::time::Duration;
/// Application configuration loaded from environment variables
/// Main application configuration containing all sub-configurations
#[derive(Deserialize)]
pub struct Config {
/// Log level for the application
@@ -20,19 +20,11 @@ pub struct Config {
/// Defaults to "info" if not specified
#[serde(default = "default_log_level")]
pub log_level: String,
/// Discord bot token for authentication
pub bot_token: String,
/// Port for the web server
#[serde(default = "default_port")]
pub port: u16,
/// Database connection URL
pub database_url: String,
/// Redis connection URL
pub redis_url: String,
/// Base URL for banner generation service
pub banner_base_url: String,
/// Target Discord guild ID where the bot operates
pub bot_target_guild: u64,
/// Graceful shutdown timeout duration
///
/// Accepts both numeric values (seconds) and duration strings
@@ -42,6 +34,16 @@ pub struct Config {
deserialize_with = "deserialize_duration"
)]
pub shutdown_timeout: Duration,
/// Discord bot token for authentication
pub bot_token: String,
/// Target Discord guild ID where the bot operates
pub bot_target_guild: u64,
/// Base URL for banner generation service
pub banner_base_url: String,
/// Rate limiting configuration for Banner API requests
#[serde(default = "default_rate_limiting")]
pub rate_limiting: RateLimitingConfig,
}
/// Default log level of "info"
@@ -59,6 +61,62 @@ fn default_shutdown_timeout() -> Duration {
Duration::from_secs(8)
}
/// Rate limiting configuration for Banner API requests
#[derive(Deserialize, Clone, Debug)]
pub struct RateLimitingConfig {
/// Requests per minute for session operations (very conservative)
#[serde(default = "default_session_rpm")]
pub session_rpm: u32,
/// Requests per minute for search operations (moderate)
#[serde(default = "default_search_rpm")]
pub search_rpm: u32,
/// Requests per minute for metadata operations (moderate)
#[serde(default = "default_metadata_rpm")]
pub metadata_rpm: u32,
/// Requests per minute for reset operations (low priority)
#[serde(default = "default_reset_rpm")]
pub reset_rpm: u32,
/// Burst allowance (extra requests allowed in short bursts)
#[serde(default = "default_burst_allowance")]
pub burst_allowance: u32,
}
/// Default rate limiting configuration
fn default_rate_limiting() -> RateLimitingConfig {
RateLimitingConfig {
session_rpm: default_session_rpm(),
search_rpm: default_search_rpm(),
metadata_rpm: default_metadata_rpm(),
reset_rpm: default_reset_rpm(),
burst_allowance: default_burst_allowance(),
}
}
/// Default session requests per minute (6 = 1 every 10 seconds)
fn default_session_rpm() -> u32 {
6
}
/// Default search requests per minute (30 = 1 every 2 seconds)
fn default_search_rpm() -> u32 {
30
}
/// Default metadata requests per minute (20 = 1 every 3 seconds)
fn default_metadata_rpm() -> u32 {
20
}
/// Default reset requests per minute (10 = 1 every 6 seconds)
fn default_reset_rpm() -> u32 {
10
}
/// Default burst allowance (3 extra requests)
fn default_burst_allowance() -> u32 {
3
}
/// Duration parser configured to handle various time units with seconds as default
///
/// Supports:

243
src/formatter.rs Normal file


@@ -0,0 +1,243 @@
//! Custom tracing formatter
use serde::Serialize;
use serde_json::{Map, Value};
use std::fmt;
use time::macros::format_description;
use time::{OffsetDateTime, format_description::FormatItem};
use tracing::field::{Field, Visit};
use tracing::{Event, Level, Subscriber};
use tracing_subscriber::fmt::format::Writer;
use tracing_subscriber::fmt::{FmtContext, FormatEvent, FormatFields, FormattedFields};
use tracing_subscriber::registry::LookupSpan;
/// Cached format description for timestamps
/// Uses 3 subsecond digits on Emscripten, 5 otherwise for better performance
#[cfg(target_os = "emscripten")]
const TIMESTAMP_FORMAT: &[FormatItem<'static>] =
format_description!("[hour]:[minute]:[second].[subsecond digits:3]");
#[cfg(not(target_os = "emscripten"))]
const TIMESTAMP_FORMAT: &[FormatItem<'static>] =
format_description!("[hour]:[minute]:[second].[subsecond digits:5]");
/// A custom formatter with enhanced timestamp formatting
///
/// Re-implementation of the Full formatter with improved timestamp display.
pub struct CustomFormatter;
/// A custom JSON formatter that flattens fields to root level
///
/// Outputs logs in the format: { "message": "...", "level": "...", "customAttribute": "..." }
pub struct CustomJsonFormatter;
impl<S, N> FormatEvent<S, N> for CustomFormatter
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
// 1) Timestamp (dimmed when ANSI)
let now = OffsetDateTime::now_utc();
let formatted_time = now.format(&TIMESTAMP_FORMAT).map_err(|e| {
eprintln!("Failed to format timestamp: {}", e);
fmt::Error
})?;
write_dimmed(&mut writer, formatted_time)?;
writer.write_char(' ')?;
// 2) Colored 5-char level like Full
write_colored_level(&mut writer, meta.level())?;
writer.write_char(' ')?;
// 3) Span scope chain (bold names, fields in braces, dimmed ':')
if let Some(scope) = ctx.event_scope() {
let mut saw_any = false;
for span in scope.from_root() {
write_bold(&mut writer, span.metadata().name())?;
saw_any = true;
let ext = span.extensions();
if let Some(fields) = &ext.get::<FormattedFields<N>>() {
if !fields.is_empty() {
write_bold(&mut writer, "{")?;
write!(writer, "{}", fields)?;
write_bold(&mut writer, "}")?;
}
}
if writer.has_ansi_escapes() {
write!(writer, "\x1b[2m:\x1b[0m")?;
} else {
writer.write_char(':')?;
}
}
if saw_any {
writer.write_char(' ')?;
}
}
// 4) Target (dimmed), then a space
if writer.has_ansi_escapes() {
write!(writer, "\x1b[2m{}\x1b[0m\x1b[2m:\x1b[0m ", meta.target())?;
} else {
write!(writer, "{}: ", meta.target())?;
}
// 5) Event fields
ctx.format_fields(writer.by_ref(), event)?;
// 6) Newline
writeln!(writer)
}
}
impl<S, N> FormatEvent<S, N> for CustomJsonFormatter
where
S: Subscriber + for<'a> LookupSpan<'a>,
N: for<'a> FormatFields<'a> + 'static,
{
fn format_event(
&self,
_ctx: &FmtContext<'_, S, N>,
mut writer: Writer<'_>,
event: &Event<'_>,
) -> fmt::Result {
let meta = event.metadata();
#[derive(Serialize)]
struct EventFields {
message: String,
level: String,
target: String,
#[serde(flatten)]
fields: Map<String, Value>,
}
let (message, fields) = {
let mut message: Option<String> = None;
let mut fields: Map<String, Value> = Map::new();
struct FieldVisitor<'a> {
message: &'a mut Option<String>,
fields: &'a mut Map<String, Value>,
}
impl<'a> Visit for FieldVisitor<'a> {
fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) {
let key = field.name();
if key == "message" {
*self.message = Some(format!("{:?}", value));
} else {
// Use typed methods for better performance
self.fields
.insert(key.to_string(), Value::String(format!("{:?}", value)));
}
}
fn record_str(&mut self, field: &Field, value: &str) {
let key = field.name();
if key == "message" {
*self.message = Some(value.to_string());
} else {
self.fields
.insert(key.to_string(), Value::String(value.to_string()));
}
}
fn record_i64(&mut self, field: &Field, value: i64) {
let key = field.name();
if key != "message" {
self.fields.insert(
key.to_string(),
Value::Number(serde_json::Number::from(value)),
);
}
}
fn record_u64(&mut self, field: &Field, value: u64) {
let key = field.name();
if key != "message" {
self.fields.insert(
key.to_string(),
Value::Number(serde_json::Number::from(value)),
);
}
}
fn record_bool(&mut self, field: &Field, value: bool) {
let key = field.name();
if key != "message" {
self.fields.insert(key.to_string(), Value::Bool(value));
}
}
}
let mut visitor = FieldVisitor {
message: &mut message,
fields: &mut fields,
};
event.record(&mut visitor);
(message, fields)
};
let json = EventFields {
message: message.unwrap_or_default(),
level: meta.level().to_string(),
target: meta.target().to_string(),
fields,
};
writeln!(
writer,
"{}",
serde_json::to_string(&json).unwrap_or_else(|_| "{}".to_string())
)
}
}
/// Write the verbosity level with the same coloring/alignment as the Full formatter.
fn write_colored_level(writer: &mut Writer<'_>, level: &Level) -> fmt::Result {
if writer.has_ansi_escapes() {
// Basic ANSI color sequences; reset with \x1b[0m
let (color, text) = match *level {
Level::TRACE => ("\x1b[35m", "TRACE"), // purple
Level::DEBUG => ("\x1b[34m", "DEBUG"), // blue
Level::INFO => ("\x1b[32m", " INFO"), // green, note leading space
Level::WARN => ("\x1b[33m", " WARN"), // yellow, note leading space
Level::ERROR => ("\x1b[31m", "ERROR"), // red
};
write!(writer, "{}{}\x1b[0m", color, text)
} else {
// Right-pad to width 5 like Full's non-ANSI mode
match *level {
Level::TRACE => write!(writer, "{:>5}", "TRACE"),
Level::DEBUG => write!(writer, "{:>5}", "DEBUG"),
Level::INFO => write!(writer, "{:>5}", " INFO"),
Level::WARN => write!(writer, "{:>5}", " WARN"),
Level::ERROR => write!(writer, "{:>5}", "ERROR"),
}
}
}
fn write_dimmed(writer: &mut Writer<'_>, s: impl fmt::Display) -> fmt::Result {
if writer.has_ansi_escapes() {
write!(writer, "\x1b[2m{}\x1b[0m", s)
} else {
write!(writer, "{}", s)
}
}
fn write_bold(writer: &mut Writer<'_>, s: impl fmt::Display) -> fmt::Result {
if writer.has_ansi_escapes() {
write!(writer, "\x1b[1m{}\x1b[0m", s)
} else {
write!(writer, "{}", s)
}
}
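
For reference, a sketch of installing the JSON formatter and the rough shape of one emitted line; the target shown is illustrative, and extra fields are whatever the event records:

use tracing_subscriber::FmtSubscriber;

fn init_json_logging() {
    let subscriber = FmtSubscriber::builder()
        .event_format(CustomJsonFormatter)
        .finish();
    tracing::subscriber::set_global_default(subscriber).expect("setting subscriber failed");

    // tracing::info!(crn = 27294, "ics command completed");
    // => {"message":"ics command completed","level":"INFO","target":"banner::bot","crn":27294}
}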

View File

@@ -1,4 +1,6 @@
use serenity::all::{ClientBuilder, GatewayIntents};
use figment::value::UncasedStr;
use num_format::{Locale, ToFormattedString};
use serenity::all::{ActivityData, ClientBuilder, GatewayIntents};
use tokio::signal;
use tracing::{error, info, warn};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
@@ -20,30 +22,66 @@ mod bot;
mod config;
mod data;
mod error;
mod formatter;
mod scraper;
mod services;
mod state;
mod web;
async fn update_bot_status(
ctx: &serenity::all::Context,
app_state: &AppState,
) -> Result<(), anyhow::Error> {
let course_count = app_state.get_course_count().await?;
ctx.set_activity(Some(ActivityData::playing(format!(
"Querying {:} classes",
course_count.to_formatted_string(&Locale::en)
))));
tracing::info!(course_count = course_count, "Updated bot status");
Ok(())
}
#[tokio::main]
async fn main() {
dotenvy::dotenv().ok();
// Configure logging
let filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("warn,banner=debug"));
// Load configuration first to get log level
let config: Config = Figment::new()
.merge(Env::raw().map(|k| {
if k == UncasedStr::new("RAILWAY_DEPLOYMENT_DRAINING_SECONDS") {
"SHUTDOWN_TIMEOUT".into()
} else {
k.into()
}
}))
.extract()
.expect("Failed to load config");
// Configure logging based on config
let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| {
let base_level = &config.log_level;
EnvFilter::new(&format!(
"warn,banner={},banner::rate_limiter=warn,banner::session=warn,banner::rate_limit_middleware=warn",
base_level
))
});
let subscriber = {
#[cfg(debug_assertions)]
{
FmtSubscriber::builder()
.with_target(true)
.event_format(formatter::CustomFormatter)
}
#[cfg(not(debug_assertions))]
{
FmtSubscriber::builder().json()
FmtSubscriber::builder()
.with_target(true)
.event_format(formatter::CustomJsonFormatter)
}
}
.with_env_filter(filter)
.with_target(true)
.finish();
tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed");
@@ -58,12 +96,6 @@ async fn main() {
"starting banner"
);
let config: Config = Figment::new()
.merge(Env::raw().only(&["DATABASE_URL"]))
.merge(Env::prefixed("APP_"))
.extract()
.expect("Failed to load config");
// Create database connection pool
let db_pool = PgPoolOptions::new()
.max_connections(10)
@@ -79,12 +111,14 @@ async fn main() {
);
// Create BannerApi and AppState
let banner_api =
BannerApi::new(config.banner_base_url.clone()).expect("Failed to create BannerApi");
let banner_api = BannerApi::new_with_config(
config.banner_base_url.clone(),
config.rate_limiting.clone().into(),
)
.expect("Failed to create BannerApi");
let banner_api_arc = Arc::new(banner_api);
let app_state = AppState::new(banner_api_arc.clone(), &config.redis_url)
.expect("Failed to create AppState");
let app_state = AppState::new(banner_api_arc.clone(), db_pool.clone());
// Create BannerState for web service
let banner_state = BannerState {
@@ -138,7 +172,7 @@ async fn main() {
on_error: |error| {
Box::pin(async move {
if let Err(e) = poise::builtins::on_error(error).await {
tracing::error!("Fatal error while sending error message: {}", e);
tracing::error!(error = %e, "Fatal error while sending error message");
}
// error!(error = ?error, "command error");
})
@@ -155,6 +189,27 @@ async fn main() {
)
.await?;
poise::builtins::register_globally(ctx, &framework.options().commands).await?;
// Start status update task
let status_app_state = app_state.clone();
let status_ctx = ctx.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
// Update status immediately on startup
if let Err(e) = update_bot_status(&status_ctx, &status_app_state).await {
tracing::error!(error = %e, "Failed to update status on startup");
}
loop {
interval.tick().await;
if let Err(e) = update_bot_status(&status_ctx, &status_app_state).await {
tracing::error!(error = %e, "Failed to update bot status");
}
}
});
Ok(Data { app_state })
})
})

112
src/scraper/jobs/mod.rs Normal file

@@ -0,0 +1,112 @@
pub mod subject;
use crate::banner::BannerApi;
use crate::data::models::TargetType;
use crate::error::Result;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::fmt;
/// Errors that can occur during job parsing
#[derive(Debug)]
pub enum JobParseError {
InvalidJson(serde_json::Error),
UnsupportedTargetType(TargetType),
MissingRequiredField(String),
}
impl fmt::Display for JobParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
JobParseError::InvalidJson(e) => write!(f, "Invalid JSON in job payload: {}", e),
JobParseError::UnsupportedTargetType(t) => {
write!(f, "Unsupported target type: {:?}", t)
}
JobParseError::MissingRequiredField(field) => {
write!(f, "Missing required field: {}", field)
}
}
}
}
impl std::error::Error for JobParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
JobParseError::InvalidJson(e) => Some(e),
_ => None,
}
}
}
/// Errors that can occur during job processing
#[derive(Debug)]
pub enum JobError {
Recoverable(anyhow::Error), // API failures, network issues
Unrecoverable(anyhow::Error), // Parse errors, corrupted data
}
impl fmt::Display for JobError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
JobError::Recoverable(e) => write!(f, "Recoverable error: {}", e),
JobError::Unrecoverable(e) => write!(f, "Unrecoverable error: {}", e),
}
}
}
impl std::error::Error for JobError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
JobError::Recoverable(e) => e.source(),
JobError::Unrecoverable(e) => e.source(),
}
}
}
/// Common trait interface for all job types
#[async_trait::async_trait]
pub trait Job: Send + Sync {
/// The target type this job handles
fn target_type(&self) -> TargetType;
/// Process the job with the given API client and database pool
async fn process(&self, banner_api: &BannerApi, db_pool: &PgPool) -> Result<()>;
/// Get a human-readable description of the job
fn description(&self) -> String;
}
/// Main job enum that dispatches to specific job implementations
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum JobType {
Subject(subject::SubjectJob),
}
impl JobType {
/// Create a job from the target type and payload
pub fn from_target_type_and_payload(
target_type: TargetType,
payload: serde_json::Value,
) -> Result<Self, JobParseError> {
match target_type {
TargetType::Subject => {
let subject_job: subject::SubjectJob =
serde_json::from_value(payload).map_err(JobParseError::InvalidJson)?;
Ok(JobType::Subject(subject_job))
}
_ => Err(JobParseError::UnsupportedTargetType(target_type)),
}
}
/// Convert to a Job trait object
pub fn as_job(self) -> Box<dyn Job> {
match self {
JobType::Subject(job) => Box::new(job),
}
}
}
/// Helper function to create a subject job
pub fn create_subject_job(subject: String) -> JobType {
JobType::Subject(subject::SubjectJob::new(subject))
}
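
A minimal sketch of how a dequeued row might be turned into a runnable job; the payload value and error handling here are illustrative, not the worker's actual logic:

use serde_json::json;

async fn run_one(banner_api: &BannerApi, db_pool: &PgPool) {
    let payload = json!({ "subject": "CS" });
    match JobType::from_target_type_and_payload(TargetType::Subject, payload) {
        Ok(job_type) => {
            let job = job_type.as_job();
            tracing::info!(description = %job.description(), "processing job");
            if let Err(e) = job.process(banner_api, db_pool).await {
                tracing::warn!(error = ?e, "job failed");
            }
        }
        // Unsupported target types or bad payloads are unrecoverable; per the
        // commit above, such jobs are deleted rather than retried.
        Err(e) => tracing::warn!(error = %e, "dropping unparseable job"),
    }
}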

View File

@@ -0,0 +1,93 @@
use super::Job;
use crate::banner::{BannerApi, Course, SearchQuery, Term};
use crate::data::models::TargetType;
use crate::error::Result;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use tracing::{debug, info, trace};
/// Job implementation for scraping subject data
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SubjectJob {
pub subject: String,
}
impl SubjectJob {
pub fn new(subject: String) -> Self {
Self { subject }
}
}
#[async_trait::async_trait]
impl Job for SubjectJob {
fn target_type(&self) -> TargetType {
TargetType::Subject
}
async fn process(&self, banner_api: &BannerApi, db_pool: &PgPool) -> Result<()> {
let subject_code = &self.subject;
debug!(subject = subject_code, "Processing subject job");
// Get the current term
let term = Term::get_current().inner().to_string();
let query = SearchQuery::new().subject(subject_code).max_results(500);
let search_result = banner_api
.search(&term, &query, "subjectDescription", false)
.await?;
if let Some(courses_from_api) = search_result.data {
info!(
subject = subject_code,
count = courses_from_api.len(),
"Found courses"
);
for course in courses_from_api {
self.upsert_course(&course, db_pool).await?;
}
}
debug!(subject = subject_code, "Subject job completed");
Ok(())
}
fn description(&self) -> String {
format!("Scrape subject: {}", self.subject)
}
}
impl SubjectJob {
async fn upsert_course(&self, course: &Course, db_pool: &PgPool) -> Result<()> {
sqlx::query(
r#"
INSERT INTO courses (crn, subject, course_number, title, term_code, enrollment, max_enrollment, wait_count, wait_capacity, last_scraped_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (crn, term_code) DO UPDATE SET
subject = EXCLUDED.subject,
course_number = EXCLUDED.course_number,
title = EXCLUDED.title,
enrollment = EXCLUDED.enrollment,
max_enrollment = EXCLUDED.max_enrollment,
wait_count = EXCLUDED.wait_count,
wait_capacity = EXCLUDED.wait_capacity,
last_scraped_at = EXCLUDED.last_scraped_at
"#,
)
.bind(&course.course_reference_number)
.bind(&course.subject)
.bind(&course.course_number)
.bind(&course.course_title)
.bind(&course.term)
.bind(course.enrollment)
.bind(course.maximum_enrollment)
.bind(course.wait_count)
.bind(course.wait_capacity)
.bind(chrono::Utc::now())
.execute(db_pool)
.await
.map(|result| {
trace!(subject = course.subject, crn = course.course_reference_number, result = ?result, "Course upserted");
})
.map_err(|e| anyhow::anyhow!("Failed to upsert course: {e}"))
}
}

View File

@@ -1,3 +1,4 @@
pub mod jobs;
pub mod scheduler;
pub mod worker;
@@ -35,14 +36,14 @@ impl ScraperService {
/// Starts the scheduler and a pool of workers.
pub fn start(&mut self) {
info!("ScraperService starting...");
info!("ScraperService starting");
let scheduler = Scheduler::new(self.db_pool.clone(), self.banner_api.clone());
let scheduler_handle = tokio::spawn(async move {
scheduler.run().await;
});
self.scheduler_handle = Some(scheduler_handle);
info!("Scheduler task spawned.");
info!("Scheduler task spawned");
let worker_count = 4; // This could be configurable
for i in 0..worker_count {
@@ -52,19 +53,22 @@ impl ScraperService {
});
self.worker_handles.push(worker_handle);
}
info!("Spawned {} worker tasks.", self.worker_handles.len());
info!(
worker_count = self.worker_handles.len(),
"Spawned worker tasks"
);
}
/// Signals all child tasks to gracefully shut down.
pub async fn shutdown(&mut self) {
info!("Shutting down scraper service...");
info!("Shutting down scraper service");
if let Some(handle) = self.scheduler_handle.take() {
handle.abort();
}
for handle in self.worker_handles.drain(..) {
handle.abort();
}
info!("Scraper service shutdown.");
info!("Scraper service shutdown");
}
}
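The worker pool size is still hard-coded to 4 above ("This could be configurable"). One possible way to wire it to the environment, sketched here; the SCRAPER_WORKER_COUNT variable name is an assumption, not something this diff introduces:

use std::env;

// Hypothetical helper: read the pool size from the environment, falling back
// to the current hard-coded default of 4.
fn worker_count_from_env() -> usize {
    env::var("SCRAPER_WORKER_COUNT")
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(4)
}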

View File

@@ -1,12 +1,13 @@
use crate::banner::{BannerApi, Term};
use crate::data::models::{ScrapePriority, TargetType};
use crate::error::Result;
use crate::scraper::jobs::subject::SubjectJob;
use serde_json::json;
use sqlx::PgPool;
use std::sync::Arc;
use std::time::Duration;
use tokio::time;
use tracing::{error, info};
use tracing::{debug, error, info, trace};
/// Periodically analyzes data and enqueues prioritized scrape jobs.
pub struct Scheduler {
@@ -24,12 +25,12 @@ impl Scheduler {
/// Runs the scheduler's main loop.
pub async fn run(&self) {
info!("Scheduler service started.");
info!("Scheduler service started");
let mut interval = time::interval(Duration::from_secs(60)); // Runs every minute
loop {
interval.tick().await;
info!("Scheduler waking up to analyze and schedule jobs...");
// Scheduler analyzing data...
if let Err(e) = self.schedule_jobs().await {
error!(error = ?e, "Failed to schedule jobs");
}
@@ -40,46 +41,80 @@ impl Scheduler {
async fn schedule_jobs(&self) -> Result<()> {
// For now, we will implement a simple baseline scheduling strategy:
// 1. Get a list of all subjects from the Banner API.
// 2. For each subject, check if an active (not locked, not completed) job already exists.
// 3. If no job exists, create a new, low-priority job to be executed in the near future.
// 2. Query existing jobs for all subjects in a single query.
// 3. Create new jobs only for subjects that don't have existing jobs.
let term = Term::get_current().inner().to_string();
info!(
term = term,
"[Scheduler] Enqueuing baseline subject scrape jobs..."
);
debug!(term = term, "Enqueuing subject jobs");
let subjects = self.banner_api.get_subjects("", &term, 1, 500).await?;
debug!(
subject_count = subjects.len(),
"Retrieved subjects from API"
);
for subject in subjects {
let payload = json!({ "subject": subject.code });
// Create payloads for all subjects
let subject_payloads: Vec<_> = subjects
.iter()
.map(|subject| json!({ "subject": subject.code }))
.collect();
let existing_job: Option<(i32,)> = sqlx::query_as(
"SELECT id FROM scrape_jobs WHERE target_type = $1 AND target_payload = $2 AND locked_at IS NULL"
)
.bind(TargetType::Subject)
.bind(&payload)
.fetch_optional(&self.db_pool)
.await?;
// Query existing jobs for all subjects in a single query
let existing_jobs: Vec<(serde_json::Value,)> = sqlx::query_as(
"SELECT target_payload FROM scrape_jobs
WHERE target_type = $1 AND target_payload = ANY($2) AND locked_at IS NULL",
)
.bind(TargetType::Subject)
.bind(&subject_payloads)
.fetch_all(&self.db_pool)
.await?;
if existing_job.is_some() {
continue;
// Convert to a HashSet for efficient lookup
let existing_payloads: std::collections::HashSet<String> = existing_jobs
.into_iter()
.map(|(payload,)| payload.to_string())
.collect();
// Filter out subjects that already have jobs and prepare new jobs
let new_jobs: Vec<_> = subjects
.into_iter()
.filter_map(|subject| {
let job = SubjectJob::new(subject.code.clone());
let payload = serde_json::to_value(&job).unwrap();
let payload_str = payload.to_string();
if existing_payloads.contains(&payload_str) {
trace!(subject = subject.code, "Job already exists, skipping");
None
} else {
Some((payload, subject.code))
}
})
.collect();
// Insert all new jobs in a single batch
if !new_jobs.is_empty() {
let now = chrono::Utc::now();
let mut tx = self.db_pool.begin().await?;
for (payload, subject_code) in new_jobs {
sqlx::query(
"INSERT INTO scrape_jobs (target_type, target_payload, priority, execute_at) VALUES ($1, $2, $3, $4)"
)
.bind(TargetType::Subject)
.bind(&payload)
.bind(ScrapePriority::Low)
.bind(now)
.execute(&mut *tx)
.await?;
debug!(subject = subject_code, "New job enqueued for subject");
}
sqlx::query(
"INSERT INTO scrape_jobs (target_type, target_payload, priority, execute_at) VALUES ($1, $2, $3, $4)"
)
.bind(TargetType::Subject)
.bind(&payload)
.bind(ScrapePriority::Low)
.bind(chrono::Utc::now())
.execute(&self.db_pool)
.await?;
info!(subject = subject.code, "[Scheduler] Enqueued new job");
tx.commit().await?;
}
info!("[Scheduler] Job scheduling complete.");
debug!("Job scheduling complete");
Ok(())
}
}
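The batching above still issues one INSERT per subject inside the transaction. If that loop ever becomes a bottleneck, a single UNNEST insert could cover all new jobs in one statement; this is only a sketch of that alternative, not what the diff implements, and it assumes the enum bindings encode the same way they do in the per-row query.

use crate::data::models::{ScrapePriority, TargetType};
use sqlx::PgPool;

// Hypothetical single-statement variant of the insert loop above.
async fn insert_subject_jobs_batch(
    pool: &PgPool,
    payloads: &[serde_json::Value],
) -> sqlx::Result<()> {
    sqlx::query(
        "INSERT INTO scrape_jobs (target_type, target_payload, priority, execute_at)
         SELECT $1, payload, $2, $3 FROM UNNEST($4::jsonb[]) AS t(payload)",
    )
    .bind(TargetType::Subject)
    .bind(ScrapePriority::Low)
    .bind(chrono::Utc::now())
    .bind(payloads)
    .execute(pool)
    .await?;
    Ok(())
}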

View File

@@ -1,12 +1,12 @@
use crate::banner::{BannerApi, BannerApiError, Course, SearchQuery, Term};
use crate::banner::{BannerApi, BannerApiError};
use crate::data::models::ScrapeJob;
use crate::error::Result;
use serde_json::Value;
use crate::scraper::jobs::{JobError, JobType};
use sqlx::PgPool;
use std::sync::Arc;
use std::time::Duration;
use tokio::time;
use tracing::{error, info, warn};
use tracing::{debug, error, info, trace, warn};
/// A single worker instance.
///
@@ -34,44 +34,65 @@ impl Worker {
match self.fetch_and_lock_job().await {
Ok(Some(job)) => {
let job_id = job.id;
info!(worker_id = self.id, job_id = job.id, "Processing job");
if let Err(e) = self.process_job(job).await {
// Check if the error is due to an invalid session
if let Some(BannerApiError::InvalidSession(_)) =
e.downcast_ref::<BannerApiError>()
{
warn!(
worker_id = self.id,
job_id, "Invalid session detected. Forcing session refresh."
);
} else {
error!(worker_id = self.id, job_id, error = ?e, "Failed to process job");
debug!(worker_id = self.id, job_id = job.id, "Processing job");
match self.process_job(job).await {
Ok(()) => {
debug!(worker_id = self.id, job_id, "Job completed");
// If successful, delete the job.
if let Err(delete_err) = self.delete_job(job_id).await {
error!(
worker_id = self.id,
job_id,
?delete_err,
"Failed to delete job"
);
}
}
Err(JobError::Recoverable(e)) => {
// Check if the error is due to an invalid session
if let Some(BannerApiError::InvalidSession(_)) =
e.downcast_ref::<BannerApiError>()
{
warn!(
worker_id = self.id,
job_id, "Invalid session detected. Forcing session refresh."
);
} else {
error!(worker_id = self.id, job_id, error = ?e, "Failed to process job");
}
// Unlock the job so it can be retried
if let Err(unlock_err) = self.unlock_job(job_id).await {
error!(
worker_id = self.id,
job_id,
?unlock_err,
"Failed to unlock job"
);
// Unlock the job so it can be retried
if let Err(unlock_err) = self.unlock_job(job_id).await {
error!(
worker_id = self.id,
job_id,
?unlock_err,
"Failed to unlock job"
);
}
}
} else {
info!(worker_id = self.id, job_id, "Job processed successfully");
// If successful, delete the job.
if let Err(delete_err) = self.delete_job(job_id).await {
Err(JobError::Unrecoverable(e)) => {
error!(
worker_id = self.id,
job_id,
?delete_err,
"Failed to delete job"
error = ?e,
"Job corrupted, deleting"
);
// Parse errors are unrecoverable - delete the job
if let Err(delete_err) = self.delete_job(job_id).await {
error!(
worker_id = self.id,
job_id,
?delete_err,
"Failed to delete corrupted job"
);
}
}
}
}
Ok(None) => {
// No job found, wait for a bit before polling again.
trace!(worker_id = self.id, "No jobs available, waiting");
time::sleep(Duration::from_secs(5)).await;
}
Err(e) => {
@@ -108,79 +129,26 @@ impl Worker {
Ok(job)
}
async fn process_job(&self, job: ScrapeJob) -> Result<()> {
match job.target_type {
crate::data::models::TargetType::Subject => {
self.process_subject_job(&job.target_payload).await
}
_ => {
warn!(worker_id = self.id, job_id = job.id, "unhandled job type");
Ok(())
}
}
}
async fn process_job(&self, job: ScrapeJob) -> Result<(), JobError> {
// Convert the database job to our job type
let job_type = JobType::from_target_type_and_payload(job.target_type, job.target_payload)
.map_err(|e| JobError::Unrecoverable(anyhow::anyhow!(e)))?; // Parse errors are unrecoverable
async fn process_subject_job(&self, payload: &Value) -> Result<()> {
let subject_code = payload["subject"]
.as_str()
.ok_or_else(|| anyhow::anyhow!("Invalid subject payload"))?;
info!(
// Get the job implementation
let job_impl = job_type.as_job();
debug!(
worker_id = self.id,
subject = subject_code,
"Processing subject job"
job_id = job.id,
description = job_impl.description(),
"Processing job"
);
let term = Term::get_current().inner().to_string();
let query = SearchQuery::new().subject(subject_code).max_results(500);
let search_result = self
.banner_api
.search(&term, &query, "subjectDescription", false)
.await?;
if let Some(courses_from_api) = search_result.data {
info!(
worker_id = self.id,
subject = subject_code,
count = courses_from_api.len(),
"Found courses to upsert"
);
for course in courses_from_api {
self.upsert_course(&course).await?;
}
}
Ok(())
}
async fn upsert_course(&self, course: &Course) -> Result<()> {
sqlx::query(
r#"
INSERT INTO courses (crn, subject, course_number, title, term_code, enrollment, max_enrollment, wait_count, wait_capacity, last_scraped_at)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
ON CONFLICT (crn, term_code) DO UPDATE SET
subject = EXCLUDED.subject,
course_number = EXCLUDED.course_number,
title = EXCLUDED.title,
enrollment = EXCLUDED.enrollment,
max_enrollment = EXCLUDED.max_enrollment,
wait_count = EXCLUDED.wait_count,
wait_capacity = EXCLUDED.wait_capacity,
last_scraped_at = EXCLUDED.last_scraped_at
"#,
)
.bind(&course.course_reference_number)
.bind(&course.subject)
.bind(&course.course_number)
.bind(&course.course_title)
.bind(&course.term)
.bind(course.enrollment)
.bind(course.maximum_enrollment)
.bind(course.wait_count)
.bind(course.wait_capacity)
.bind(chrono::Utc::now())
.execute(&self.db_pool)
.await?;
// Process the job - API errors are recoverable
job_impl
.process(&self.banner_api, &self.db_pool)
.await
.map_err(JobError::Recoverable)?;
Ok(())
}
@@ -190,7 +158,6 @@ impl Worker {
.bind(job_id)
.execute(&self.db_pool)
.await?;
info!(worker_id = self.id, job_id, "Job deleted");
Ok(())
}
@@ -199,7 +166,7 @@ impl Worker {
.bind(job_id)
.execute(&self.db_pool)
.await?;
info!(worker_id = self.id, job_id, "Job unlocked after failure");
info!(worker_id = self.id, job_id, "Job unlocked for retry");
Ok(())
}
}
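The match above relies on a two-variant error type from scraper::jobs that is not reproduced in this hunk. Its presumed shape, inferred from the usage here (both variants wrap an anyhow::Error), is sketched below; the real definition may carry extra derives or a thiserror impl.

// Presumed definition, for reading the match arms above.
pub enum JobError {
    /// Transient failures (e.g. Banner API/session errors): unlock the job and retry later.
    Recoverable(anyhow::Error),
    /// Permanent failures (e.g. unparsable payloads): delete the job.
    Unrecoverable(anyhow::Error),
}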

View File

@@ -1,7 +1,7 @@
use super::Service;
use serenity::Client;
use std::sync::Arc;
use tracing::{debug, error};
use tracing::{error, warn};
/// Discord bot service implementation
pub struct BotService {
@@ -28,7 +28,7 @@ impl Service for BotService {
async fn run(&mut self) -> Result<(), anyhow::Error> {
match self.client.start().await {
Ok(()) => {
debug!(service = "bot", "stopped early.");
warn!(service = "bot", "stopped early");
Err(anyhow::anyhow!("bot stopped early"))
}
Err(e) => {

View File

@@ -42,7 +42,7 @@ impl ServiceManager {
for (name, service) in self.registered_services.drain() {
let shutdown_rx = self.shutdown_tx.subscribe();
let handle = tokio::spawn(run_service(service, shutdown_rx));
trace!(service = name, id = ?handle.id(), "service spawned",);
debug!(service = name, id = ?handle.id(), "service spawned");
self.running_services.insert(name, handle);
}
@@ -127,7 +127,7 @@ impl ServiceManager {
for (name, handle) in self.running_services.drain() {
match tokio::time::timeout(timeout, handle).await {
Ok(Ok(_)) => {
debug!(service = name, "service shutdown completed");
trace!(service = name, "service shutdown completed");
}
Ok(Err(e)) => {
warn!(service = name, error = ?e, "service shutdown failed");

View File

@@ -3,7 +3,7 @@ use crate::web::{BannerState, create_router};
use std::net::SocketAddr;
use tokio::net::TcpListener;
use tokio::sync::broadcast;
use tracing::{debug, info, warn};
use tracing::{info, warn, trace};
/// Web server service implementation
pub struct WebService {
@@ -40,10 +40,10 @@ impl Service for WebService {
);
let listener = TcpListener::bind(addr).await?;
debug!(
info!(
service = "web",
"web server listening on {}",
format!("http://{}", addr)
address = %addr,
"web server listening"
);
// Create internal shutdown channel for axum graceful shutdown
@@ -54,7 +54,7 @@ impl Service for WebService {
axum::serve(listener, app)
.with_graceful_shutdown(async move {
let _ = shutdown_rx.recv().await;
debug!(
trace!(
service = "web",
"received shutdown signal, starting graceful shutdown"
);

View File

@@ -3,46 +3,36 @@
use crate::banner::BannerApi;
use crate::banner::Course;
use anyhow::Result;
use redis::AsyncCommands;
use redis::Client;
use sqlx::PgPool;
use std::sync::Arc;
#[derive(Clone)]
pub struct AppState {
pub banner_api: Arc<BannerApi>,
pub redis: Arc<Client>,
pub db_pool: PgPool,
}
impl AppState {
pub fn new(
banner_api: Arc<BannerApi>,
redis_url: &str,
) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let redis_client = Client::open(redis_url)?;
Ok(Self {
pub fn new(banner_api: Arc<BannerApi>, db_pool: PgPool) -> Self {
Self {
banner_api,
redis: Arc::new(redis_client),
})
db_pool,
}
}
/// Get a course by CRN with Redis cache fallback to Banner API
/// Get a course by CRN directly from Banner API
pub async fn get_course_or_fetch(&self, term: &str, crn: &str) -> Result<Course> {
let mut conn = self.redis.get_multiplexed_async_connection().await?;
self.banner_api
.get_course_by_crn(term, crn)
.await?
.ok_or_else(|| anyhow::anyhow!("Course not found for CRN {crn}"))
}
let key = format!("class:{crn}");
if let Some(serialized) = conn.get::<_, Option<String>>(&key).await? {
let course: Course = serde_json::from_str(&serialized)?;
return Ok(course);
}
// Fallback: fetch from Banner API
if let Some(course) = self.banner_api.get_course_by_crn(term, crn).await? {
let serialized = serde_json::to_string(&course)?;
let _: () = conn.set(&key, serialized).await?;
return Ok(course);
}
Err(anyhow::anyhow!("Course not found for CRN {crn}"))
/// Get the total number of courses in the database
pub async fn get_course_count(&self) -> Result<i64> {
let count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM courses")
.fetch_one(&self.db_pool)
.await?;
Ok(count.0)
}
}
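With Redis gone, constructing the state no longer involves a URL or a fallible client handshake. A call-site sketch; the build_state wrapper is illustrative only.

use crate::banner::BannerApi;
use sqlx::PgPool;
use std::sync::Arc;

// Hypothetical call site: the constructor is now infallible and takes the
// shared Banner client plus the Postgres pool.
fn build_state(banner_api: Arc<BannerApi>, pool: PgPool) -> AppState {
    AppState::new(banner_api, pool)
}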

View File

@@ -69,18 +69,8 @@ async fn status(State(_state): State<BannerState>) -> Json<Value> {
async fn metrics(State(_state): State<BannerState>) -> Json<Value> {
// For now, return basic metrics structure
Json(json!({
"redis": {
"status": "connected",
"connected_clients": "TODO: implement client counting",
"used_memory": "TODO: implement memory tracking"
},
"cache": {
"courses": {
"count": "TODO: implement course counting"
},
"subjects": {
"count": "TODO: implement subject counting"
}
"banner_api": {
"status": "connected"
},
"timestamp": chrono::Utc::now().to_rfc3339()
}))
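The handler still ignores its state; once it is wired up, the new AppState::get_course_count helper could replace the removed cache counters. A sketch of that payload, not part of this diff; the metrics_body helper is hypothetical and assumes the handler can reach an AppState (or a wrapper exposing the same method).

use serde_json::{Value, json};

// Hypothetical follow-up: build the metrics payload from live data instead of
// placeholders.
async fn metrics_body(state: &AppState) -> anyhow::Result<Value> {
    let course_count = state.get_course_count().await?;
    Ok(json!({
        "banner_api": { "status": "connected" },
        "courses": { "count": course_count },
        "timestamp": chrono::Utc::now().to_rfc3339(),
    }))
}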