From ca18be727252096458a8a35e8617ecb9f33d7a5d Mon Sep 17 00:00:00 2001 From: bybrooklyn Date: Sat, 4 Apr 2026 00:30:41 -0400 Subject: [PATCH] feat: add job stall detection, database connection limits, configurable local notifications, and secure proxy header handling with entrypoint support. --- Dockerfile | 11 ++++- entrypoint.sh | 31 +++++++++++++ src/config.rs | 15 +++++- src/db.rs | 4 +- src/main.rs | 13 +++--- src/media/pipeline.rs | 23 +++++++++- src/media/processor.rs | 34 +++----------- src/notifications.rs | 37 +++++++++++---- src/orchestrator.rs | 56 +++++++++++++--------- src/server/middleware.rs | 52 +++++++++++++++------ src/server/mod.rs | 6 ++- src/server/sse.rs | 44 ++++++++++++++++-- src/server/tests.rs | 2 + src/system/fs_browser.rs | 27 +++++------ web/src/components/Dashboard.tsx | 5 +- web/src/components/ErrorBoundary.tsx | 69 ++++++++++++++++++++++++++++ web/src/components/JobManager.tsx | 5 +- web/src/pages/500.astro | 38 +++++++++++++++ 18 files changed, 361 insertions(+), 111 deletions(-) create mode 100644 entrypoint.sh create mode 100644 web/src/components/ErrorBoundary.tsx create mode 100644 web/src/pages/500.astro diff --git a/Dockerfile b/Dockerfile index 8be29e9..39a2a03 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,6 +43,7 @@ RUN apt-get update && \ va-driver-all \ libsqlite3-0 \ ca-certificates \ + gosu \ && if [ "$(dpkg --print-architecture)" = "amd64" ]; then \ apt-get install -y --no-install-recommends \ intel-media-va-driver-non-free \ @@ -75,10 +76,16 @@ RUN set -e; \ COPY --from=builder /app/target/release/alchemist /usr/local/bin/alchemist # Set environment variables -ENV LIBVA_DRIVER_NAME=iHD +# VA-API driver auto-detection: do NOT hardcode LIBVA_DRIVER_NAME here. +# Users can override via: docker run -e LIBVA_DRIVER_NAME=iHD ... 
+# Common values: iHD (Intel ≥ Broadwell), i965 (older Intel), radeonsi (AMD) ENV RUST_LOG=info ENV ALCHEMIST_CONFIG_PATH=/app/config/config.toml ENV ALCHEMIST_DB_PATH=/app/data/alchemist.db +COPY entrypoint.sh /app/entrypoint.sh +RUN chmod +x /app/entrypoint.sh + EXPOSE 3000 -ENTRYPOINT ["alchemist"] +ENTRYPOINT ["/app/entrypoint.sh"] +CMD ["alchemist"] diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100644 index 0000000..3232ccb --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,31 @@ +#!/bin/bash +set -e + +PUID=${PUID:-0} +PGID=${PGID:-0} + +if [ "$PUID" -ne 0 ] && [ "$PGID" -ne 0 ]; then + echo "Starting Alchemist with UID: $PUID, GID: $PGID" + + # Create group and user securely if they don't exist + if ! getent group alchemist >/dev/null; then + groupadd -g "$PGID" alchemist + fi + if ! getent passwd alchemist >/dev/null; then + useradd -u "$PUID" -g "$PGID" -s /bin/bash -m -d /app alchemist + fi + + # Take ownership of app data — skip gracefully for read-only mounts + for dir in /app/config /app/data; do + if [ -d "$dir" ]; then + chown -R alchemist:alchemist "$dir" 2>/dev/null || \ + echo "Warning: Cannot chown $dir (read-only mount?). Continuing..." + fi + done + + # Drop privileges and execute + exec gosu alchemist "$@" +else + # Run natively + exec "$@" +fi diff --git a/src/config.rs b/src/config.rs index 2a8940f..91ad53e 100644 --- a/src/config.rs +++ b/src/config.rs @@ -716,12 +716,23 @@ impl Config { Ok(()) } - /// Save config to file + /// Save config to file atomically (write to temp, then rename). + /// This prevents corruption if the process crashes mid-write. pub fn save(&self, path: &Path) -> Result<()> { let mut config = self.clone(); config.canonicalize_for_save(); let content = toml::to_string_pretty(&config)?; - std::fs::write(path, content)?; + + let tmp = path.with_extension("toml.tmp"); + std::fs::write(&tmp, &content)?; + + // Atomic rename: if this fails, the original config is still intact. 
+ if let Err(e) = std::fs::rename(&tmp, path) { + // Clean up the temp file on rename failure + let _ = std::fs::remove_file(&tmp); + return Err(e.into()); + } + Ok(()) } diff --git a/src/db.rs b/src/db.rs index 56e89a3..b802cf4 100644 --- a/src/db.rs +++ b/src/db.rs @@ -619,7 +619,9 @@ impl Db { .journal_mode(SqliteJournalMode::Wal) .busy_timeout(Duration::from_secs(5)); - let pool = SqlitePool::connect_with(options).await?; + let pool = sqlx::sqlite::SqlitePoolOptions::new() + .max_connections(1) + .connect_with(options).await?; info!( target: "startup", "Database connection opened in {} ms", diff --git a/src/main.rs b/src/main.rs index c25191a..f781a80 100644 --- a/src/main.rs +++ b/src/main.rs @@ -463,17 +463,18 @@ async fn run() -> Result<()> { // Keep legacy channel for transition compatibility let (tx, _rx) = broadcast::channel(100); - // Initialize Notification Manager - let notification_manager = Arc::new(alchemist::notifications::NotificationManager::new( - db.as_ref().clone(), - )); - notification_manager.start_listener(tx.subscribe()); - let transcoder = Arc::new(Transcoder::new()); let hardware_state = hardware::HardwareState::new(Some(hw_info.clone())); let hardware_probe_log = Arc::new(RwLock::new(initial_probe_log)); let config = Arc::new(RwLock::new(config)); + // Initialize Notification Manager (needs config for allow_local_notifications) + let notification_manager = Arc::new(alchemist::notifications::NotificationManager::new( + db.as_ref().clone(), + config.clone(), + )); + notification_manager.start_listener(tx.subscribe()); + let maintenance_db = db.clone(); let maintenance_config = config.clone(); tokio::spawn(async move { diff --git a/src/media/pipeline.rs b/src/media/pipeline.rs index c4fcabd..ef183a8 100644 --- a/src/media/pipeline.rs +++ b/src/media/pipeline.rs @@ -1263,8 +1263,27 @@ impl Pipeline { if let Ok(file_settings) = self.db.get_file_settings().await { if file_settings.delete_source { - if let Err(e) = 
std::fs::remove_file(input_path) { - tracing::warn!("Failed to delete source {:?}: {}", input_path, e); + // Safety: verify the promoted output is intact before destroying the source. + // This prevents data loss if the filesystem silently corrupted the output + // during rename (e.g., stale NFS/SMB mount, full disk). + match std::fs::metadata(context.output_path) { + Ok(m) if m.len() > 0 => { + if let Err(e) = std::fs::remove_file(input_path) { + tracing::warn!("Failed to delete source {:?}: {}", input_path, e); + } + } + Ok(_) => { + tracing::error!( + "Job {}: Output file {:?} is empty after promotion — source preserved to prevent data loss", + job_id, context.output_path + ); + } + Err(e) => { + tracing::error!( + "Job {}: Cannot verify output {:?} after promotion ({}). Source preserved to prevent data loss", + job_id, context.output_path, e + ); + } } } } diff --git a/src/media/processor.rs b/src/media/processor.rs index 720295f..4879092 100644 --- a/src/media/processor.rs +++ b/src/media/processor.rs @@ -450,44 +450,24 @@ impl Agent { } /// Gracefully shutdown the agent. - /// Drains active jobs and waits up to `timeout` for them to complete. - /// After timeout, forcefully cancels remaining jobs. - pub async fn graceful_shutdown(&self, timeout: std::time::Duration) { - info!("Initiating graceful shutdown..."); + /// Cancels active jobs immediately and returns quickly. + pub async fn graceful_shutdown(&self) { + info!("Initiating rapid shutdown..."); // Stop accepting new jobs self.pause(); - self.drain(); - // Wait for active jobs to complete (with timeout) - let start = std::time::Instant::now(); - let check_interval = std::time::Duration::from_millis(500); - - while start.elapsed() < timeout { - let active = self.orchestrator.active_job_count(); - if active == 0 { - info!("All jobs completed gracefully."); - return; - } - info!( - "Waiting for {} active job(s) to complete... 
({:.0}s remaining)", - active, - (timeout - start.elapsed()).as_secs_f64() - ); - tokio::time::sleep(check_interval).await; - } - - // Timeout reached - force cancel remaining jobs + // Immediately force cancel remaining jobs let cancelled = self.orchestrator.cancel_all_jobs(); if cancelled > 0 { tracing::warn!( - "Shutdown timeout reached. Forcefully cancelled {} job(s).", + "Fast shutdown requested. Forcefully cancelled {} job(s).", cancelled ); - // Give FFmpeg processes a moment to terminate + // Give FFmpeg processes a moment to terminate and Tokio to flush DB statuses tokio::time::sleep(std::time::Duration::from_secs(2)).await; } - info!("Graceful shutdown complete."); + info!("Rapid shutdown complete."); } } diff --git a/src/notifications.rs b/src/notifications.rs index 2b09d77..e6c54cb 100644 --- a/src/notifications.rs +++ b/src/notifications.rs @@ -1,20 +1,23 @@ +use crate::config::Config; use crate::db::{AlchemistEvent, Db, NotificationTarget}; use reqwest::{Client, Url, redirect::Policy}; use serde_json::json; use std::net::IpAddr; +use std::sync::Arc; use std::time::Duration; use tokio::net::lookup_host; -use tokio::sync::broadcast; +use tokio::sync::{RwLock, broadcast}; use tracing::{error, warn}; #[derive(Clone)] pub struct NotificationManager { db: Db, + config: Arc>, } impl NotificationManager { - pub fn new(db: Db) -> Self { - Self { db } + pub fn new(db: Db, config: Arc>) -> Self { + Self { db, config } } pub fn start_listener(&self, mut rx: broadcast::Receiver) { @@ -111,17 +114,28 @@ impl NotificationManager { .ok_or("notification endpoint host is missing")?; let port = url.port_or_known_default().ok_or("invalid port")?; - if host.eq_ignore_ascii_case("localhost") { + let allow_local = self.config.read().await.notifications.allow_local_notifications; + + if !allow_local && host.eq_ignore_ascii_case("localhost") { return Err("localhost is not allowed as a notification endpoint".into()); } let addr = format!("{}:{}", host, port); let ips = 
tokio::time::timeout(Duration::from_secs(3), lookup_host(&addr)).await??; - let target_ip = ips - .into_iter() - .map(|a| a.ip()) - .find(|ip| !is_private_ip(*ip)) - .ok_or("no public IP address found for notification endpoint")?; + + let target_ip = if allow_local { + // When local notifications are allowed, accept any resolved IP + ips.into_iter() + .map(|a| a.ip()) + .next() + .ok_or("no IP address found for notification endpoint")? + } else { + // When local notifications are blocked, only use public IPs + ips.into_iter() + .map(|a| a.ip()) + .find(|ip| !is_private_ip(*ip)) + .ok_or("no public IP address found for notification endpoint")? + }; // Pin the request to the validated IP to prevent DNS rebinding let client = Client::builder() @@ -324,7 +338,10 @@ mod tests { db_path.push(format!("alchemist_notifications_test_{}.db", token)); let db = Db::new(db_path.to_string_lossy().as_ref()).await?; - let manager = NotificationManager::new(db); + let mut test_config = crate::config::Config::default(); + test_config.notifications.allow_local_notifications = true; + let config = Arc::new(RwLock::new(test_config)); + let manager = NotificationManager::new(db, config); let listener = match TcpListener::bind("127.0.0.1:0").await { Ok(listener) => listener, diff --git a/src/orchestrator.rs b/src/orchestrator.rs index c857ebc..4fd2d42 100644 --- a/src/orchestrator.rs +++ b/src/orchestrator.rs @@ -294,32 +294,46 @@ impl Transcoder { loop { tokio::select! 
{ - line_res = reader.next_line() => { - match line_res { - Ok(Some(line)) => { - let line = if line.len() > 4096 { - format!("{}...[truncated]", &line[..4096]) - } else { - line - }; - last_lines.push_back(line.clone()); - if last_lines.len() > 20 { - last_lines.pop_front(); - } + line_res_timeout = tokio::time::timeout(tokio::time::Duration::from_secs(600), reader.next_line()) => { + match line_res_timeout { + Ok(line_res) => match line_res { + Ok(Some(line)) => { + let line = if line.len() > 4096 { + format!("{}...[truncated]", &line[..4096]) + } else { + line + }; + last_lines.push_back(line.clone()); + if last_lines.len() > 20 { + last_lines.pop_front(); + } - if let Some(observer) = observer.as_ref() { - observer.on_log(line.clone()).await; + if let Some(observer) = observer.as_ref() { + observer.on_log(line.clone()).await; - if let Some(total_duration) = total_duration { - if let Some(progress) = progress_state.ingest_line(&line) { - observer.on_progress(progress, total_duration).await; + if let Some(total_duration) = total_duration { + if let Some(progress) = progress_state.ingest_line(&line) { + observer.on_progress(progress, total_duration).await; + } } } } - } - Ok(None) => break, - Err(e) => { - error!("Error reading FFmpeg stderr: {}", e); + Ok(None) => break, + Err(e) => { + error!("Error reading FFmpeg stderr: {}", e); + break; + } + }, + Err(_) => { + error!("Job {:?} stalled: No output from FFmpeg for 10 minutes. 
Killing process...", job_id); + let _ = child.kill().await; + killed = true; + if let Some(id) = job_id { + match self.cancel_channels.lock() { + Ok(mut channels) => { channels.remove(&id); } + Err(e) => { e.into_inner().remove(&id); } + } + } break; } } diff --git a/src/server/middleware.rs b/src/server/middleware.rs index 033ebf2..d3fa63f 100644 --- a/src/server/middleware.rs +++ b/src/server/middleware.rs @@ -235,23 +235,45 @@ pub(crate) fn get_cookie_value(headers: &axum::http::HeaderMap, name: &str) -> O } pub(crate) fn request_ip(req: &Request) -> Option { - if let Some(xff) = req.headers().get("X-Forwarded-For") { - if let Ok(xff_str) = xff.to_str() { - if let Some(ip_str) = xff_str.split(',').next() { - if let Ok(ip) = ip_str.trim().parse() { - return Some(ip); + let peer_ip = req + .extensions() + .get::>() + .map(|info| info.0.ip()); + + // Only trust proxy headers (X-Forwarded-For, X-Real-IP) when the direct + // TCP peer is a loopback or private IP — i.e., a trusted reverse proxy. + // This prevents external attackers from spoofing these headers to bypass + // rate limiting. + if let Some(peer) = peer_ip { + if is_trusted_peer(peer) { + if let Some(xff) = req.headers().get("X-Forwarded-For") { + if let Ok(xff_str) = xff.to_str() { + if let Some(ip_str) = xff_str.split(',').next() { + if let Ok(ip) = ip_str.trim().parse() { + return Some(ip); + } + } + } + } + if let Some(xri) = req.headers().get("X-Real-IP") { + if let Ok(xri_str) = xri.to_str() { + if let Ok(ip) = xri_str.trim().parse() { + return Some(ip); + } } } } } - if let Some(xri) = req.headers().get("X-Real-IP") { - if let Ok(xri_str) = xri.to_str() { - if let Ok(ip) = xri_str.trim().parse() { - return Some(ip); - } - } - } - req.extensions() - .get::>() - .map(|info| info.0.ip()) + + peer_ip +} + +/// Returns true if the peer IP is a loopback or private address, +/// meaning it is likely a local reverse proxy that can be trusted +/// to set forwarded headers. 
+fn is_trusted_peer(ip: IpAddr) -> bool { + match ip { + IpAddr::V4(v4) => v4.is_loopback() || v4.is_private() || v4.is_link_local(), + IpAddr::V6(v6) => v6.is_loopback() || v6.is_unique_local() || v6.is_unicast_link_local(), + } } diff --git a/src/server/mod.rs b/src/server/mod.rs index 2f6a098..fb53656 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -84,6 +84,7 @@ pub struct AppState { pub resources_cache: Arc>>, pub(crate) login_rate_limiter: Mutex>, pub(crate) global_rate_limiter: Mutex>, + pub(crate) sse_connections: Arc, } pub struct RunServerArgs { @@ -164,6 +165,7 @@ pub async fn run_server(args: RunServerArgs) -> Result<()> { resources_cache: Arc::new(tokio::sync::Mutex::new(None)), login_rate_limiter: Mutex::new(HashMap::new()), global_rate_limiter: Mutex::new(HashMap::new()), + sse_connections: Arc::new(std::sync::atomic::AtomicUsize::new(0)), }); // Clone agent for shutdown handler before moving state into router @@ -265,9 +267,9 @@ pub async fn run_server(args: RunServerArgs) -> Result<()> { } } - // Give active jobs up to 5 minutes to complete + // Forceful immediate shutdown of active jobs shutdown_agent - .graceful_shutdown(std::time::Duration::from_secs(300)) + .graceful_shutdown() .await; }) .await diff --git a/src/server/sse.rs b/src/server/sse.rs index 99a27be..a9d7202 100644 --- a/src/server/sse.rs +++ b/src/server/sse.rs @@ -171,9 +171,40 @@ pub(crate) fn sse_unified_stream( ]) } +/// Maximum concurrent SSE connections to prevent resource exhaustion. +const MAX_SSE_CONNECTIONS: usize = 50; + +/// RAII guard that decrements the SSE connection counter on drop. 
+struct SseConnectionGuard(Arc); + +impl Drop for SseConnectionGuard { + fn drop(&mut self) { + self.0.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); + } +} + pub(crate) async fn sse_handler( State(state): State>, -) -> Sse>> { +) -> std::result::Result< + Sse>>, + axum::http::StatusCode, +> { + use std::sync::atomic::Ordering; + + // Enforce connection limit + let current = state.sse_connections.fetch_add(1, Ordering::SeqCst); + if current >= MAX_SSE_CONNECTIONS { + state.sse_connections.fetch_sub(1, Ordering::SeqCst); + warn!( + "SSE connection limit reached ({}/{}). Rejecting new connection.", + current, MAX_SSE_CONNECTIONS + ); + return Err(axum::http::StatusCode::TOO_MANY_REQUESTS); + } + + // RAII guard to decrement the counter when the stream is dropped + let guard = Arc::new(SseConnectionGuard(state.sse_connections.clone())); + // Subscribe to all channels let job_rx = state.event_channels.jobs.subscribe(); let config_rx = state.event_channels.config.subscribe(); @@ -182,10 +213,13 @@ pub(crate) async fn sse_handler( // Create unified stream from new typed channels let unified_stream = sse_unified_stream(job_rx, config_rx, system_rx); - let stream = unified_stream.map(|message| match message { - Ok(message) => Ok(message.into()), - Err(never) => match never {}, + let stream = unified_stream.map(move |message| { + let _guard = guard.clone(); // keep the guard alive as long as the stream lives + match message { + Ok(message) => Ok(message.into()), + Err(never) => match never {}, + } }); - Sse::new(stream).keep_alive(axum::response::sse::KeepAlive::default()) + Ok(Sse::new(stream).keep_alive(axum::response::sse::KeepAlive::default())) } diff --git a/src/server/tests.rs b/src/server/tests.rs index f7689c8..0b8ddf7 100644 --- a/src/server/tests.rs +++ b/src/server/tests.rs @@ -107,6 +107,7 @@ where telemetry_runtime_id: "test-runtime".to_string(), notification_manager: Arc::new(crate::notifications::NotificationManager::new( db.as_ref().clone(), + 
config.clone(), )), sys: Mutex::new(sys), file_watcher, @@ -118,6 +119,7 @@ where resources_cache: Arc::new(tokio::sync::Mutex::new(None)), login_rate_limiter: Mutex::new(HashMap::new()), global_rate_limiter: Mutex::new(HashMap::new()), + sse_connections: Arc::new(std::sync::atomic::AtomicUsize::new(0)), }); Ok((state.clone(), app_router(state), config_path, db_path)) diff --git a/src/system/fs_browser.rs b/src/system/fs_browser.rs index 02b5984..60523c0 100644 --- a/src/system/fs_browser.rs +++ b/src/system/fs_browser.rs @@ -266,25 +266,20 @@ fn preview_blocking(request: FsPreviewRequest) -> Result { let exists = canonical.exists(); let readable = exists && canonical.is_dir() && std::fs::read_dir(&canonical).is_ok(); - let media_files = if readable { - scanner - .scan_with_recursion(vec![(canonical.clone(), true)]) - .len() - } else { - 0 - }; - total_media_files += media_files; - - let sample_files = if readable { - scanner - .scan_with_recursion(vec![(canonical.clone(), true)]) - .into_iter() - .take(5) - .map(|media| media.path.to_string_lossy().to_string()) - .collect::>() + // Scan once and reuse results for both count and samples + let scan_results = if readable { + scanner.scan_with_recursion(vec![(canonical.clone(), true)]) } else { Vec::new() }; + let media_files = scan_results.len(); + total_media_files += media_files; + + let sample_files = scan_results + .into_iter() + .take(5) + .map(|media| media.path.to_string_lossy().to_string()) + .collect::>(); let mut dir_warnings = directory_warnings(&canonical, readable); if readable && media_files == 0 { diff --git a/web/src/components/Dashboard.tsx b/web/src/components/Dashboard.tsx index 3f4446d..7fe10b2 100644 --- a/web/src/components/Dashboard.tsx +++ b/web/src/components/Dashboard.tsx @@ -12,6 +12,7 @@ import { apiJson, isApiError } from "../lib/api"; import { useSharedStats } from "../lib/statsStore"; import { showToast } from "../lib/toast"; import ResourceMonitor from "./ResourceMonitor"; +import { 
withErrorBoundary } from "./ErrorBoundary"; interface Job { id: number; @@ -76,7 +77,7 @@ function StatCard({ label, value, icon: Icon, colorClass }: StatCardProps) { ); } -export default function Dashboard() { +function Dashboard() { const [jobs, setJobs] = useState([]); const [jobsLoading, setJobsLoading] = useState(true); const [bundle, setBundle] = useState(null); @@ -366,3 +367,5 @@ export default function Dashboard() { ); } + +export default withErrorBoundary(Dashboard, "Dashboard"); diff --git a/web/src/components/ErrorBoundary.tsx b/web/src/components/ErrorBoundary.tsx new file mode 100644 index 0000000..23aef5b --- /dev/null +++ b/web/src/components/ErrorBoundary.tsx @@ -0,0 +1,69 @@ +import React, { Component, type ReactNode } from "react"; +import { AlertCircle } from "lucide-react"; + +interface Props { + children: ReactNode; + fallback?: ReactNode; + moduleName?: string; +} + +interface State { + hasError: boolean; + errorMessage: string; +} + +export class ErrorBoundary extends Component { + public state: State = { + hasError: false, + errorMessage: "", + }; + + public static getDerivedStateFromError(error: Error): State { + return { hasError: true, errorMessage: error.message }; + } + + public componentDidCatch(error: Error, errorInfo: React.ErrorInfo) { + console.error("Uncaught error in ErrorBoundary:", error, errorInfo); + } + + public render() { + if (this.state.hasError) { + if (this.props.fallback) { + return this.props.fallback; + } + return ( +
+        <div className="flex flex-col items-center justify-center rounded-lg border border-red-500/20 bg-red-500/5 p-8 text-center">
+          <AlertCircle className="mb-3 h-8 w-8 text-red-500" />
+          <h2 className="text-lg font-semibold text-red-400">Something went wrong</h2>
+          <p className="mt-2 max-w-md text-sm text-zinc-400">
+            The {this.props.moduleName || "component"} encountered an unexpected error and could not be displayed.
+          </p>
+          <pre className="mt-4 max-w-full overflow-x-auto rounded bg-black/30 p-2 text-xs text-red-300">
+            {this.state.errorMessage}
+          </pre>
+        </div>
+      );
+    }
+
+    return this.props.children;
+  }
+}
+
+export const withErrorBoundary = <P extends object>(
+  WrappedComponent: React.ComponentType<P>
, + moduleName?: string +) => { + return function WithErrorBoundary(props: P) { + return ( + + + + ); + }; +}; diff --git a/web/src/components/JobManager.tsx b/web/src/components/JobManager.tsx index 37ea6c4..5004985 100644 --- a/web/src/components/JobManager.tsx +++ b/web/src/components/JobManager.tsx @@ -11,6 +11,7 @@ import ConfirmDialog from "./ui/ConfirmDialog"; import { clsx, type ClassValue } from "clsx"; import { twMerge } from "tailwind-merge"; import { motion, AnimatePresence } from "framer-motion"; +import { withErrorBoundary } from "./ErrorBoundary"; function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)); @@ -364,7 +365,7 @@ const SORT_OPTIONS: Array<{ value: SortField; label: string }> = [ { value: "size", label: "File Size" }, ]; -export default function JobManager() { +function JobManager() { const [jobs, setJobs] = useState([]); const [loading, setLoading] = useState(true); const [selected, setSelected] = useState>(new Set()); @@ -1769,3 +1770,5 @@ export default function JobManager() { ); } + +export default withErrorBoundary(JobManager, "Job Management"); diff --git a/web/src/pages/500.astro b/web/src/pages/500.astro new file mode 100644 index 0000000..5783ec0 --- /dev/null +++ b/web/src/pages/500.astro @@ -0,0 +1,38 @@ +--- +import Layout from "../layouts/Layout.astro"; +import { AlertTriangle } from "lucide-react"; + +interface Props { + error: unknown; +} + +const { error } = Astro.props; +--- + + +

+<Layout title="500 — Server Error">
+  <main class="flex min-h-screen flex-col items-center justify-center p-8 text-center">
+    <div class="flex flex-col items-center gap-4">
+      <div class="rounded-full bg-red-500/10 p-4">
+        <AlertTriangle className="h-10 w-10 text-red-500" />
+      </div>
+      <h1 class="text-3xl font-bold">500 Server Error</h1>
+      <p class="max-w-md text-zinc-400">
+        Alchemist encountered an internal error. Please check the backend logs.
+      </p>
+      {error instanceof Error ? (
+        <pre class="max-w-full overflow-x-auto rounded bg-black/30 p-3 text-left text-xs text-red-300">{error.message}</pre>
+      ) : null}
+      <a href="/" class="mt-4 rounded-md bg-zinc-800 px-4 py-2 text-sm font-medium hover:bg-zinc-700">
+        Return to Dashboard
+      </a>
+    </div>
+  </main>
+</Layout>