feat: Add v0.2.0 features - health endpoints, scheduler, stats dashboard, HEVC/subtitle config, Helm chart, docs

This commit is contained in:
Brooklyn
2026-01-08 23:12:36 -05:00
parent d78b3a472c
commit ad31409667
16 changed files with 911 additions and 3 deletions

17
deploy/helm/Chart.yaml Normal file
View File

@@ -0,0 +1,17 @@
# Helm chart manifest for Alchemist (see values.yaml for configuration).
apiVersion: v2
name: alchemist
description: AV1 video transcoding automation server
type: application
# Chart (packaging) version — bumped independently of the app release below.
version: 0.1.0
# Version of the application image this chart deploys by default
# (used as the image tag fallback in deployment.yaml).
appVersion: "0.2.0"
keywords:
- transcoding
- av1
- video
- ffmpeg
- media
home: https://github.com/BrooklynLovesZelda/alchemist
sources:
- https://github.com/BrooklynLovesZelda/alchemist
maintainers:
- name: BrooklynLovesZelda

View File

@@ -0,0 +1,60 @@
{{/*
Expand the name of the chart.
Truncated to 63 characters because Kubernetes resource names must fit the
DNS-1123 label length limit; a trailing "-" left by truncation is stripped.
*/}}
{{- define "alchemist.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
`fullnameOverride` wins outright; otherwise, if the release name already
contains the chart name it is used as-is, else the two are joined as
"<release>-<chart>". All results are truncated to the 63-char limit.
*/}}
{{- define "alchemist.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" is not valid in label values, so build-metadata separators are
rewritten to "_".
*/}}
{{- define "alchemist.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to every resource in this chart.
Includes the selector labels plus chart/version/managed-by metadata.
*/}}
{{- define "alchemist.labels" -}}
helm.sh/chart: {{ include "alchemist.chart" . }}
{{ include "alchemist.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels — the stable subset used for pod selection. These must
never change for an installed release, or the Deployment selector breaks.
*/}}
{{- define "alchemist.selectorLabels" -}}
app.kubernetes.io/name: {{ include "alchemist.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use.
When creation is enabled the fullname is the default; otherwise fall back
to the namespace's "default" service account.
*/}}
{{- define "alchemist.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "alchemist.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,9 @@
# ConfigMap carrying the rendered config.toml for the server; mounted into
# the pod via a subPath volume mount (see deployment.yaml).
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ include "alchemist.fullname" . }}-config
  labels:
    {{- include "alchemist.labels" . | nindent 4 }}
data:
  config.toml: |
    {{- .Values.config.content | nindent 4 }}

View File

@@ -0,0 +1,93 @@
# Deployment for the Alchemist transcoding server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "alchemist.fullname" . }}
  labels:
    {{- include "alchemist.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "alchemist.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "alchemist.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "alchemist.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            # NOTE(review): port 3000 is hard-coded here while the Service
            # port is configurable via .Values.service.port — confirm the
            # app always listens on 3000.
            - name: http
              containerPort: 3000
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /api/health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /api/ready
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            # NOTE(review): subPath mounts do not receive live ConfigMap
            # updates; a pod restart is needed after config changes.
            - name: config
              mountPath: /app/config.toml
              subPath: config.toml
            - name: data
              mountPath: /app/data
            {{- if .Values.media.enabled }}
            - name: media
              mountPath: {{ .Values.media.mountPath }}
            {{- end }}
          env:
            - name: RUST_LOG
              value: "info"
      volumes:
        - name: config
          configMap:
            name: {{ include "alchemist.fullname" . }}-config
        - name: data
          {{- if .Values.persistence.enabled }}
          persistentVolumeClaim:
            claimName: {{ .Values.persistence.existingClaim | default (include "alchemist.fullname" .) }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- if .Values.media.enabled }}
        - name: media
          hostPath:
            path: {{ .Values.media.hostPath }}
            type: Directory
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

View File

@@ -0,0 +1,15 @@
# Service exposing the Alchemist HTTP API / web UI.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "alchemist.fullname" . }}
  labels:
    {{- include "alchemist.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    # Routes to the container's named "http" port (3000 in deployment.yaml).
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "alchemist.selectorLabels" . | nindent 4 }}

87
deploy/helm/values.yaml Normal file
View File

@@ -0,0 +1,87 @@
# Default values for alchemist
replicaCount: 1

image:
  repository: ghcr.io/brooklynloveszelda/alchemist
  pullPolicy: IfNotPresent
  tag: "latest"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  create: true
  annotations: {}
  name: ""

podAnnotations: {}
podSecurityContext: {}
securityContext: {}

service:
  type: ClusterIP
  port: 3000

ingress:
  enabled: false
  className: ""
  annotations: {}
  hosts:
    - host: alchemist.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []

resources:
  limits:
    cpu: 4000m
    memory: 4Gi
  requests:
    cpu: 500m
    memory: 512Mi

# GPU Configuration
# NOTE(review): gpu.enabled is not currently referenced by the templates;
# confirm intended wiring (e.g. resource requests / device mounts).
gpu:
  enabled: false
  # For NVIDIA:
  # nvidia.com/gpu: 1
  # For Intel QSV, use node selector and device mounts

persistence:
  enabled: true
  storageClass: ""
  accessMode: ReadWriteOnce
  size: 10Gi
  # Existing claim to use
  existingClaim: ""

# Media volume mounts
media:
  enabled: true
  # Path on the host
  hostPath: /media
  # Mount path in container
  mountPath: /media

# Configuration
config:
  # Inline config.toml content
  content: |
    [transcode]
    size_reduction_threshold = 0.3
    min_bpp_threshold = 0.1
    min_file_size_mb = 50
    concurrent_jobs = 1
    [hardware]
    allow_cpu_fallback = true
    allow_cpu_encoding = true
    [scanner]
    directories = ["/media"]

nodeSelector: {}
tolerations: []
affinity: {}

32
docker-compose.yml Normal file
View File

@@ -0,0 +1,32 @@
# Docker Compose deployment for the Alchemist transcoding server.
services:
  alchemist:
    image: ghcr.io/brooklynloveszelda/alchemist:latest
    container_name: alchemist
    restart: unless-stopped
    ports:
      # Quoted to avoid YAML's sexagesimal-number pitfall for port maps.
      - "3000:3000"
    volumes:
      # Configuration file
      - ./config.toml:/app/config.toml:ro
      # Media directories (adjust paths as needed)
      - /path/to/media:/media
      - /path/to/output:/output
      # Persistent database
      - alchemist_data:/app/data
    environment:
      - RUST_LOG=info
      - TZ=America/New_York
    # For Intel QuickSync (uncomment if needed)
    # devices:
    #   - /dev/dri:/dev/dri
    # For NVIDIA GPU (uncomment if needed)
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]

volumes:
  # Named volume for the job database (empty value = engine defaults).
  alchemist_data:

141
docs/GPU_PASSTHROUGH.md Normal file
View File

@@ -0,0 +1,141 @@
# GPU Passthrough Guide
This guide explains how to enable hardware acceleration for video encoding in Docker containers.
## NVIDIA GPU (NVENC)
### Prerequisites
1. NVIDIA GPU with NVENC support (GTX 1050+ / RTX series / Quadro). Note: hardware **AV1** encoding via NVENC requires an RTX 40-series (Ada Lovelace) or newer GPU; older NVENC-capable cards can hardware-encode H.264/HEVC only.
2. NVIDIA drivers installed on host
3. NVIDIA Container Toolkit
### Install NVIDIA Container Toolkit
```bash
# Add the NVIDIA Container Toolkit repository
# (the old nvidia-docker repo and `apt-key` are deprecated)
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \
  sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \
  sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
  sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
# Install toolkit
sudo apt update
sudo apt install -y nvidia-container-toolkit
sudo systemctl restart docker
```
### Docker Compose Configuration
```yaml
services:
alchemist:
image: ghcr.io/brooklynloveszelda/alchemist:latest
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: 1
capabilities: [gpu]
environment:
- NVIDIA_VISIBLE_DEVICES=all
```
### Docker CLI
```bash
docker run --gpus all \
-p 3000:3000 \
-v /media:/media \
ghcr.io/brooklynloveszelda/alchemist:latest
```
---
## Intel QuickSync (QSV)
### Prerequisites
1. Intel CPU with integrated graphics (6th Gen+)
2. VAAPI drivers installed on host
### Install VAAPI Drivers (Host)
```bash
# Debian/Ubuntu
sudo apt install intel-media-va-driver-non-free vainfo
# Verify
vainfo
```
### Docker Compose Configuration
```yaml
services:
alchemist:
image: ghcr.io/brooklynloveszelda/alchemist:latest
devices:
- /dev/dri:/dev/dri
group_add:
- video
- render
environment:
- LIBVA_DRIVER_NAME=iHD
```
### Docker CLI
```bash
docker run --device /dev/dri:/dev/dri \
--group-add video --group-add render \
-e LIBVA_DRIVER_NAME=iHD \
-p 3000:3000 \
-v /media:/media \
ghcr.io/brooklynloveszelda/alchemist:latest
```
---
## AMD GPU (VAAPI)
### Prerequisites
1. AMD GPU with VAAPI support
2. Mesa VAAPI drivers
### Install Drivers (Host)
```bash
# Debian/Ubuntu
sudo apt install mesa-va-drivers vainfo
```
### Docker Configuration
Same as Intel QSV, but set driver:
```yaml
environment:
- LIBVA_DRIVER_NAME=radeonsi
```
---
## Verification
After starting the container, check hardware detection in the logs:
```
Selected Hardware: Intel QSV
Device Path: /dev/dri/renderD128
```
If you see `CPU (Software)`, hardware acceleration is not working.
## Troubleshooting
| Issue | Solution |
|-------|----------|
| `vainfo: error` | Install VAAPI drivers on host |
| `CUDA error` | Install NVIDIA Container Toolkit |
| CPU fallback despite GPU | Check device permissions in container |
| Permission denied on `/dev/dri` | Add `--group-add video --group-add render` |

View File

@@ -0,0 +1,19 @@
@echo off
REM Windows launcher for the Alchemist transcoding server.
REM Expects alchemist.exe to sit in the same directory as this script.
title Alchemist Transcoding Server
echo ========================================
echo ALCHEMIST SERVER
echo ========================================
echo.
echo Starting Alchemist in server mode...
echo Access the web UI at: http://localhost:3000
echo.
echo Press Ctrl+C to stop the server.
echo ========================================
echo.
REM Switch to this script's own directory (%~dp0) so the exe is found
REM regardless of the caller's working directory; /d also changes drive.
cd /d "%~dp0"
REM Blocks here until the server exits.
alchemist.exe --server
echo.
echo Server stopped.
REM Keep the console window open so exit output stays visible.
pause

View File

@@ -1,3 +1,4 @@
use crate::scheduler::ScheduleConfig;
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::path::Path;
@@ -11,6 +12,8 @@ pub struct Config {
pub notifications: NotificationsConfig,
#[serde(default)]
pub quality: QualityConfig,
#[serde(default)]
pub schedule: ScheduleConfig,
}
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
@@ -23,7 +26,6 @@ pub enum QualityProfile {
Speed,
}
impl QualityProfile {
pub fn as_str(&self) -> &'static str {
match self {
@@ -106,6 +108,45 @@ impl std::fmt::Display for CpuPreset {
}
}
/// Output codec selection
///
/// Target codec for transcoded output. Serialized in config files as
/// lowercase names (`"av1"` / `"hevc"`) via `rename_all`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum OutputCodec {
    /// AV1 — the default output codec.
    #[default]
    Av1,
    /// HEVC / H.265 output.
    Hevc,
}

impl OutputCodec {
    /// Lowercase codec name, matching the on-disk config representation.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Av1 => "av1",
            Self::Hevc => "hevc",
        }
    }
}
/// Subtitle handling mode
///
/// How subtitle streams are treated during transcoding. Serialized in
/// config files as lowercase names via `rename_all`.
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default)]
#[serde(rename_all = "lowercase")]
pub enum SubtitleMode {
    /// Pass subtitle streams through unchanged (default).
    #[default]
    Copy,
    /// Burn subtitles into the video frames.
    Burn,
    /// Extract subtitles to separate files.
    Extract,
    /// Drop subtitle streams entirely.
    None,
}

impl SubtitleMode {
    /// Lowercase mode name, matching the on-disk config representation.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Copy => "copy",
            Self::Burn => "burn",
            Self::Extract => "extract",
            Self::None => "none",
        }
    }
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ScannerConfig {
@@ -122,6 +163,10 @@ pub struct TranscodeConfig {
pub concurrent_jobs: usize,
#[serde(default)]
pub quality_profile: QualityProfile,
#[serde(default)]
pub output_codec: OutputCodec,
#[serde(default)]
pub subtitle_mode: SubtitleMode,
}
// Removed default_quality_profile helper as Default trait on enum handles it now.
@@ -178,6 +223,8 @@ impl Default for Config {
min_file_size_mb: 50,
concurrent_jobs: 1,
quality_profile: QualityProfile::Balanced,
output_codec: OutputCodec::Av1,
subtitle_mode: SubtitleMode::Copy,
},
hardware: HardwareConfig {
preferred_vendor: None,
@@ -192,6 +239,7 @@ impl Default for Config {
},
notifications: NotificationsConfig::default(),
quality: QualityConfig::default(),
schedule: ScheduleConfig::default(),
}
}
}

View File

@@ -1,10 +1,11 @@
pub mod media;
pub mod system;
pub mod config;
pub mod db;
pub mod error;
pub mod media;
pub mod orchestrator;
pub mod scheduler;
pub mod server;
pub mod system;
pub mod wizard;
pub use config::QualityProfile;

151
src/scheduler.rs Normal file
View File

@@ -0,0 +1,151 @@
//! Job scheduler for time-based processing
//!
//! Allows users to configure specific hours when transcoding should run.
use chrono::{Datelike, Local, Timelike, Weekday};
use serde::{Deserialize, Serialize};
use tracing::{debug, info};
/// Schedule configuration
///
/// Controls when the transcoding engine is allowed to run. When `enabled`
/// is false the engine runs 24/7. The hour window may wrap past midnight
/// (e.g. start 22, end 6 = overnight).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScheduleConfig {
    /// Enable scheduling (if false, run 24/7)
    #[serde(default)]
    pub enabled: bool,
    /// Start hour (0-23)
    #[serde(default = "default_start_hour")]
    pub start_hour: u32,
    /// End hour (0-23)
    #[serde(default = "default_end_hour")]
    pub end_hour: u32,
    /// Days of week to run (empty = all days); lowercase three-letter
    /// abbreviations ("mon" .. "sun").
    #[serde(default)]
    pub days: Vec<String>,
}

/// Default window start: 22:00 (10 PM).
fn default_start_hour() -> u32 {
    22
}

/// Default window end: 06:00 (6 AM).
fn default_end_hour() -> u32 {
    6
}

// Fix: the derived `Default` produced `start_hour: 0, end_hour: 0`, which
// disagreed with the serde defaults (22/6) used when the fields are omitted
// from a config file. Implement `Default` manually so both construction
// paths yield the same configuration. Scheduling remains disabled by
// default, so `should_run()` behavior for default configs is unchanged.
impl Default for ScheduleConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            start_hour: default_start_hour(),
            end_hour: default_end_hour(),
            days: Vec::new(),
        }
    }
}
impl ScheduleConfig {
    /// Check if we should be running right now
    ///
    /// Returns `true` when scheduling is disabled, or when the current
    /// local time falls on an allowed day and inside the hour window.
    pub fn should_run(&self) -> bool {
        if !self.enabled {
            // Scheduling off: always allowed to run.
            return true;
        }

        let now = Local::now();

        // Day-of-week filter; an empty list means every day is allowed.
        if !self.days.is_empty() {
            let today = match now.weekday() {
                Weekday::Mon => "mon",
                Weekday::Tue => "tue",
                Weekday::Wed => "wed",
                Weekday::Thu => "thu",
                Weekday::Fri => "fri",
                Weekday::Sat => "sat",
                Weekday::Sun => "sun",
            };
            let day_allowed = self.days.iter().any(|d| d.to_lowercase() == today);
            if !day_allowed {
                debug!("Scheduler: Today ({}) not in allowed days", today);
                return false;
            }
        }

        // Hour window: start <= end is a same-day window (start inclusive,
        // end exclusive); start > end wraps past midnight (e.g. 22-06).
        let current_hour = now.hour();
        let in_window = match self.start_hour <= self.end_hour {
            true => current_hour >= self.start_hour && current_hour < self.end_hour,
            false => current_hour >= self.start_hour || current_hour < self.end_hour,
        };
        if !in_window {
            debug!(
                "Scheduler: Current hour ({}) outside window ({}-{})",
                current_hour, self.start_hour, self.end_hour
            );
        }
        in_window
    }

    /// Format the schedule for display
    pub fn format_schedule(&self) -> String {
        if !self.enabled {
            return "24/7 (no schedule)".to_string();
        }
        let days_str = match self.days.is_empty() {
            true => "Every day".to_string(),
            false => self.days.join(", "),
        };
        format!(
            "{} from {:02}:00 to {:02}:00",
            days_str, self.start_hour, self.end_hour
        )
    }
}
/// Scheduler that can pause/resume the agent based on time
pub struct Scheduler {
    // Active schedule; replaced wholesale via `update_config`.
    config: ScheduleConfig,
}

impl Scheduler {
    /// Build a scheduler; logs the window once when scheduling is enabled.
    pub fn new(config: ScheduleConfig) -> Self {
        if config.enabled {
            info!("Scheduler enabled: {}", config.format_schedule());
        }
        Self { config }
    }

    /// Replace the active schedule (e.g. after a runtime config change).
    /// Note: unlike `new`, this does not log the new schedule.
    pub fn update_config(&mut self, config: ScheduleConfig) {
        self.config = config;
    }

    /// Whether transcoding is currently allowed; delegates to the config.
    pub fn should_run(&self) -> bool {
        self.config.should_run()
    }

    /// Read access to the active schedule.
    pub fn config(&self) -> &ScheduleConfig {
        &self.config
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // A disabled schedule must always allow running, regardless of time.
    #[test]
    fn test_disabled_scheduler() {
        let config = ScheduleConfig {
            enabled: false,
            ..Default::default()
        };
        assert!(config.should_run());
    }

    // Display formatting for an overnight window with restricted days.
    #[test]
    fn test_schedule_format() {
        let config = ScheduleConfig {
            enabled: true,
            start_hour: 22,
            end_hour: 6,
            days: vec!["mon".to_string(), "tue".to_string()],
        };
        assert_eq!(config.format_schedule(), "mon, tue from 22:00 to 06:00");
    }
}

View File

@@ -18,6 +18,7 @@ use futures::stream::Stream;
use rust_embed::RustEmbed;
use std::convert::Infallible;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::{broadcast, RwLock};
use tokio_stream::wrappers::BroadcastStream;
use tokio_stream::StreamExt;
@@ -34,6 +35,7 @@ pub struct AppState {
pub transcoder: Arc<Transcoder>,
pub tx: broadcast::Sender<AlchemistEvent>,
pub setup_required: bool,
pub start_time: Instant,
}
pub async fn run_server(
@@ -51,12 +53,14 @@ pub async fn run_server(
transcoder,
tx,
setup_required,
start_time: std::time::Instant::now(),
});
let app = Router::new()
// API Routes
.route("/api/scan", post(scan_handler))
.route("/api/stats", get(stats_handler))
.route("/api/stats/aggregated", get(aggregated_stats_handler))
.route("/api/jobs/table", get(jobs_table_handler))
.route("/api/jobs/restart-failed", post(restart_failed_handler))
.route("/api/jobs/clear-completed", post(clear_completed_handler))
@@ -66,6 +70,9 @@ pub async fn run_server(
.route("/api/engine/pause", post(pause_engine_handler))
.route("/api/engine/resume", post(resume_engine_handler))
.route("/api/engine/status", get(engine_status_handler))
// Health Check Routes
.route("/api/health", get(health_handler))
.route("/api/ready", get(ready_handler))
// Setup Routes
.route("/api/setup/status", get(setup_status_handler))
.route("/api/setup/complete", post(setup_complete_handler))
@@ -252,6 +259,30 @@ async fn stats_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse
}))
}
/// `GET /api/stats/aggregated` — lifetime encoding totals for the stats page.
///
/// On a database error this responds 200 with an all-zero payload instead
/// of an error status, so the dashboard renders an empty state rather than
/// failing. Callers cannot distinguish "no jobs yet" from "DB error".
async fn aggregated_stats_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    match state.db.get_aggregated_stats().await {
        Ok(stats) => {
            // Bytes saved = input total minus output total.
            // NOTE(review): this can go negative if any output grew larger
            // than its input — confirm the DB only records size-reducing jobs.
            let savings = stats.total_input_size - stats.total_output_size;
            axum::Json(serde_json::json!({
                "total_input_bytes": stats.total_input_size,
                "total_output_bytes": stats.total_output_size,
                "total_savings_bytes": savings,
                "total_time_seconds": stats.total_encode_time_seconds,
                "total_jobs": stats.completed_jobs,
                "avg_vmaf": stats.avg_vmaf.unwrap_or(0.0)
            }))
        }
        // Error path: collapse to zeros (see doc comment above).
        Err(_) => axum::Json(serde_json::json!({
            "total_input_bytes": 0,
            "total_output_bytes": 0,
            "total_savings_bytes": 0,
            "total_time_seconds": 0,
            "total_jobs": 0,
            "avg_vmaf": 0.0
        })),
    }
}
async fn jobs_table_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
let jobs = state.db.get_all_jobs().await.unwrap_or_default();
axum::Json(jobs)
@@ -320,6 +351,37 @@ async fn engine_status_handler(State(state): State<Arc<AppState>>) -> impl IntoR
}))
}
/// `GET /api/health` — liveness probe.
///
/// Always reports `"ok"` with the crate version and process uptime; any
/// response at all means the process is alive.
async fn health_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let total_secs = state.start_time.elapsed().as_secs();
    // Break the uptime into h/m/s for the human-readable field.
    let (hours, rem) = (total_secs / 3600, total_secs % 3600);
    let (minutes, seconds) = (rem / 60, rem % 60);
    axum::Json(serde_json::json!({
        "status": "ok",
        "version": env!("CARGO_PKG_VERSION"),
        "uptime": format!("{}h {}m {}s", hours, minutes, seconds),
        "uptime_seconds": total_secs
    }))
}
/// `GET /api/ready` — readiness probe.
///
/// Responds 200 when the database answers a stats query, 503 otherwise,
/// so orchestrators hold traffic until storage is reachable.
async fn ready_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    // A cheap stats query doubles as the database connectivity check.
    match state.db.get_stats().await {
        Ok(_) => (
            StatusCode::OK,
            axum::Json(serde_json::json!({ "ready": true })),
        ),
        Err(_) => (
            StatusCode::SERVICE_UNAVAILABLE,
            axum::Json(serde_json::json!({ "ready": false, "reason": "database unavailable" })),
        ),
    }
}
async fn auth_middleware(
State(_state): State<Arc<AppState>>,
req: Request,

View File

@@ -62,6 +62,8 @@ impl ConfigWizard {
min_file_size_mb: min_file_size,
concurrent_jobs,
quality_profile: crate::config::QualityProfile::Balanced,
output_codec: crate::config::OutputCodec::Av1,
subtitle_mode: crate::config::SubtitleMode::Copy,
},
hardware: crate::config::HardwareConfig {
preferred_vendor,
@@ -76,6 +78,7 @@ impl ConfigWizard {
},
notifications: crate::config::NotificationsConfig::default(),
quality: crate::config::QualityConfig::default(),
schedule: crate::scheduler::ScheduleConfig::default(),
};
// Show summary

View File

@@ -0,0 +1,157 @@
import { useEffect, useState } from "react";
import {
TrendingDown,
Clock,
HardDrive,
Zap,
BarChart3,
Activity
} from "lucide-react";
// Shape of the payload returned by GET /api/stats/aggregated.
interface AggregatedStats {
  total_input_bytes: number;
  total_output_bytes: number;
  total_savings_bytes: number;
  total_time_seconds: number;
  total_jobs: number;
  avg_vmaf: number;
}

// Per-day rollup. NOTE(review): currently unused in this component —
// presumably reserved for a future daily chart; confirm before removing.
interface DailyStats {
  date: string;
  jobs: number;
  savings_mb: number;
}
/**
 * Aggregated statistics dashboard.
 *
 * Fetches lifetime encoding totals from `/api/stats/aggregated` once on
 * mount, then renders four stat cards and a space-efficiency bar. Shows a
 * spinner while loading and an empty state when no data is available.
 */
export default function StatsCharts() {
  const [stats, setStats] = useState<AggregatedStats | null>(null);
  const [loading, setLoading] = useState(true);

  // Fetch once on mount; the page has no auto-refresh.
  useEffect(() => {
    fetchStats();
  }, []);

  const fetchStats = async () => {
    try {
      const res = await fetch("/api/stats/aggregated");
      if (res.ok) {
        setStats(await res.json());
      }
      // Non-OK responses leave `stats` null -> empty state below.
    } catch (e) {
      // Network failure: log it and fall through to the empty state.
      console.error("Failed to fetch stats", e);
    } finally {
      setLoading(false);
    }
  };

  // Human-readable byte count using binary (1024) units, 2 decimals.
  const formatBytes = (bytes: number) => {
    if (bytes === 0) return "0 B";
    const k = 1024;
    const sizes = ["B", "KB", "MB", "GB", "TB"];
    const i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
  };

  // "2h 5m" when at least an hour, otherwise just "5m".
  const formatTime = (seconds: number) => {
    const hours = Math.floor(seconds / 3600);
    const minutes = Math.floor((seconds % 3600) / 60);
    if (hours > 0) {
      return `${hours}h ${minutes}m`;
    }
    return `${minutes}m`;
  };

  if (loading) {
    return (
      <div className="flex items-center justify-center py-20">
        <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-helios-solar"></div>
      </div>
    );
  }

  if (!stats) {
    return (
      <div className="text-center py-20 text-helios-slate">
        <BarChart3 size={48} className="mx-auto mb-4 opacity-50" />
        <p>No statistics available yet.</p>
        <p className="text-sm mt-2">Complete some transcoding jobs to see data here.</p>
      </div>
    );
  }

  // Percentage of input bytes eliminated; guard against divide-by-zero.
  const savingsPercent = stats.total_input_bytes > 0
    ? ((stats.total_savings_bytes / stats.total_input_bytes) * 100).toFixed(1)
    : "0";

  // Small presentational card; `icon` is a lucide-react component.
  const StatCard = ({ icon: Icon, label, value, subtext, colorClass }: any) => (
    <div className="p-6 rounded-2xl bg-helios-surface border border-helios-line/40 shadow-sm">
      <div className="flex items-start justify-between">
        <div>
          <p className="text-sm font-medium text-helios-slate uppercase tracking-wide mb-1">{label}</p>
          <p className={`text-3xl font-bold ${colorClass}`}>{value}</p>
          {subtext && <p className="text-sm text-helios-slate mt-1">{subtext}</p>}
        </div>
        <div className={`p-3 rounded-xl ${colorClass} bg-opacity-10`}>
          <Icon size={24} className={colorClass} />
        </div>
      </div>
    </div>
  );

  return (
    <div className="space-y-6">
      {/* Main Stats Grid */}
      <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
        <StatCard
          icon={TrendingDown}
          label="Space Saved"
          value={formatBytes(stats.total_savings_bytes)}
          subtext={`${savingsPercent}% reduction`}
          colorClass="text-emerald-500"
        />
        <StatCard
          icon={HardDrive}
          label="Total Processed"
          value={formatBytes(stats.total_input_bytes)}
          subtext={`Output: ${formatBytes(stats.total_output_bytes)}`}
          colorClass="text-blue-500"
        />
        <StatCard
          icon={Clock}
          label="Encoding Time"
          value={formatTime(stats.total_time_seconds)}
          subtext={`${stats.total_jobs} jobs completed`}
          colorClass="text-amber-500"
        />
        <StatCard
          icon={Activity}
          label="Avg VMAF Score"
          value={stats.avg_vmaf > 0 ? stats.avg_vmaf.toFixed(1) : "N/A"}
          subtext={stats.avg_vmaf > 90 ? "Excellent quality" : stats.avg_vmaf > 80 ? "Good quality" : ""}
          colorClass="text-purple-500"
        />
      </div>

      {/* Visual Bar */}
      <div className="p-6 rounded-2xl bg-helios-surface border border-helios-line/40">
        <h3 className="text-lg font-bold text-helios-ink mb-4 flex items-center gap-2">
          <Zap size={20} className="text-helios-solar" />
          Space Efficiency
        </h3>
        {/* Bar width = output/input share, i.e. 100% minus the savings. */}
        <div className="relative h-8 bg-helios-surface-soft rounded-full overflow-hidden">
          <div
            className="absolute inset-y-0 left-0 bg-gradient-to-r from-emerald-500 to-emerald-400 rounded-full transition-all duration-1000"
            style={{ width: `${100 - parseFloat(savingsPercent)}%` }}
          />
          <div className="absolute inset-0 flex items-center justify-center text-sm font-bold text-white drop-shadow">
            {formatBytes(stats.total_output_bytes)} / {formatBytes(stats.total_input_bytes)}
          </div>
        </div>
        <div className="flex justify-between text-sm text-helios-slate mt-2">
          <span>Current Size</span>
          <span>Original Size</span>
        </div>
      </div>
    </div>
  );
}

13
web/src/pages/stats.astro Normal file
View File

@@ -0,0 +1,13 @@
---
// Statistics page: server-rendered shell around the client-side
// StatsCharts island.
import Layout from "../layouts/Layout.astro";
import StatsCharts from "../components/StatsCharts";
---

<Layout title="Statistics | Alchemist">
  <div class="content-header">
    <h1 class="page-title">Statistics</h1>
    <p class="page-subtitle">Encoding performance and space savings</p>
  </div>
  <!-- client:load hydrates immediately so stats are fetched on page load -->
  <StatsCharts client:load />
</Layout>