mirror of
https://github.com/bybrooklyn/alchemist.git
synced 2026-04-18 01:43:34 -04:00
feat: comprehensive codebase improvements from audit
Architecture:
- Split server.rs (4,727 LOC) into 11 focused modules
- Add typed EventChannels (jobs/config/system) with appropriate capacities
- Add database query timeouts (5s on critical queries)
- Add graceful shutdown with signal handling

API:
- Add API versioning (/api/v1/) with backwards-compatible aliases
- Add X-Request-Id header for request tracing
- Create OpenAPI spec (docs/openapi.yaml)

Security:
- Add security headers middleware (CSP, X-Frame-Options, etc.)
- Add HSTS header (config-gated via https_only setting)
- Add config file permission check on Unix
- Fix path traversal vulnerability in file browser
- Add symlink detection in file browser

Frontend:
- Handle SSE lagged events with toast notification
- Clean up banned CSS patterns in components
- Add warning toast variant

Testing & Docs:
- Add FFmpeg integration tests with fixtures
- Expand documentation site (9 new pages)
- Pin MSRV to 1.85 in Cargo.toml

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
2378
docs/openapi.yaml
Normal file
2378
docs/openapi.yaml
Normal file
File diff suppressed because it is too large
Load Diff
285
docs/src/content/docs/configuration.md
Normal file
285
docs/src/content/docs/configuration.md
Normal file
@@ -0,0 +1,285 @@
|
||||
---
|
||||
title: Configuration Reference
|
||||
description: Complete reference for all Alchemist configuration options with examples.
|
||||
---
|
||||
|
||||
# Configuration Reference
|
||||
|
||||
This page provides a comprehensive reference for all Alchemist configuration options. The configuration file is written in TOML format and contains settings that control every aspect of how Alchemist processes your media.
|
||||
|
||||
## Configuration File Location
|
||||
|
||||
The configuration file is automatically created during the setup wizard. Default locations:
|
||||
|
||||
- **Linux/macOS**: `~/.config/alchemist/config.toml`
|
||||
- **Windows**: `%APPDATA%\alchemist\config.toml`
|
||||
- **Docker**: Bind mount to `/app/config/config.toml`
|
||||
|
||||
You can override the location with the `ALCHEMIST_CONFIG_PATH` environment variable.
|
||||
|
||||
## Sample Configuration
|
||||
|
||||
Here's a complete example configuration file:
|
||||
|
||||
```toml
|
||||
[appearance]
|
||||
active_theme_id = "dark"
|
||||
|
||||
[transcode]
|
||||
size_reduction_threshold = 0.3
|
||||
min_bpp_threshold = 0.1
|
||||
min_file_size_mb = 100
|
||||
concurrent_jobs = 2
|
||||
threads = 0
|
||||
quality_profile = "balanced"
|
||||
output_codec = "av1"
|
||||
allow_fallback = true
|
||||
hdr_mode = "preserve"
|
||||
tonemap_algorithm = "hable"
|
||||
tonemap_peak = 100.0
|
||||
tonemap_desat = 0.2
|
||||
subtitle_mode = "copy"
|
||||
vmaf_min_score = 93.0
|
||||
|
||||
[transcode.stream_rules]
|
||||
strip_audio_by_title = ["commentary", "director"]
|
||||
keep_audio_languages = ["eng"]
|
||||
keep_only_default_audio = false
|
||||
|
||||
[hardware]
|
||||
preferred_vendor = "nvidia"
|
||||
device_path = "/dev/dri/renderD128"
|
||||
allow_cpu_fallback = true
|
||||
cpu_preset = "medium"
|
||||
allow_cpu_encoding = true
|
||||
|
||||
[scanner]
|
||||
directories = ["/media/movies", "/media/tv"]
|
||||
watch_enabled = true
|
||||
|
||||
[[scanner.extra_watch_dirs]]
|
||||
path = "/media/incoming"
|
||||
is_recursive = true
|
||||
|
||||
[notifications]
|
||||
enabled = true
|
||||
notify_on_complete = true
|
||||
notify_on_failure = true
|
||||
|
||||
[[notifications.targets]]
|
||||
name = "discord"
|
||||
target_type = "discord"
|
||||
endpoint_url = "https://discord.com/api/webhooks/..."
|
||||
enabled = true
|
||||
events = ["job_complete", "job_failed"]
|
||||
|
||||
[files]
|
||||
delete_source = false
|
||||
output_extension = "mkv"
|
||||
output_suffix = "-alchemist"
|
||||
replace_strategy = "keep"
|
||||
output_root = "/media/transcoded"
|
||||
|
||||
[schedule]
|
||||
[[schedule.windows]]
|
||||
start_time = "22:00"
|
||||
end_time = "06:00"
|
||||
days_of_week = [1, 2, 3, 4, 5]
|
||||
enabled = true
|
||||
|
||||
[quality]
|
||||
enable_vmaf = true
|
||||
min_vmaf_score = 90.0
|
||||
revert_on_low_quality = true
|
||||
|
||||
[system]
|
||||
monitoring_poll_interval = 2.0
|
||||
enable_telemetry = false
|
||||
log_retention_days = 30
|
||||
engine_mode = "balanced"
|
||||
https_only = false
|
||||
```
|
||||
|
||||
## Section Reference
|
||||
|
||||
### Appearance (`appearance`)
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `active_theme_id` | string | `null` | Theme ID for the web interface ("light", "dark", or custom) |
|
||||
|
||||
### Transcoding (`transcode`)
|
||||
|
||||
Core settings that control the video encoding process.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `size_reduction_threshold` | float | `0.3` | Minimum expected size reduction (0.3 = 30%) to proceed with transcoding |
|
||||
| `min_bpp_threshold` | float | `0.1` | Minimum bits-per-pixel to consider a file worth transcoding |
|
||||
| `min_file_size_mb` | integer | `50` | Skip files smaller than this (in MB) |
|
||||
| `concurrent_jobs` | integer | `1` | Number of simultaneous transcoding jobs |
|
||||
| `threads` | integer | `0` | CPU threads per job (0 = automatic) |
|
||||
| `quality_profile` | string | `"balanced"` | Speed vs quality tradeoff: `"quality"`, `"balanced"`, `"speed"` |
|
||||
| `output_codec` | string | `"av1"` | Target codec: `"av1"`, `"hevc"`, `"h264"` |
|
||||
| `allow_fallback` | boolean | `true` | Fall back to CPU if hardware encoding fails |
|
||||
| `hdr_mode` | string | `"preserve"` | HDR handling: `"preserve"` or `"tonemap"` |
|
||||
| `tonemap_algorithm` | string | `"hable"` | Tonemap method: `"hable"`, `"mobius"`, `"reinhard"`, `"clip"` |
|
||||
| `tonemap_peak` | float | `100.0` | Target peak luminance for tonemapping (nits) |
|
||||
| `tonemap_desat` | float | `0.2` | Desaturation factor during tonemapping |
|
||||
| `subtitle_mode` | string | `"copy"` | Subtitle handling: `"copy"`, `"burn"`, `"extract"`, `"none"` |
|
||||
| `vmaf_min_score` | float | `null` | Minimum VMAF score to accept transcode (optional) |
|
||||
|
||||
#### Quality Profiles
|
||||
|
||||
Each profile adjusts encoding parameters for different priorities:
|
||||
|
||||
- **Quality**: Slower encoding, best compression (CRF 24, preset slow)
|
||||
- **Balanced**: Good compromise (CRF 28, preset medium)
|
||||
- **Speed**: Faster encoding, larger files (CRF 32, preset fast)
|
||||
|
||||
#### Stream Rules (`transcode.stream_rules`)
|
||||
|
||||
Audio track filtering rules applied before transcoding:
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `strip_audio_by_title` | array | `[]` | Remove tracks containing these strings (case-insensitive) |
|
||||
| `keep_audio_languages` | array | `[]` | Keep only tracks with these ISO 639-2 language codes |
|
||||
| `keep_only_default_audio` | boolean | `false` | Keep only the default audio track |
|
||||
|
||||
### Hardware (`hardware`)
|
||||
|
||||
Graphics card and CPU encoding settings.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `preferred_vendor` | string | `null` | Preferred encoder: `"nvidia"`, `"intel"`, `"amd"`, `"apple"`, `"cpu"` |
|
||||
| `device_path` | string | `null` | Specific GPU device path (e.g., `/dev/dri/renderD128`) |
|
||||
| `allow_cpu_fallback` | boolean | `true` | Use CPU encoding if GPU unavailable |
|
||||
| `cpu_preset` | string | `"medium"` | CPU preset: `"slow"`, `"medium"`, `"fast"`, `"faster"` |
|
||||
| `allow_cpu_encoding` | boolean | `true` | Enable CPU encoding entirely |
|
||||
|
||||
### Scanner (`scanner`)
|
||||
|
||||
File discovery and monitoring settings.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `directories` | array | `[]` | Base directories to scan for media files |
|
||||
| `watch_enabled` | boolean | `false` | Enable real-time file monitoring |
|
||||
| `extra_watch_dirs` | array | `[]` | Additional directories with custom settings |
|
||||
|
||||
#### Extra Watch Directories
|
||||
|
||||
Each entry in `extra_watch_dirs` supports:
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `path` | string | Required | Directory path to monitor |
|
||||
| `is_recursive` | boolean | `true` | Include subdirectories |
|
||||
|
||||
### Notifications (`notifications`)
|
||||
|
||||
Alert settings for job completion and failures.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `enabled` | boolean | `false` | Enable notification system |
|
||||
| `notify_on_complete` | boolean | `false` | Send alerts when jobs complete successfully |
|
||||
| `notify_on_failure` | boolean | `false` | Send alerts when jobs fail |
|
||||
| `targets` | array | `[]` | Notification endpoints (see below) |
|
||||
|
||||
#### Notification Targets
|
||||
|
||||
Each target in the `targets` array supports:
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `name` | string | Required | Friendly name for this target |
|
||||
| `target_type` | string | Required | Type: `"discord"`, `"gotify"`, `"webhook"` |
|
||||
| `endpoint_url` | string | Required | Full URL for the notification service |
|
||||
| `auth_token` | string | `null` | Authentication token if required |
|
||||
| `events` | array | `[]` | Events to send: `"job_complete"`, `"job_failed"` |
|
||||
| `enabled` | boolean | `true` | Whether this target is active |
|
||||
|
||||
### Files (`files`)
|
||||
|
||||
Output file naming and handling preferences.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `delete_source` | boolean | `false` | Delete original files after successful transcode |
|
||||
| `output_extension` | string | `"mkv"` | Container format for output files |
|
||||
| `output_suffix` | string | `"-alchemist"` | Suffix added to transcoded filenames |
|
||||
| `replace_strategy` | string | `"keep"` | How to handle existing output files: `"keep"`, `"overwrite"` |
|
||||
| `output_root` | string | `null` | Alternative output directory (preserves folder structure) |
|
||||
|
||||
### Schedule (`schedule`)
|
||||
|
||||
Time-based restrictions for when transcoding can occur.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `windows` | array | `[]` | Time windows when transcoding is allowed |
|
||||
|
||||
#### Schedule Windows
|
||||
|
||||
Each window supports:
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `start_time` | string | Required | Start time in 24-hour format (HH:MM) |
|
||||
| `end_time` | string | Required | End time in 24-hour format (HH:MM) |
|
||||
| `days_of_week` | array | `[]` | Days 0-6 (Sunday=0) when this window applies |
|
||||
| `enabled` | boolean | `true` | Whether this window is active |
|
||||
|
||||
### Quality (`quality`)
|
||||
|
||||
Advanced quality verification settings.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `enable_vmaf` | boolean | `false` | Enable VMAF quality scoring (slow) |
|
||||
| `min_vmaf_score` | float | `90.0` | Minimum VMAF score to accept transcode |
|
||||
| `revert_on_low_quality` | boolean | `true` | Delete output and keep original if VMAF score too low |
|
||||
|
||||
### System (`system`)
|
||||
|
||||
Application-level settings and monitoring.
|
||||
|
||||
| Setting | Type | Default | Description |
|
||||
|---------|------|---------|-------------|
|
||||
| `monitoring_poll_interval` | float | `2.0` | Seconds between system status updates |
|
||||
| `enable_telemetry` | boolean | `false` | Send anonymous usage statistics |
|
||||
| `log_retention_days` | integer | `30` | Days to keep log files (null = forever) |
|
||||
| `engine_mode` | string | `"balanced"` | Processing mode: `"background"`, `"balanced"`, `"throughput"` |
|
||||
| `https_only` | boolean | `false` | Enforce HTTPS (only enable behind reverse proxy) |
|
||||
|
||||
#### Engine Modes
|
||||
|
||||
- **Background**: Minimal resource usage (1 job max)
|
||||
- **Balanced**: Reasonable performance (½ CPU cores, max 4 jobs)
|
||||
- **Throughput**: Maximum performance (½ CPU cores, no job limit)
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Alchemist respects these environment variables:
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `ALCHEMIST_CONFIG_PATH` | Path to config file | `~/.config/alchemist/config.toml` |
|
||||
| `ALCHEMIST_DB_PATH` | Path to database file | `~/.config/alchemist/alchemist.db` |
|
||||
| `ALCHEMIST_CONFIG_MUTABLE` | Allow runtime config changes | `true` |
|
||||
| `RUST_LOG` | Logging level | `info` |
|
||||
| `PORT` | Web server port | `3000` |
|
||||
| `HOST` | Web server bind address | `0.0.0.0` |
|
||||
|
||||
## Configuration Validation
|
||||
|
||||
Alchemist validates your configuration on startup and will show specific error messages for:
|
||||
- Invalid values or types
|
||||
- Missing required fields
|
||||
- Conflicting settings
|
||||
- Unreachable file paths
|
||||
|
||||
Check the logs if Alchemist fails to start after configuration changes.
|
||||
500
docs/src/content/docs/faq.md
Normal file
500
docs/src/content/docs/faq.md
Normal file
@@ -0,0 +1,500 @@
|
||||
---
|
||||
title: Frequently Asked Questions
|
||||
description: Comprehensive FAQ covering everything you need to know about Alchemist.
|
||||
---
|
||||
|
||||
# Frequently Asked Questions
|
||||
|
||||
This comprehensive FAQ answers the most common questions about Alchemist. Questions are organized by topic for easy navigation.
|
||||
|
||||
## General Questions
|
||||
|
||||
### What exactly does Alchemist do?
|
||||
|
||||
Alchemist is a **smart video transcoding pipeline** that automatically converts your media library to more efficient formats. Think of it as a "garbage compactor" for video files that:
|
||||
|
||||
- Scans your media collection automatically
|
||||
- Analyzes each file to determine if transcoding would help
|
||||
- Uses hardware acceleration (GPU) when available
|
||||
- Only transcodes files that will actually benefit
|
||||
- Preserves your originals until you're satisfied with results
|
||||
- Provides a web dashboard to monitor everything
|
||||
|
||||
### Is Alchemist free?
|
||||
|
||||
**Yes, completely free!** Alchemist is open-source software released under the GPLv3 license. This means:
|
||||
|
||||
- No cost to download, install, or use
|
||||
- Source code is publicly available
|
||||
- No premium features or subscriptions
|
||||
- No telemetry or tracking (unless you explicitly enable it)
|
||||
- Community-driven development
|
||||
|
||||
### How much storage will I save?
|
||||
|
||||
Typical savings depend on your source content:
|
||||
|
||||
| Content Type | Typical Savings | Example |
|
||||
|-------------|----------------|---------|
|
||||
| **Old TV shows** (DVD/early digital) | 50-80% | 2GB → 600MB |
|
||||
| **Blu-ray rips** (H.264) | 30-60% | 25GB → 12GB |
|
||||
| **Modern streaming** (already efficient) | 0-20% | Often skipped |
|
||||
| **4K content** | 40-70% | 60GB → 25GB |
|
||||
|
||||
**Average across mixed libraries: 40-60% space savings**
|
||||
|
||||
### Will Alchemist ruin my video quality?
|
||||
|
||||
**No.** Alchemist is designed with quality protection:
|
||||
|
||||
- **Intelligent analysis** checks if transcoding will help before starting
|
||||
- **Quality thresholds** prevent processing files that are already efficient
|
||||
- **VMAF scoring** (optional) verifies output quality mathematically
|
||||
- **Conservative defaults** prioritize quality over maximum compression
|
||||
- **Originals preserved** until you manually approve deletion
|
||||
|
||||
If Alchemist thinks a transcode would hurt quality, it skips the file and tells you why.
|
||||
|
||||
### Does it work on Windows, Mac, and Linux?
|
||||
|
||||
**Yes, all three.** Alchemist works on:
|
||||
|
||||
| Platform | Status | Notes |
|
||||
|----------|--------|-------|
|
||||
| **Linux x86_64/ARM64** | ✅ Fully supported | Best performance |
|
||||
| **Windows x86_64** | ✅ Fully supported | Good GPU support |
|
||||
| **macOS Intel/Apple Silicon** | ✅ Fully supported | VideoToolbox acceleration |
|
||||
| **Docker** | ✅ Recommended | Works anywhere Docker runs |
|
||||
|
||||
## Hardware & Performance
|
||||
|
||||
### Do I need a powerful graphics card?
|
||||
|
||||
**No, but it helps a lot.** Alchemist works in any configuration:
|
||||
|
||||
- **With GPU**: 20-60 minutes per movie, low power usage
|
||||
- **CPU only**: 2-8 hours per movie, higher power usage
|
||||
- **Automatic fallback**: Uses GPU when available, CPU when not
|
||||
|
||||
**Supported GPUs:**
|
||||
- NVIDIA (GTX 10-series and newer)
|
||||
- Intel integrated graphics (6th gen and newer)
|
||||
- AMD Radeon (RX 400-series and newer)
|
||||
- Apple Silicon (M1/M2/M3)
|
||||
|
||||
### What's the difference between GPU and CPU encoding?
|
||||
|
||||
| Aspect | GPU Encoding | CPU Encoding |
|
||||
|--------|--------------|--------------|
|
||||
| **Speed** | 5-20x faster | Baseline |
|
||||
| **Quality** | Very good | Excellent |
|
||||
| **Power usage** | Lower | Higher |
|
||||
| **Compatibility** | Requires supported GPU | Works everywhere |
|
||||
| **Cost** | GPU hardware needed | Uses existing CPU |
|
||||
|
||||
**Bottom line**: GPU encoding is much faster and more efficient, but CPU encoding produces slightly better quality.
|
||||
|
||||
### Can I limit when Alchemist runs?
|
||||
|
||||
**Yes!** Multiple ways to control when processing happens:
|
||||
|
||||
1. **Engine modes**:
|
||||
- **Background**: Minimal resource usage
|
||||
- **Balanced**: Moderate performance (default)
|
||||
- **Throughput**: Maximum performance
|
||||
|
||||
2. **Scheduling**:
|
||||
```toml
|
||||
[schedule]
|
||||
[[schedule.windows]]
|
||||
start_time = "22:00" # 10 PM
|
||||
end_time = "06:00" # 6 AM
|
||||
days_of_week = [1, 2, 3, 4, 5] # Weekdays only
|
||||
```
|
||||
|
||||
3. **Manual control**: Pause/resume anytime from the dashboard
|
||||
|
||||
### How many files can it process simultaneously?
|
||||
|
||||
Depends on your hardware:
|
||||
|
||||
| System Type | Recommended Concurrent Jobs |
|
||||
|-------------|----------------------------|
|
||||
| **Basic CPU** (4 cores) | 1 |
|
||||
| **Good CPU** (8+ cores) | 2 |
|
||||
| **GPU + good CPU** | 2-3 |
|
||||
| **High-end workstation** | 4+ |
|
||||
|
||||
The system auto-adjusts based on available resources, but you can override:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 2 # Manual override
|
||||
```
|
||||
|
||||
## Quality & Codecs
|
||||
|
||||
### What's the difference between AV1, HEVC, and H.264?
|
||||
|
||||
| Codec | Compression | Compatibility | Encoding Speed | Best For |
|
||||
|-------|-------------|---------------|----------------|----------|
|
||||
| **AV1** | Excellent (30% better than HEVC) | Newer devices | Slower | Future-proofing, archival |
|
||||
| **HEVC/H.265** | Very good (50% better than H.264) | Most modern devices | Medium | General use, good balance |
|
||||
| **H.264** | Good (baseline) | Universal | Fastest | Compatibility, quick results |
|
||||
|
||||
**Recommendation**: Start with HEVC for best balance of quality, compatibility, and encoding speed.
|
||||
|
||||
### Should I enable VMAF quality checking?
|
||||
|
||||
**VMAF** is Netflix's quality measurement tool. Enable if:
|
||||
|
||||
✅ **Yes, if you:**
|
||||
- Have critical content you can't re-encode
|
||||
- Want mathematical quality verification
|
||||
- Don't mind 2-3x slower encoding
|
||||
- Are a quality enthusiast
|
||||
|
||||
❌ **No, if you:**
|
||||
- Want fast processing
|
||||
- Trust Alchemist's quality settings
|
||||
- Have large libraries to process
|
||||
- Use reasonable quality settings already
|
||||
|
||||
### What happens to HDR content?
|
||||
|
||||
Alchemist can handle HDR content two ways:
|
||||
|
||||
1. **Preserve HDR** (default):
|
||||
```toml
|
||||
[transcode]
|
||||
hdr_mode = "preserve"
|
||||
```
|
||||
- Keeps HDR metadata intact
|
||||
- Requires HDR-capable display for viewing
|
||||
- Smaller file size impact
|
||||
|
||||
2. **Tonemap to SDR**:
|
||||
```toml
|
||||
[transcode]
|
||||
hdr_mode = "tonemap"
|
||||
tonemap_algorithm = "hable" # Recommended
|
||||
```
|
||||
- Converts to standard dynamic range
|
||||
- Works on any display
|
||||
- Slight quality loss in bright scenes
|
||||
|
||||
### Can I customize quality settings per library?
|
||||
|
||||
**Yes!** Use different profiles for different content:
|
||||
|
||||
```toml
|
||||
# Movies: Maximum quality
|
||||
[profiles.movies]
|
||||
quality_profile = "quality"
|
||||
output_codec = "av1"
|
||||
min_file_size_mb = 500 # Only large files
|
||||
|
||||
# TV Shows: Faster processing
|
||||
[profiles.tv_shows]
|
||||
quality_profile = "speed"
|
||||
output_codec = "hevc"
|
||||
min_file_size_mb = 100
|
||||
|
||||
# Home videos: Preserve originals
|
||||
[profiles.home_videos]
|
||||
delete_source = false
|
||||
output_codec = "h264"
|
||||
```
|
||||
|
||||
## File Management
|
||||
|
||||
### What happens to my original files?
|
||||
|
||||
**By default, originals are kept safe.** Alchemist:
|
||||
|
||||
1. Creates new file with `-alchemist` suffix
|
||||
2. Verifies the new file works correctly
|
||||
3. Keeps both files until you decide
|
||||
|
||||
**Options for originals:**
|
||||
- **Keep both** (default, safest)
|
||||
- **Manual review** then delete originals
|
||||
- **Auto-delete** after successful transcode (risky)
|
||||
|
||||
```toml
|
||||
[files]
|
||||
delete_source = false # Keep originals (recommended)
|
||||
output_suffix = "-alchemist"
|
||||
replace_strategy = "keep" # Don't overwrite existing files
|
||||
```
|
||||
|
||||
### Can I organize output files differently?
|
||||
|
||||
**Yes!** Several organization options:
|
||||
|
||||
1. **Same location with suffix** (default):
|
||||
```
|
||||
/media/Movie.mkv
|
||||
/media/Movie-alchemist.mkv
|
||||
```
|
||||
|
||||
2. **Separate output directory**:
|
||||
```toml
|
||||
[files]
|
||||
output_root = "/media/transcoded"
|
||||
```
|
||||
Result:
|
||||
```
|
||||
/media/movies/Movie.mkv (original)
|
||||
/media/transcoded/movies/Movie.mkv (transcoded)
|
||||
```
|
||||
|
||||
3. **Custom file extensions**:
|
||||
```toml
|
||||
[files]
|
||||
output_extension = "mp4" # Change container format
|
||||
```
|
||||
|
||||
### How do I handle different languages and audio tracks?
|
||||
|
||||
**Stream rules** let you customize audio handling:
|
||||
|
||||
```toml
|
||||
[transcode.stream_rules]
|
||||
# Remove commentary tracks
|
||||
strip_audio_by_title = ["commentary", "director", "behind"]
|
||||
|
||||
# Keep only English and Japanese audio
|
||||
keep_audio_languages = ["eng", "jpn"]
|
||||
|
||||
# Or keep only the default audio track
|
||||
keep_only_default_audio = true
|
||||
```
|
||||
|
||||
**Audio encoding options**:
|
||||
```toml
|
||||
[transcode]
|
||||
audio_mode = "copy" # Keep original (recommended)
|
||||
# audio_mode = "aac" # Transcode to AAC
|
||||
# audio_mode = "aac_stereo" # Downmix to stereo AAC
|
||||
```
|
||||
|
||||
## Setup & Configuration
|
||||
|
||||
### Docker vs. binary installation - which should I choose?
|
||||
|
||||
| Method | Pros | Cons | Best For |
|
||||
|--------|------|------|----------|
|
||||
| **Docker** | ✅ Easy setup<br>✅ All dependencies included<br>✅ Consistent across systems<br>✅ Easy updates | ❌ Slightly more complex config<br>❌ Docker overhead | Most users, especially beginners |
|
||||
| **Binary** | ✅ Direct system access<br>✅ Lower overhead<br>✅ No Docker complexity | ❌ Manual dependency management<br>❌ Platform-specific issues | Advanced users, specialized setups |
|
||||
|
||||
**Recommendation**: Use Docker unless you have specific needs for binary installation.
|
||||
|
||||
### How do I update Alchemist?
|
||||
|
||||
**Docker update**:
|
||||
```bash
|
||||
# Pull latest image
|
||||
docker pull ghcr.io/bybrooklyn/alchemist:latest
|
||||
|
||||
# Restart container
|
||||
docker compose down && docker compose up -d
|
||||
```
|
||||
|
||||
**Binary update**:
|
||||
1. Download new binary from GitHub releases
|
||||
2. Stop current Alchemist instance
|
||||
3. Replace binary file
|
||||
4. Restart Alchemist
|
||||
|
||||
**Database migrations** are automatic - your settings and history are preserved.
|
||||
|
||||
### Can I run multiple Alchemist instances?
|
||||
|
||||
**Generally no** - Alchemist is designed as a single-instance application. However:
|
||||
|
||||
✅ **Supported scenarios**:
|
||||
- Different libraries on different machines
|
||||
- Test instance with separate config/database
|
||||
|
||||
❌ **Not supported**:
|
||||
- Multiple instances accessing the same library
|
||||
- Multiple instances sharing a database
|
||||
- Load balancing across instances
|
||||
|
||||
For high-performance setups, use:
|
||||
- Higher concurrent job count
|
||||
- Faster hardware
|
||||
- Multiple GPUs in single instance (future feature)
|
||||
|
||||
## Troubleshooting & Support
|
||||
|
||||
### Why aren't my files being processed?
|
||||
|
||||
**Common reasons files get skipped**:
|
||||
|
||||
1. **Too small**: Below `min_file_size_mb` threshold
|
||||
2. **Already efficient**: Below `size_reduction_threshold`
|
||||
3. **Already compact**: Below `min_bpp_threshold` (low bits-per-pixel means the file is already efficiently encoded)
|
||||
4. **Wrong format**: Not a supported video file
|
||||
5. **File errors**: Corrupted or unreadable
|
||||
|
||||
Check the **Library Doctor** for detailed analysis of why files were skipped.
|
||||
|
||||
### How do I know if hardware acceleration is working?
|
||||
|
||||
**Check the Dashboard**:
|
||||
- Hardware status shows detected GPU
|
||||
- Job details show encoder being used
|
||||
|
||||
**Check system monitors**:
|
||||
```bash
|
||||
# NVIDIA
|
||||
nvidia-smi
|
||||
|
||||
# Intel
|
||||
intel_gpu_top
|
||||
|
||||
# AMD
|
||||
radeontop
|
||||
|
||||
# General
|
||||
htop # Look for low CPU usage during encoding
|
||||
```
|
||||
|
||||
**Look for logs**:
|
||||
```
|
||||
[INFO] Using NVENC for encoding
|
||||
[INFO] Hardware encoder initialized: hevc_nvenc
|
||||
```
|
||||
|
||||
### What are "Library Doctor" issues?
|
||||
|
||||
**Library Doctor** scans your media for problems:
|
||||
|
||||
- **Corrupted files**: Won't play properly
|
||||
- **Encoding errors**: Video/audio sync issues
|
||||
- **Missing data**: Incomplete downloads
|
||||
- **Format issues**: Unusual codecs or containers
|
||||
|
||||
It's a **diagnostic tool** - not all issues need fixing, but you should be aware of them.
|
||||
|
||||
### Performance is slower than expected - what to check?
|
||||
|
||||
**Diagnosis checklist**:
|
||||
|
||||
1. **Verify hardware acceleration**:
|
||||
- Check dashboard shows GPU detected
|
||||
- Monitor GPU usage during encoding
|
||||
|
||||
2. **Check system resources**:
|
||||
- CPU usage (should be low with GPU)
|
||||
- RAM availability
|
||||
- Disk speed (especially important for 4K)
|
||||
|
||||
3. **Optimize settings**:
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "speed" # Faster encoding
|
||||
concurrent_jobs = 1 # Reduce if system struggles
|
||||
```
|
||||
|
||||
4. **Check thermal throttling**:
|
||||
- Monitor CPU/GPU temperatures
|
||||
- Ensure adequate cooling
|
||||
|
||||
### Getting help with issues
|
||||
|
||||
**Before asking for help**:
|
||||
|
||||
1. **Check this FAQ** and the [Troubleshooting Guide](/troubleshooting/)
|
||||
2. **Search existing issues** on GitHub
|
||||
3. **Enable debug logging**: `RUST_LOG=debug`
|
||||
|
||||
**When reporting issues, include**:
|
||||
- System information (OS, hardware)
|
||||
- Alchemist version
|
||||
- Configuration file (remove sensitive data)
|
||||
- Relevant log excerpts
|
||||
- Steps to reproduce the problem
|
||||
|
||||
**Where to get help**:
|
||||
- **GitHub Issues**: [github.com/bybrooklyn/alchemist/issues](https://github.com/bybrooklyn/alchemist/issues)
|
||||
- **Documentation**: This site
|
||||
- **Community**: GitHub Discussions
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Can I customize the FFmpeg commands?
|
||||
|
||||
**Limited customization** is available through encoder args:
|
||||
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
# Example: Custom quality settings
|
||||
extra_args = [
|
||||
"-crf", "22", # Custom quality level
|
||||
"-preset", "slower", # Custom speed preset
|
||||
"-tune", "film" # Optimize for film content
|
||||
]
|
||||
```
|
||||
|
||||
**Note**: Full FFmpeg customization isn't supported to maintain reliability and quality consistency.
|
||||
|
||||
### How do I migrate from other transcoding tools?
|
||||
|
||||
**From Tdarr**:
|
||||
- Export your Tdarr library database
|
||||
- Point Alchemist at the same directories
|
||||
- Let Alchemist re-scan and analyze files
|
||||
- Alchemist will skip already-optimized files
|
||||
|
||||
**From HandBrake batch scripts**:
|
||||
- Point Alchemist at your source directories
|
||||
- Configure similar quality settings
|
||||
- Alchemist automates the batch process
|
||||
|
||||
**From other tools**:
|
||||
- Most tools can coexist with Alchemist
|
||||
- Use different output suffixes to avoid conflicts
|
||||
- Alchemist focuses on automation vs. manual control
|
||||
|
||||
### Can I use Alchemist in a production environment?
|
||||
|
||||
**Alchemist is designed for home users** but can work in professional contexts:
|
||||
|
||||
✅ **Good for**:
|
||||
- Personal media servers
|
||||
- Small office setups
|
||||
- Content creators' personal libraries
|
||||
- Automated archival workflows
|
||||
|
||||
⚠️ **Consider limitations**:
|
||||
- Single-instance design
|
||||
- Limited customization
|
||||
- Home-focused feature set
|
||||
- Community support only
|
||||
|
||||
For enterprise needs, consider commercial solutions or custom development.
|
||||
|
||||
### Integration with media servers (Plex, Jellyfin, etc.)
|
||||
|
||||
**Alchemist works alongside media servers**:
|
||||
|
||||
1. **Point Alchemist** at your media directories
|
||||
2. **Configure output** to same location or separate folder
|
||||
3. **Media servers** automatically detect transcoded files
|
||||
4. **Use scheduling** to avoid conflicts during peak usage
|
||||
|
||||
**Best practices**:
|
||||
- Run during off-peak hours
|
||||
- Test with small batches first
|
||||
- Monitor media server performance
|
||||
- Keep originals until you verify everything works
|
||||
|
||||
**Common workflow**:
|
||||
```
|
||||
Media files → Alchemist → Optimized files → Media server → Streaming
|
||||
```
|
||||
|
||||
This setup gives you both the convenience of automated transcoding and the features of your preferred media server.
|
||||
183
docs/src/content/docs/getting-started.md
Normal file
183
docs/src/content/docs/getting-started.md
Normal file
@@ -0,0 +1,183 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Complete guide to installing and setting up Alchemist for the first time.
|
||||
---
|
||||
|
||||
# Getting Started with Alchemist
|
||||
|
||||
This guide will walk you through installing Alchemist, completing the setup wizard, and adding your first media library. By the end, you'll have Alchemist automatically transcoding your videos to save storage space.
|
||||
|
||||
## Installation Options
|
||||
|
||||
Alchemist can be installed in three ways. **Docker is strongly recommended** for the best experience since it includes all necessary dependencies and hardware drivers.
|
||||
|
||||
### Docker (Recommended)
|
||||
|
||||
Docker provides the smoothest installation experience with automatic hardware detection and all FFmpeg dependencies pre-installed.
|
||||
|
||||
#### Docker Compose
|
||||
Create a `docker-compose.yml` file:
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
services:
|
||||
alchemist:
|
||||
image: ghcr.io/bybrooklyn/alchemist:latest
|
||||
container_name: alchemist
|
||||
ports:
|
||||
- "3000:3000"
|
||||
volumes:
|
||||
- ./config:/app/config
|
||||
- ./data:/app/data
|
||||
- /path/to/your/media:/media
|
||||
environment:
|
||||
- ALCHEMIST_CONFIG_PATH=/app/config/config.toml
|
||||
- ALCHEMIST_DB_PATH=/app/data/alchemist.db
|
||||
- TZ=UTC
|
||||
restart: unless-stopped
|
||||
# For NVIDIA GPUs:
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
# For Intel/AMD GPUs on Linux:
|
||||
devices:
|
||||
- /dev/dri:/dev/dri
|
||||
```
|
||||
|
||||
Then run:
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
#### Docker Run
|
||||
For a quick one-liner installation:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name alchemist \
|
||||
-p 3000:3000 \
|
||||
-v ./config:/app/config \
|
||||
-v ./data:/app/data \
|
||||
-v /path/to/your/media:/media \
|
||||
-e ALCHEMIST_CONFIG_PATH=/app/config/config.toml \
|
||||
-e ALCHEMIST_DB_PATH=/app/data/alchemist.db \
|
||||
--restart unless-stopped \
|
||||
ghcr.io/bybrooklyn/alchemist:latest
|
||||
```
|
||||
|
||||
### Binary Installation
|
||||
|
||||
Download pre-built binaries from [GitHub Releases](https://github.com/bybrooklyn/alchemist/releases) for:
|
||||
- Linux x86_64 and ARM64
|
||||
- Windows x86_64
|
||||
- macOS Intel and Apple Silicon
|
||||
|
||||
**Requirements:**
|
||||
- FFmpeg must be installed separately
|
||||
- Hardware drivers for GPU acceleration (optional but recommended)
|
||||
|
||||
#### Install FFmpeg
|
||||
|
||||
**Linux:**
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt install ffmpeg
|
||||
|
||||
# Fedora/RHEL
|
||||
sudo dnf install ffmpeg
|
||||
|
||||
# Arch Linux
|
||||
sudo pacman -S ffmpeg
|
||||
```
|
||||
|
||||
**macOS:**
|
||||
```bash
|
||||
brew install ffmpeg
|
||||
```
|
||||
|
||||
**Windows:**
|
||||
```bash
|
||||
winget install Gyan.FFmpeg
|
||||
```
|
||||
|
||||
#### Run Alchemist
|
||||
```bash
|
||||
# Linux/macOS
|
||||
./alchemist
|
||||
|
||||
# Windows
|
||||
alchemist.exe
|
||||
```
|
||||
|
||||
### Build from Source
|
||||
|
||||
For developers or users who want the latest features:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/bybrooklyn/alchemist.git
|
||||
cd alchemist
|
||||
cargo build --release
|
||||
./target/release/alchemist
|
||||
```
|
||||
|
||||
**Requirements:**
|
||||
- Rust toolchain (latest stable)
|
||||
- FFmpeg installed separately
|
||||
- Node.js 18+ for building the web interface
|
||||
|
||||
## First Run Setup
|
||||
|
||||
1. **Access the Web Interface**
|
||||
Open http://localhost:3000 in your browser
|
||||
|
||||
2. **Complete the Setup Wizard**
|
||||
- Set admin password
|
||||
- Choose hardware preferences
|
||||
- Configure basic transcoding settings
|
||||
- This takes about 2-3 minutes
|
||||
|
||||
3. **Hardware Detection**
|
||||
Alchemist will automatically detect:
|
||||
- NVIDIA GPUs (NVENC)
|
||||
- Intel integrated graphics (QSV)
|
||||
- AMD graphics (VAAPI/AMF)
|
||||
- Apple Silicon (VideoToolbox)
|
||||
- Falls back to CPU encoding if no GPU found
|
||||
|
||||
## Adding Your First Library
|
||||
|
||||
1. **Navigate to Watch Folders**
|
||||
Go to Settings → Watch Folders
|
||||
|
||||
2. **Add Media Directory**
|
||||
- Click "Add Folder"
|
||||
- Browse to your media collection
|
||||
- Enable "Recursive" to include subdirectories
|
||||
- Enable "Watch Mode" for automatic scanning
|
||||
|
||||
3. **Start Processing**
|
||||
- Alchemist begins scanning immediately
|
||||
- Initial scan shows which files are candidates for transcoding
|
||||
- Processing starts automatically based on your settings
|
||||
|
||||
## Understanding the Dashboard
|
||||
|
||||
Once running, monitor progress from the main dashboard:
|
||||
|
||||
- **Active Jobs**: Currently transcoding files
|
||||
- **Queue**: Files waiting to be processed
|
||||
- **Statistics**: Storage saved, files processed
|
||||
- **System Status**: Hardware usage, temperatures
|
||||
|
||||
## Next Steps
|
||||
|
||||
- **Configure Profiles**: Set different behaviors for movies vs TV shows
|
||||
- **Set Schedules**: Limit transcoding to off-peak hours
|
||||
- **Enable Notifications**: Get alerts when jobs complete
|
||||
- **Review Quality Settings**: Adjust the balance between size and quality
|
||||
|
||||
See the [Configuration Reference](/reference/configuration/) for detailed setting explanations.
|
||||
371
docs/src/content/docs/hardware/amd.md
Normal file
371
docs/src/content/docs/hardware/amd.md
Normal file
@@ -0,0 +1,371 @@
|
||||
---
|
||||
title: AMD GPU Setup (VAAPI/AMF)
|
||||
description: Complete guide to setting up AMD hardware acceleration using VAAPI (Linux) and AMF (Windows).
|
||||
---
|
||||
|
||||
# AMD GPU Setup (VAAPI/AMF)
|
||||
|
||||
AMD GPUs support hardware-accelerated encoding through VAAPI on Linux and AMF on Windows. This guide covers setup and optimization for AMD Radeon graphics cards.
|
||||
|
||||
## Supported Hardware
|
||||
|
||||
Hardware encoding support varies by AMD GPU generation:
|
||||
|
||||
| Generation | Codecs | Linux (VAAPI) | Windows (AMF) |
|
||||
|------------|---------|---------------|---------------|
|
||||
| **GCN 1.0 (HD 7000)** | H.264 | Limited | Limited |
|
||||
| **GCN 2.0 (R7/R9 200)** | H.264 | Yes | Yes |
|
||||
| **GCN 3.0/4.0 (RX 400/500)** | H.264, HEVC | Yes | Yes |
|
||||
| **RDNA 1 (RX 5000)** | H.264, HEVC | Yes | Yes |
|
||||
| **RDNA 2 (RX 6000)** | H.264, HEVC | Yes | Yes |
|
||||
| **RDNA 3 (RX 7000)** | H.264, HEVC, AV1 | Yes | Yes |
|
||||
|
||||
### Checking Your Hardware
|
||||
|
||||
**Linux (VAAPI):**
|
||||
```bash
|
||||
# Check for AMD GPU
|
||||
lspci | grep -i amd
|
||||
|
||||
# Verify VAAPI support
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
|
||||
# Check available encoders
|
||||
ffmpeg -encoders | grep vaapi
|
||||
```
|
||||
|
||||
**Windows (AMF):**
|
||||
```bash
|
||||
# Check FFmpeg AMF encoders
|
||||
ffmpeg -encoders | grep amf
|
||||
```
|
||||
|
||||
Expected output (Linux):
|
||||
```
|
||||
V....D h264_vaapi H.264/AVC (VAAPI) (codec h264)
|
||||
V....D hevc_vaapi H.265/HEVC (VAAPI) (codec hevc)
|
||||
```
|
||||
|
||||
Expected output (Windows):
|
||||
```
|
||||
V....D h264_amf AMD AMF H.264 Encoder (codec h264)
|
||||
V....D hevc_amf AMD AMF HEVC encoder (codec hevc)
|
||||
```
|
||||
|
||||
## Linux Setup (VAAPI)
|
||||
|
||||
### Docker Installation
|
||||
|
||||
1. **Pass GPU devices:**
|
||||
```yaml
|
||||
services:
|
||||
alchemist:
|
||||
image: ghcr.io/bybrooklyn/alchemist:latest
|
||||
devices:
|
||||
- /dev/dri:/dev/dri
|
||||
group_add:
|
||||
- video
|
||||
- render
|
||||
```
|
||||
|
||||
2. **Verify GPU access:**
|
||||
```bash
|
||||
docker exec -it alchemist ls -la /dev/dri/
|
||||
# Should show renderD128 or similar
|
||||
```
|
||||
|
||||
### Binary Installation
|
||||
|
||||
1. **Install AMD drivers:**
|
||||
|
||||
**Ubuntu/Debian:**
|
||||
```bash
|
||||
# Add AMD GPU repository
|
||||
wget -q -O - https://repo.radeon.com/rocm/rocm.gpg.key | sudo apt-key add -
|
||||
echo 'deb [arch=amd64] https://repo.radeon.com/rocm/apt/debian/ focal main' | sudo tee /etc/apt/sources.list.d/rocm.list
|
||||
sudo apt update
|
||||
|
||||
# Install drivers
|
||||
sudo apt install rocm-dkms
|
||||
sudo apt install mesa-va-drivers
|
||||
```
|
||||
|
||||
**Fedora/RHEL:**
|
||||
```bash
|
||||
sudo dnf install mesa-va-drivers
|
||||
sudo dnf install libva-utils
|
||||
```
|
||||
|
||||
**Arch Linux:**
|
||||
```bash
|
||||
sudo pacman -S mesa-va-drivers
|
||||
sudo pacman -S libva-utils
|
||||
```
|
||||
|
||||
2. **Verify VAAPI:**
|
||||
```bash
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
```
|
||||
|
||||
3. **User permissions:**
|
||||
```bash
|
||||
sudo usermod -a -G video $USER
|
||||
sudo usermod -a -G render $USER
|
||||
# Log out and back in
|
||||
```
|
||||
|
||||
## Windows Setup (AMF)
|
||||
|
||||
### Requirements
|
||||
|
||||
1. **AMD Adrenalin drivers** (latest version recommended)
|
||||
2. **AMD AMF SDK** (included with drivers)
|
||||
3. **FFmpeg with AMF support**
|
||||
|
||||
### Installation
|
||||
|
||||
1. **Download AMD drivers:**
|
||||
- Visit [amd.com/support](https://amd.com/support)
|
||||
- Download latest Adrenalin drivers
|
||||
- Install with "Standard" or "Custom" installation
|
||||
|
||||
2. **Verify AMF support:**
|
||||
```cmd
|
||||
ffmpeg -encoders | findstr amf
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### In Alchemist
|
||||
|
||||
**Linux (VAAPI):**
|
||||
1. Navigate to **Settings** → **Hardware**
|
||||
2. Set **Preferred Vendor** to `amd`
|
||||
3. Set **Device Path** to `/dev/dri/renderD128`
|
||||
4. Verify "AMD VAAPI" appears in hardware status
|
||||
|
||||
**Windows (AMF):**
|
||||
1. Navigate to **Settings** → **Hardware**
|
||||
2. Set **Preferred Vendor** to `amd`
|
||||
3. Leave **Device Path** empty
|
||||
4. Verify "AMD AMF" appears in hardware status
|
||||
|
||||
### Quality Settings
|
||||
|
||||
AMD encoding quality varies by implementation:
|
||||
|
||||
**VAAPI Quality (Linux):**
|
||||
| Profile | Quality Level | Use Case |
|
||||
|---------|--------------|----------|
|
||||
| **Quality** | High | Best quality, slower |
|
||||
| **Balanced** | Medium | Good balance |
|
||||
| **Speed** | Fast | Faster encoding |
|
||||
|
||||
**AMF Quality (Windows):**
|
||||
| Profile | CRF/Quality | Use Case |
|
||||
|---------|-------------|----------|
|
||||
| **Quality** | 20-24 | Archive quality |
|
||||
| **Balanced** | 25-28 | General use |
|
||||
| **Speed** | 29-32 | Quick turnaround |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Linux (VAAPI)
|
||||
|
||||
#### "No VAAPI device found"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check GPU detection:**
|
||||
```bash
|
||||
lspci | grep -i vga
|
||||
dmesg | grep amdgpu
|
||||
```
|
||||
|
||||
2. **Verify device nodes:**
|
||||
```bash
|
||||
ls -la /dev/dri/
|
||||
# Should show renderD128, card0, etc.
|
||||
```
|
||||
|
||||
3. **Test VAAPI directly:**
|
||||
```bash
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
```
|
||||
|
||||
#### "VAAPI initialization failed"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Install mesa drivers:**
|
||||
```bash
|
||||
sudo apt install mesa-va-drivers libva-dev
|
||||
```
|
||||
|
||||
2. **Set environment variables:**
|
||||
```bash
|
||||
export LIBVA_DRIVER_NAME=radeonsi
|
||||
export LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri
|
||||
```
|
||||
|
||||
3. **Check user groups:**
|
||||
```bash
|
||||
groups $USER
|
||||
# Should include 'video' and 'render'
|
||||
```
|
||||
|
||||
### Windows (AMF)
|
||||
|
||||
#### "AMF encoder not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Update AMD drivers:**
|
||||
- Download latest Adrenalin drivers
|
||||
- Use DDU (Display Driver Uninstaller) if needed
|
||||
|
||||
2. **Verify GPU detection:**
|
||||
```cmd
|
||||
dxdiag
|
||||
# Check Display tab for AMD GPU
|
||||
```
|
||||
|
||||
3. **Check Windows version:**
|
||||
- AMF requires Windows 10 or later
|
||||
- Update Windows if necessary
|
||||
|
||||
#### Poor quality output
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Adjust quality settings:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "quality"
|
||||
```
|
||||
|
||||
2. **Use constant quality mode:**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-rc", "cqp", "-qp_i", "22", "-qp_p", "24"]
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Linux Optimization
|
||||
|
||||
1. **Enable GPU scheduler:**
|
||||
```bash
|
||||
echo 'KERNEL=="card*", SUBSYSTEM=="drm", DRIVERS=="amdgpu", ATTR{device/power_dpm_force_performance_level}="high"' | sudo tee /etc/udev/rules.d/30-amdgpu-pm.rules
|
||||
sudo udevadm control --reload-rules
|
||||
```
|
||||
|
||||
2. **Optimize for encoding:**
|
||||
```bash
|
||||
echo high | sudo tee /sys/class/drm/card*/device/power_dpm_force_performance_level
|
||||
```
|
||||
|
||||
### Windows Optimization
|
||||
|
||||
1. **AMD Adrenalin settings:**
|
||||
- Open AMD Software
|
||||
- Graphics → Advanced → GPU Workload → "Compute"
|
||||
- Set Power Limit to maximum
|
||||
|
||||
2. **Registry optimizations:**
|
||||
```reg
|
||||
[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Class\{4d36e968-e325-11ce-bfc1-08002be10318}\0000]
|
||||
"PP_ThermalAutoThrottlingEnable"=dword:00000000
|
||||
```
|
||||
|
||||
### Concurrent Encoding
|
||||
|
||||
AMD GPUs generally support fewer concurrent streams than NVIDIA:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Start with 1, test higher values
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Codec Selection
|
||||
|
||||
#### HEVC (Recommended)
|
||||
- Best quality/size ratio
|
||||
- Good AMD hardware support
|
||||
- Wide compatibility
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced"
|
||||
```
|
||||
|
||||
#### H.264 (Maximum compatibility)
|
||||
- Universal playback support
|
||||
- Fastest encoding
|
||||
- Larger file sizes
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed"
|
||||
```
|
||||
|
||||
### Quality Settings
|
||||
|
||||
For best results with AMD encoding:
|
||||
|
||||
**Linux (VAAPI):**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-vaapi_device", "/dev/dri/renderD128", "-qp", "24"]
|
||||
```
|
||||
|
||||
**Windows (AMF):**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-usage", "transcoding", "-rc", "cqp", "-qp", "24"]
|
||||
```
|
||||
|
||||
### Thermal Management
|
||||
|
||||
AMD GPUs can run hot during extended encoding:
|
||||
|
||||
1. **Monitor temperatures:**
|
||||
```bash
|
||||
# Linux
|
||||
sensors
|
||||
|
||||
# Windows - Use MSI Afterburner or AMD Software
|
||||
```
|
||||
|
||||
2. **Adjust fan curves** in AMD Software
|
||||
|
||||
3. **Consider undervolting** for 24/7 operation
|
||||
|
||||
### Power Efficiency
|
||||
|
||||
For always-on systems:
|
||||
- Lower power limits in AMD Software
|
||||
- Use "Balanced" or "Speed" quality profiles
|
||||
- Enable power management features
|
||||
- Consider concurrent job limits
|
||||
|
||||
## Hardware-Specific Notes
|
||||
|
||||
### Older AMD Cards (Pre-RDNA)
|
||||
- Limited to H.264 encoding
|
||||
- Quality may be lower than modern cards
|
||||
- Consider CPU fallback for critical content
|
||||
|
||||
### APUs (Integrated Graphics)
|
||||
- Share system memory
|
||||
- Thermal constraints in compact systems
|
||||
- Good for low-power applications
|
||||
|
||||
### High-end Cards (RX 6000/7000)
|
||||
- Excellent encoding performance
|
||||
- Support for modern codecs
|
||||
- May require adequate cooling
|
||||
367
docs/src/content/docs/hardware/apple.md
Normal file
367
docs/src/content/docs/hardware/apple.md
Normal file
@@ -0,0 +1,367 @@
|
||||
---
|
||||
title: Apple VideoToolbox Setup
|
||||
description: Complete guide to setting up Apple VideoToolbox hardware acceleration on macOS.
|
||||
---
|
||||
|
||||
# Apple VideoToolbox Setup
|
||||
|
||||
Apple VideoToolbox provides hardware-accelerated encoding on macOS using built-in media engines in Apple Silicon and Intel Macs. This is the most efficient option for macOS users.
|
||||
|
||||
## Supported Hardware
|
||||
|
||||
VideoToolbox support varies by Mac model and codec:
|
||||
|
||||
| Hardware | H.264 | HEVC | AV1 | Notes |
|
||||
|----------|-------|------|-----|-------|
|
||||
| **Intel Macs (2016+)** | ✅ | ✅ | ❌ | Requires T2 chip for HEVC |
|
||||
| **Apple Silicon M1** | ✅ | ✅ | ❌ | Dedicated media engines |
|
||||
| **Apple Silicon M2** | ✅ | ✅ | ❌ | Enhanced media engines |
|
||||
| **Apple Silicon M3** | ✅ | ✅ | ✅ | AV1 encode/decode support |
|
||||
|
||||
### Checking Your Hardware
|
||||
|
||||
Verify VideoToolbox support:
|
||||
```bash
|
||||
# Check available encoders
|
||||
ffmpeg -encoders | grep videotoolbox
|
||||
|
||||
# System information
|
||||
system_profiler SPHardwareDataType
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
V....D h264_videotoolbox VideoToolbox H.264 Encoder (codec h264)
|
||||
V....D hevc_videotoolbox VideoToolbox H.265 Encoder (codec hevc)
|
||||
```
|
||||
|
||||
On M3 Macs:
|
||||
```
|
||||
V....D av1_videotoolbox VideoToolbox AV1 Encoder (codec av1)
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Docker Setup
|
||||
|
||||
Running Docker on macOS with VideoToolbox requires special configuration:
|
||||
|
||||
```yaml
|
||||
version: '3.8'
|
||||
services:
|
||||
alchemist:
|
||||
image: ghcr.io/bybrooklyn/alchemist:latest
|
||||
container_name: alchemist
|
||||
ports:
|
||||
- "3000:3000"
|
||||
volumes:
|
||||
- ./config:/app/config
|
||||
- ./data:/app/data
|
||||
- /path/to/media:/media
|
||||
environment:
|
||||
- ALCHEMIST_CONFIG_PATH=/app/config/config.toml
|
||||
- ALCHEMIST_DB_PATH=/app/data/alchemist.db
|
||||
# VideoToolbox access from container is limited
|
||||
# Binary installation recommended for best results
|
||||
```
|
||||
|
||||
⚠️ **Note**: Docker containers on macOS have limited access to VideoToolbox. Binary installation is recommended for optimal performance.
|
||||
|
||||
### Binary Installation (Recommended)
|
||||
|
||||
1. **Download Alchemist binary** for macOS from [GitHub Releases](https://github.com/bybrooklyn/alchemist/releases)
|
||||
|
||||
2. **Install FFmpeg with VideoToolbox:**
|
||||
```bash
|
||||
# Using Homebrew (recommended)
|
||||
brew install ffmpeg
|
||||
|
||||
# Verify VideoToolbox support
|
||||
ffmpeg -encoders | grep videotoolbox
|
||||
```
|
||||
|
||||
3. **Run Alchemist:**
|
||||
```bash
|
||||
chmod +x alchemist-macos
|
||||
./alchemist-macos
|
||||
```
|
||||
|
||||
### Build from Source
|
||||
|
||||
For the latest features:
|
||||
```bash
|
||||
# Install Rust
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
|
||||
# Clone and build
|
||||
git clone https://github.com/bybrooklyn/alchemist.git
|
||||
cd alchemist
|
||||
cargo build --release
|
||||
|
||||
./target/release/alchemist
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### In Alchemist
|
||||
|
||||
1. Navigate to **Settings** → **Hardware**
|
||||
2. Set **Preferred Vendor** to `apple`
|
||||
3. Leave **Device Path** empty (not applicable)
|
||||
4. Verify "Apple VideoToolbox" appears in hardware status
|
||||
|
||||
### Quality Settings
|
||||
|
||||
VideoToolbox uses quality values (lower = better quality):
|
||||
|
||||
| Profile | Quality Value | Use Case |
|
||||
|---------|--------------|----------|
|
||||
| **Quality** | 55 | Best quality, larger files |
|
||||
| **Balanced** | 65 | Good balance |
|
||||
| **Speed** | 75 | Faster encoding, lower quality |
|
||||
|
||||
### Codec-Specific Configuration
|
||||
|
||||
#### HEVC (Recommended)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced"
|
||||
|
||||
[hardware]
|
||||
preferred_vendor = "apple"
|
||||
```
|
||||
|
||||
#### H.264 (Maximum compatibility)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed" # H.264 encodes quickly
|
||||
```
|
||||
|
||||
#### AV1 (M3 only)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "av1"
|
||||
quality_profile = "quality" # AV1 benefits from higher quality
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "VideoToolbox encoder not available"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check macOS version:**
|
||||
- macOS 10.13+ required for HEVC
|
||||
- macOS 14+ required for AV1 (M3 only)
|
||||
|
||||
2. **Verify hardware support:**
|
||||
```bash
|
||||
system_profiler SPHardwareDataType | grep "Model Identifier"
|
||||
```
|
||||
|
||||
3. **Test FFmpeg directly:**
|
||||
```bash
|
||||
ffmpeg -f lavfi -i testsrc=duration=10:size=1920x1080 -c:v hevc_videotoolbox -t 5 test.mov
|
||||
```
|
||||
|
||||
### Poor quality output
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Adjust quality settings:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "quality"
|
||||
```
|
||||
|
||||
2. **Use lower quality values (better quality):**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-q:v", "50"] # Lower = better quality
|
||||
```
|
||||
|
||||
3. **Enable constant quality mode:**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-b:v", "0"] # Forces constant quality
|
||||
```
|
||||
|
||||
### Slow encoding performance
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check thermal throttling:**
|
||||
```bash
|
||||
# Monitor CPU temperature
|
||||
sudo powermetrics --samplers smc -n 1
|
||||
```
|
||||
|
||||
2. **Adjust concurrent jobs:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Start with 1 on MacBooks
|
||||
```
|
||||
|
||||
3. **Optimize for battery/thermal:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "speed"
|
||||
threads = 4 # Limit CPU usage
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Apple Silicon Optimization
|
||||
|
||||
Apple Silicon Macs have dedicated media engines:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 2 # M1/M2 can handle 2 concurrent streams
|
||||
quality_profile = "balanced"
|
||||
```
|
||||
|
||||
**M3 Macs** with enhanced engines:
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 3 # M3 Pro/Max can handle more
|
||||
output_codec = "av1" # Take advantage of AV1 support
|
||||
```
|
||||
|
||||
### Intel Mac Optimization
|
||||
|
||||
Intel Macs rely on CPU + T2 chip:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Conservative for thermal management
|
||||
threads = 8 # Use available CPU cores
|
||||
quality_profile = "balanced"
|
||||
```
|
||||
|
||||
### Battery Life (MacBooks)
|
||||
|
||||
For better battery life during encoding:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "speed"
|
||||
concurrent_jobs = 1
|
||||
threads = 4
|
||||
|
||||
[schedule]
|
||||
# Only encode when plugged in
|
||||
[[schedule.windows]]
|
||||
start_time = "22:00"
|
||||
end_time = "06:00"
|
||||
enabled = true
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Thermal Management
|
||||
|
||||
**MacBooks** (especially Intel models) can throttle during extended encoding:
|
||||
|
||||
1. **Monitor temperatures:**
|
||||
```bash
|
||||
# Install temperature monitoring
|
||||
brew install stats
|
||||
```
|
||||
|
||||
2. **Use clamshell mode** when possible (better cooling)
|
||||
|
||||
3. **External cooling** for extended sessions
|
||||
|
||||
4. **Lower quality profiles** for bulk operations
|
||||
|
||||
### Power Management
|
||||
|
||||
**Battery considerations:**
|
||||
- Use "Speed" profile on battery
|
||||
- Schedule encoding for AC power
|
||||
- Monitor battery usage in Activity Monitor
|
||||
|
||||
**Desktop Macs:**
|
||||
- Can sustain higher workloads
|
||||
- Better thermal management
|
||||
- Support for longer concurrent jobs
|
||||
|
||||
### Codec Selection
|
||||
|
||||
#### For M3 Macs (AV1 support)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "av1"
|
||||
quality_profile = "quality"
|
||||
# Best compression, future-proof
|
||||
```
|
||||
|
||||
#### For M1/M2 Macs
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced"
|
||||
# Excellent efficiency, wide support
|
||||
```
|
||||
|
||||
#### For older Intel Macs
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed"
|
||||
# Most compatible, least thermal stress
|
||||
```
|
||||
|
||||
### Quality vs. Speed
|
||||
|
||||
**Archive quality** (slow but excellent):
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "quality"
|
||||
concurrent_jobs = 1
|
||||
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-q:v", "45"]
|
||||
```
|
||||
|
||||
**Balanced performance** (recommended):
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "balanced"
|
||||
concurrent_jobs = 2 # Apple Silicon only
|
||||
```
|
||||
|
||||
**Fast turnaround** (quick results):
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "speed"
|
||||
concurrent_jobs = 1
|
||||
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-q:v", "75"]
|
||||
```
|
||||
|
||||
## Hardware-Specific Notes
|
||||
|
||||
### MacBook Air
|
||||
- **Fanless design** limits sustained performance
|
||||
- Use conservative settings for long encodes
|
||||
- Monitor thermal throttling
|
||||
|
||||
### MacBook Pro
|
||||
- **Better cooling** supports higher workloads
|
||||
- 14"/16" models handle concurrent jobs better
|
||||
- Intel models may need thermal management
|
||||
|
||||
### Mac Studio/Pro
|
||||
- **Excellent cooling** for sustained workloads
|
||||
- Can handle maximum concurrent jobs
|
||||
- Ideal for bulk transcoding operations
|
||||
|
||||
### Mac mini
|
||||
- **Good performance** but compact thermal design
|
||||
- Monitor temperatures during heavy use
|
||||
- Balance between performance and heat
|
||||
378
docs/src/content/docs/hardware/cpu.md
Normal file
378
docs/src/content/docs/hardware/cpu.md
Normal file
@@ -0,0 +1,378 @@
|
||||
---
|
||||
title: CPU Encoding (Software Fallback)
|
||||
description: Guide to CPU-based software encoding when hardware acceleration is unavailable.
|
||||
---
|
||||
|
||||
# CPU Encoding (Software Fallback)
|
||||
|
||||
When hardware acceleration isn't available or enabled, Alchemist falls back to CPU-based software encoding. While slower than GPU acceleration, modern CPUs can produce excellent quality results.
|
||||
|
||||
## Supported Encoders
|
||||
|
||||
Alchemist uses these high-quality software encoders:
|
||||
|
||||
| Codec | Encoder | Quality | Speed | Use Case |
|
||||
|-------|---------|---------|-------|----------|
|
||||
| **AV1** | SVT-AV1 | Excellent | Medium | Future-proof archival |
|
||||
| **AV1** | libaom-av1 | Best | Slow | Maximum quality |
|
||||
| **HEVC** | x265 | Excellent | Medium | General purpose |
|
||||
| **H.264** | x264 | Very Good | Fast | Compatibility |
|
||||
|
||||
### Checking CPU Encoders
|
||||
|
||||
Verify available software encoders:
|
||||
```bash
|
||||
ffmpeg -encoders | grep -E "libsvtav1|libaom|libx265|libx264"
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
V....D libsvtav1 SVT-AV1(Scalable Video Technology for AV1) encoder (codec av1)
|
||||
V....D libaom-av1 libaom AV1 (codec av1)
|
||||
V....D libx265 libx265 H.265 / HEVC (codec hevc)
|
||||
V....D libx264 libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (codec h264)
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Enabling CPU Encoding
|
||||
|
||||
```toml
|
||||
[hardware]
|
||||
preferred_vendor = "cpu" # Force CPU encoding
|
||||
allow_cpu_encoding = true
|
||||
cpu_preset = "medium"
|
||||
|
||||
[transcode]
|
||||
output_codec = "av1" # Recommended for CPU encoding
|
||||
quality_profile = "balanced"
|
||||
```
|
||||
|
||||
### CPU Presets
|
||||
|
||||
CPU presets balance encoding speed vs. quality:
|
||||
|
||||
| Preset | SVT-AV1 | x265 | x264 | Use Case |
|
||||
|--------|---------|------|------|----------|
|
||||
| **Slow** | 4 | slow | slow | Maximum quality, archival |
|
||||
| **Medium** | 8 | medium | medium | Balanced performance |
|
||||
| **Fast** | 12 | fast | fast | Quick turnaround |
|
||||
| **Faster** | 13 | faster | faster | Speed priority |
|
||||
|
||||
### Quality Settings
|
||||
|
||||
Quality profiles adjust CRF (Constant Rate Factor) values:
|
||||
|
||||
| Profile | AV1 CRF | HEVC CRF | H.264 CRF | File Size | Quality |
|
||||
|---------|---------|----------|-----------|-----------|---------|
|
||||
| **Quality** | 24 | 22 | 20 | Larger | Best |
|
||||
| **Balanced** | 28 | 26 | 23 | Medium | Good |
|
||||
| **Speed** | 32 | 30 | 26 | Smaller | Acceptable |
|
||||
|
||||
### Thread Configuration
|
||||
|
||||
Optimize CPU thread usage:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
threads = 0 # Auto-detect (recommended)
|
||||
# Or set manually:
|
||||
# threads = 8 # Use 8 threads per job
|
||||
|
||||
concurrent_jobs = 1 # Start with 1, increase carefully
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Thread Allocation
|
||||
|
||||
**Rule of thumb**: threads per job × concurrent_jobs ≈ total CPU cores (don't oversubscribe)
|
||||
|
||||
| CPU Cores | Suggested Config |
|
||||
|-----------|------------------|
|
||||
| **4 cores** | 1 job, 4 threads |
|
||||
| **8 cores** | 1 job, 8 threads or 2 jobs, 4 threads each |
|
||||
| **16 cores** | 2 jobs, 8 threads each |
|
||||
| **32+ cores** | 4 jobs, 8 threads each |
|
||||
|
||||
```toml
|
||||
# Example: 16-core CPU
|
||||
[transcode]
|
||||
concurrent_jobs = 2
|
||||
threads = 8
|
||||
```
|
||||
|
||||
### Memory Considerations
|
||||
|
||||
Software encoding is memory-intensive:
|
||||
|
||||
| Resolution | Recommended RAM per Job |
|
||||
|------------|------------------------|
|
||||
| **1080p** | 4-6 GB |
|
||||
| **1440p** | 6-8 GB |
|
||||
| **4K** | 8-12 GB |
|
||||
|
||||
```toml
|
||||
# Adjust jobs based on available RAM
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Conservative for 16GB systems
|
||||
```
|
||||
|
||||
### Codec-Specific Optimization
|
||||
|
||||
#### AV1 (SVT-AV1) - Recommended
|
||||
Best compression efficiency for CPU encoding:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "av1"
|
||||
quality_profile = "balanced"
|
||||
|
||||
[transcode.encoder_args]
|
||||
extra_args = [
|
||||
"-preset", "8", # Good speed/quality balance
|
||||
"-crf", "28", # Quality level
|
||||
"-svtav1-params", "tune=0:enable-overlays=1"
|
||||
]
|
||||
```
|
||||
|
||||
#### HEVC (x265)
|
||||
Good balance of quality and compatibility:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced"
|
||||
|
||||
[transcode.encoder_args]
|
||||
extra_args = [
|
||||
"-preset", "medium",
|
||||
"-crf", "26",
|
||||
"-x265-params", "log-level=error"
|
||||
]
|
||||
```
|
||||
|
||||
#### H.264 (x264)
|
||||
Fastest software encoding:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed"
|
||||
|
||||
[transcode.encoder_args]
|
||||
extra_args = [
|
||||
"-preset", "fast",
|
||||
"-crf", "23"
|
||||
]
|
||||
```
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Two-Pass Encoding
|
||||
|
||||
For maximum quality (much slower):
|
||||
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
# AV1 two-pass — NOTE: FFmpeg cannot run both passes from a single
# argument list; pass 1 and pass 2 must be two separate invocations.
|
||||
# Pass 1 (analysis only, output discarded):
extra_args = ["-pass", "1", "-an", "-f", "null", "/dev/null"]
|
||||
# Pass 2 (re-run with the stats file produced by pass 1):
# extra_args = ["-pass", "2"]
|
||||
]
|
||||
```
|
||||
|
||||
### Quality-Based Encoding
|
||||
|
||||
Use different quality for different content:
|
||||
|
||||
```toml
|
||||
# High quality for movies
|
||||
[profiles.movies]
|
||||
quality_profile = "quality"
|
||||
output_codec = "av1"
|
||||
|
||||
# Faster for TV shows
|
||||
[profiles.tv]
|
||||
quality_profile = "speed"
|
||||
output_codec = "hevc"
|
||||
```
|
||||
|
||||
### Grain Synthesis (AV1)
|
||||
|
||||
Preserve film grain efficiently:
|
||||
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-svtav1-params", "film-grain=50"]
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### High CPU Usage
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Reduce concurrent jobs:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1
|
||||
```
|
||||
|
||||
2. **Lower thread count:**
|
||||
```toml
|
||||
[transcode]
|
||||
threads = 4 # Use fewer threads
|
||||
```
|
||||
|
||||
3. **Use faster presets:**
|
||||
```toml
|
||||
[hardware]
|
||||
cpu_preset = "fast"
|
||||
```
|
||||
|
||||
### Out of Memory Errors
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Reduce concurrent jobs:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1
|
||||
```
|
||||
|
||||
2. **Close other applications** during encoding
|
||||
|
||||
3. **Use H.264** instead of AV1/HEVC:
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
```
|
||||
|
||||
### Slow Encoding Speed
|
||||
|
||||
**Expected encoding speeds** (1080p content):
|
||||
|
||||
| Codec | Preset | Typical Speed |
|
||||
|-------|--------|---------------|
|
||||
| **AV1** | Medium | 0.5-1.5x realtime |
|
||||
| **HEVC** | Medium | 1-3x realtime |
|
||||
| **H.264** | Medium | 3-8x realtime |
|
||||
|
||||
**Solutions for slow speeds:**
|
||||
|
||||
1. **Use faster presets:**
|
||||
```toml
|
||||
[hardware]
|
||||
cpu_preset = "fast"
|
||||
```
|
||||
|
||||
2. **Switch codecs:**
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264" # Fastest
|
||||
```
|
||||
|
||||
3. **Verify CPU boost** is working:
|
||||
```bash
|
||||
# Linux
|
||||
cat /proc/cpuinfo | grep MHz
|
||||
|
||||
# macOS
|
||||
sysctl -a | grep freq
|
||||
```
|
||||
|
||||
### Quality Issues
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Lower CRF values** (better quality):
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-crf", "24"] # Lower = better quality
|
||||
```
|
||||
|
||||
2. **Use slower presets:**
|
||||
```toml
|
||||
[hardware]
|
||||
cpu_preset = "slow"
|
||||
```
|
||||
|
||||
3. **Enable quality features:**
|
||||
```toml
|
||||
# x265 example
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-x265-params", "aq-mode=3:aq-strength=1.0"]
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### When to Use CPU Encoding
|
||||
|
||||
**Ideal scenarios:**
|
||||
- No compatible GPU available
|
||||
- Maximum quality requirements
|
||||
- Small batch processing
|
||||
- Development/testing
|
||||
|
||||
**Consider GPU instead when:**
|
||||
- Processing large libraries
|
||||
- Speed is priority
|
||||
- Running 24/7 operations
|
||||
- High resolution content (4K+)
|
||||
|
||||
### Quality vs. Speed Trade-offs
|
||||
|
||||
**Maximum quality** (archival):
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "av1"
|
||||
quality_profile = "quality"
|
||||
concurrent_jobs = 1
|
||||
|
||||
[hardware]
|
||||
cpu_preset = "slow"
|
||||
```
|
||||
|
||||
**Balanced performance** (recommended):
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced"
|
||||
concurrent_jobs = 2 # Adjust for your CPU
|
||||
|
||||
[hardware]
|
||||
cpu_preset = "medium"
|
||||
```
|
||||
|
||||
**Speed priority** (quick results):
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed"
|
||||
concurrent_jobs = 4 # More jobs, fewer threads each
|
||||
|
||||
[hardware]
|
||||
cpu_preset = "fast"
|
||||
```
|
||||
|
||||
### System Optimization
|
||||
|
||||
**Linux optimizations:**
|
||||
```bash
|
||||
# Set CPU governor to performance
|
||||
echo performance | sudo tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
|
||||
|
||||
# Ensure Intel turbo boost stays enabled (0 = turbo on; writing 1 would disable it)
|
||||
echo 0 | sudo tee /sys/devices/system/cpu/intel_pstate/no_turbo
|
||||
```
|
||||
|
||||
**Windows optimizations:**
|
||||
- Set power plan to "High Performance"
|
||||
- Disable CPU parking in registry
|
||||
- Close unnecessary background apps
|
||||
|
||||
**macOS optimizations:**
|
||||
- Use Activity Monitor to verify CPU usage
|
||||
- Close other intensive applications
|
||||
- Consider thermal throttling on laptops
|
||||
316
docs/src/content/docs/hardware/intel.md
Normal file
316
docs/src/content/docs/hardware/intel.md
Normal file
@@ -0,0 +1,316 @@
|
||||
---
|
||||
title: Intel QSV Setup
|
||||
description: Complete guide to setting up Intel Quick Sync Video (QSV) hardware acceleration.
|
||||
---
|
||||
|
||||
# Intel Quick Sync Video (QSV) Setup
|
||||
|
||||
Intel Quick Sync Video provides excellent hardware acceleration with low power consumption. Available on most Intel CPUs with integrated graphics since Sandy Bridge (2011).
|
||||
|
||||
## Supported Hardware
|
||||
|
||||
QSV is available on Intel processors with integrated graphics:
|
||||
|
||||
| Generation | Codecs | Performance |
|
||||
|------------|---------|------------|
|
||||
| **Sandy Bridge (2nd gen)** | H.264 | Basic support |
|
||||
| **Ivy Bridge (3rd gen)** | H.264 | Improved quality |
|
||||
| **Haswell (4th gen)** | H.264 | Better efficiency |
|
||||
| **Broadwell (5th gen)** | H.264, HEVC (decode only) | Low power |
|
||||
| **Skylake (6th gen)** | H.264, HEVC | HEVC encoding support |
|
||||
| **Kaby Lake (7th gen)** | H.264, HEVC | Enhanced quality |
|
||||
| **Coffee Lake (8th-10th gen)** | H.264, HEVC | Improved performance |
|
||||
| **Tiger Lake (11th gen)** | H.264, HEVC, AV1 (decode) | AV1 hardware decode |
|
||||
| **Alder Lake (12th gen)** | H.264, HEVC, AV1 | Full AV1 encode/decode |
|
||||
| **Raptor Lake (13th gen)** | H.264, HEVC, AV1 | Enhanced AV1 performance |
|
||||
|
||||
### Checking Your Hardware
|
||||
|
||||
Verify QSV support:
|
||||
```bash
|
||||
# Check for Intel GPU
|
||||
lspci | grep -i intel
|
||||
|
||||
# Look for iGPU device files
|
||||
ls -la /dev/dri/
|
||||
|
||||
# Check FFmpeg QSV encoders
|
||||
ffmpeg -encoders | grep qsv
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
V....D av1_qsv Intel AV1 encoder (Intel Quick Sync Video acceleration) (codec av1)
|
||||
V....D h264_qsv Intel H.264 encoder (Intel Quick Sync Video acceleration) (codec h264)
|
||||
V....D hevc_qsv Intel HEVC encoder (Intel Quick Sync Video acceleration) (codec hevc)
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Docker Setup (Recommended)
|
||||
|
||||
1. **Pass GPU devices to container:**
|
||||
```yaml
|
||||
services:
|
||||
alchemist:
|
||||
image: ghcr.io/bybrooklyn/alchemist:latest
|
||||
devices:
|
||||
- /dev/dri:/dev/dri
|
||||
group_add:
|
||||
- video # or render group
|
||||
```
|
||||
|
||||
2. **Verify access inside container:**
|
||||
```bash
|
||||
docker exec -it alchemist ls -la /dev/dri/
|
||||
```
|
||||
|
||||
Should show devices like:
|
||||
```
|
||||
renderD128
|
||||
card0
|
||||
```
|
||||
|
||||
### Binary Installation
|
||||
|
||||
1. **Install Intel GPU drivers:**
|
||||
|
||||
**Ubuntu/Debian:**
|
||||
```bash
|
||||
# Intel GPU drivers
|
||||
sudo apt install intel-media-va-driver-non-free
|
||||
sudo apt install vainfo
|
||||
|
||||
# Verify VAAPI support
|
||||
vainfo
|
||||
```
|
||||
|
||||
**Fedora/RHEL:**
|
||||
```bash
|
||||
sudo dnf install intel-media-driver
|
||||
sudo dnf install libva-utils
|
||||
vainfo
|
||||
```
|
||||
|
||||
**Arch Linux:**
|
||||
```bash
|
||||
sudo pacman -S intel-media-driver
|
||||
sudo pacman -S libva-utils
|
||||
vainfo
|
||||
```
|
||||
|
||||
2. **Install FFmpeg with QSV:**
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt install ffmpeg
|
||||
|
||||
# Verify QSV support
|
||||
ffmpeg -encoders | grep qsv
|
||||
```
|
||||
|
||||
3. **User permissions:**
|
||||
```bash
|
||||
# Add user to video/render group
|
||||
sudo usermod -a -G video $USER
|
||||
sudo usermod -a -G render $USER
|
||||
# Log out and back in
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### In Alchemist
|
||||
|
||||
1. Navigate to **Settings** → **Hardware**
|
||||
2. Set **Preferred Vendor** to `intel`
|
||||
3. Set **Device Path** to `/dev/dri/renderD128` (or auto-detect)
|
||||
4. Verify detection shows "Intel QSV"
|
||||
|
||||
### Quality Settings
|
||||
|
||||
QSV uses global quality values (lower = better quality):
|
||||
|
||||
| Profile | Quality Value | Use Case |
|
||||
|---------|--------------|----------|
|
||||
| **Quality** | 20 | Best quality, slower |
|
||||
| **Balanced** | 25 | Good balance |
|
||||
| **Speed** | 30 | Faster encoding |
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
```toml
|
||||
[hardware]
|
||||
preferred_vendor = "intel"
|
||||
device_path = "/dev/dri/renderD128"
|
||||
|
||||
[transcode]
|
||||
quality_profile = "balanced"
|
||||
output_codec = "hevc"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "No QSV capable devices found"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check iGPU is enabled in BIOS:**
|
||||
- Enable "Intel Graphics" or "Internal Graphics"
|
||||
- Set "Primary Display" to "Auto" or "Intel"
|
||||
|
||||
2. **Verify device nodes:**
|
||||
```bash
|
||||
ls -la /dev/dri/
|
||||
stat /dev/dri/renderD128
|
||||
```
|
||||
|
||||
3. **Check user permissions:**
|
||||
```bash
|
||||
groups $USER
|
||||
# Should include 'video' or 'render'
|
||||
```
|
||||
|
||||
### "VAAPI initialization failed"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Install VAAPI drivers:**
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt install i965-va-driver intel-media-va-driver-non-free
|
||||
|
||||
# Test VAAPI
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
```
|
||||
|
||||
2. **Check environment variables:**
|
||||
```bash
|
||||
export LIBVA_DRIVER_NAME=iHD # or i965 for older hardware
|
||||
export LIBVA_DRIVERS_PATH=/usr/lib/x86_64-linux-gnu/dri
|
||||
```
|
||||
|
||||
### Poor Performance
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Enable look-ahead:**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-look_ahead", "1"]
|
||||
```
|
||||
|
||||
2. **Adjust quality:**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
global_quality = "23" # Lower for better quality
|
||||
```
|
||||
|
||||
3. **Check thermal throttling:**
|
||||
```bash
|
||||
# Monitor CPU/GPU temperatures
|
||||
sensors
|
||||
```
|
||||
|
||||
### Quality Issues
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Use higher quality settings:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "quality"
|
||||
```
|
||||
|
||||
2. **Enable B-frames:**
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-bf", "3", "-b_strategy", "1"]
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Power Efficiency
|
||||
|
||||
Intel QSV excels at power-efficient encoding:
|
||||
|
||||
- **Ultra-low power**: Perfect for NAS/always-on systems
|
||||
- **Thermal management**: Runs cooler than dedicated GPUs
|
||||
- **Concurrent streams**: Most iGPUs support 2-3 simultaneous encodes
|
||||
|
||||
### Memory Usage
|
||||
|
||||
Intel iGPU shares system RAM:
|
||||
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 2 # Safe for most systems
|
||||
threads = 4 # Reasonable CPU usage
|
||||
```
|
||||
|
||||
### Quality Tuning
|
||||
|
||||
For best quality with QSV:
|
||||
|
||||
```toml
|
||||
[transcode.encoder_args]
|
||||
# HEVC-specific optimizations
|
||||
extra_args = [
|
||||
"-global_quality", "22",
|
||||
"-look_ahead", "1",
|
||||
"-bf", "3",
|
||||
"-refs", "3"
|
||||
]
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Hardware Selection
|
||||
|
||||
- **Dedicated GPU slot**: Keep iGPU enabled even with dedicated GPU
|
||||
- **Memory allocation**: Ensure adequate RAM for shared graphics
|
||||
- **BIOS settings**: Enable iGPU for maximum compatibility
|
||||
|
||||
### Operating System
|
||||
|
||||
- **Linux**: Best support, low overhead
|
||||
- **Windows**: Good support with Intel drivers
|
||||
- **Headless operation**: Works without monitor connected
|
||||
|
||||
### Codec Selection
|
||||
|
||||
#### AV1 (12th gen+)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "av1"
|
||||
quality_profile = "quality" # AV1 benefits from higher quality settings
|
||||
```
|
||||
|
||||
#### HEVC (6th gen+)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "hevc"
|
||||
quality_profile = "balanced" # Good balance of speed/quality
|
||||
```
|
||||
|
||||
#### H.264 (All generations)
|
||||
```toml
|
||||
[transcode]
|
||||
output_codec = "h264"
|
||||
quality_profile = "speed" # H.264 encodes quickly
|
||||
```
|
||||
|
||||
## Hardware-Specific Notes
|
||||
|
||||
### NUCs and Mini PCs
|
||||
- Excellent for dedicated transcoding appliances
|
||||
- Low power consumption
|
||||
- Passive cooling options available
|
||||
|
||||
### Server CPUs
|
||||
- Xeon processors often lack iGPU
|
||||
- Check specifications before purchase
|
||||
- Consider discrete GPU for servers
|
||||
|
||||
### Laptops
|
||||
- May have power/thermal limitations
|
||||
- Consider reducing concurrent jobs
|
||||
- Monitor temperatures during extended use
|
||||
195
docs/src/content/docs/hardware/nvidia.md
Normal file
195
docs/src/content/docs/hardware/nvidia.md
Normal file
@@ -0,0 +1,195 @@
|
||||
---
|
||||
title: NVIDIA (NVENC) Setup
|
||||
description: Complete guide to setting up NVIDIA NVENC hardware acceleration with Alchemist.
|
||||
---
|
||||
|
||||
# NVIDIA NVENC Setup
|
||||
|
||||
NVIDIA GPUs provide excellent hardware-accelerated encoding via NVENC. This guide covers setup, troubleshooting, and optimization for NVIDIA graphics cards.
|
||||
|
||||
## Supported Hardware
|
||||
|
||||
NVENC is available on most modern NVIDIA GPUs:
|
||||
|
||||
| Generation | Codecs | Notes |
|
||||
|------------|---------|-------|
|
||||
| **Pascal (GTX 10-series)** | H.264, HEVC | 2 concurrent streams max |
|
||||
| **Turing (GTX 16/RTX 20-series)** | H.264, HEVC | 3 concurrent streams, improved quality |
|
||||
| **Ampere (RTX 30-series)** | H.264, HEVC, AV1 | Best performance, AV1 support |
|
||||
| **Ada Lovelace (RTX 40-series)** | H.264, HEVC, AV1 | Dual AV1 encoders, best efficiency |
|
||||
|
||||
### Checking Your Hardware
|
||||
|
||||
Verify NVENC support:
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
Check available encoders in FFmpeg:
|
||||
```bash
|
||||
ffmpeg -encoders | grep nvenc
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
V....D av1_nvenc NVIDIA NVENC av1 encoder (codec av1)
|
||||
V....D h264_nvenc NVIDIA NVENC H.264 encoder (codec h264)
|
||||
V....D hevc_nvenc NVIDIA NVENC hevc encoder (codec hevc)
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### Docker Setup (Recommended)
|
||||
|
||||
1. **Install NVIDIA Container Toolkit** on your host:
|
||||
|
||||
**Ubuntu/Debian:**
|
||||
```bash
|
||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
||||
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
||||
curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
|
||||
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
|
||||
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
**RHEL/CentOS/Fedora:**
|
||||
```bash
|
||||
curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
|
||||
sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
|
||||
sudo yum install -y nvidia-container-toolkit
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
2. **Update Docker Compose:**
|
||||
```yaml
|
||||
services:
|
||||
alchemist:
|
||||
image: ghcr.io/bybrooklyn/alchemist:latest
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
```
|
||||
|
||||
3. **Test GPU Access:**
|
||||
```bash
|
||||
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
|
||||
```
|
||||
|
||||
### Binary Installation
|
||||
|
||||
For binary installations, ensure:
|
||||
1. **NVIDIA drivers** are installed and up-to-date
|
||||
2. **CUDA toolkit** (optional, for development)
|
||||
3. **FFmpeg with NVENC support**
|
||||
|
||||
#### Installing FFmpeg with NVENC
|
||||
|
||||
**Ubuntu/Debian:**
|
||||
```bash
|
||||
sudo apt update
|
||||
sudo apt install ffmpeg
|
||||
# Verify NVENC support
|
||||
ffmpeg -encoders | grep nvenc
|
||||
```
|
||||
|
||||
**From Source:**
|
||||
```bash
|
||||
git clone https://git.ffmpeg.org/ffmpeg.git
|
||||
cd ffmpeg
|
||||
./configure --enable-cuda --enable-nvenc --enable-nonfree
|
||||
make -j$(nproc)
|
||||
sudo make install
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### In Alchemist
|
||||
|
||||
1. Navigate to **Settings** → **Hardware**
|
||||
2. Set **Preferred Vendor** to `nvidia`
|
||||
3. Leave **Device Path** empty (auto-detect)
|
||||
4. Verify detection in the hardware status section
|
||||
|
||||
### Quality Settings
|
||||
|
||||
NVENC quality is controlled by presets and CQ (Constant Quality) values:
|
||||
|
||||
| Profile | NVENC Preset | CQ Value | Use Case |
|
||||
|---------|-------------|----------|----------|
|
||||
| **Quality** | `p7` | 20-24 | Archival, slow encodes |
|
||||
| **Balanced** | `p4` | 25-28 | General purpose |
|
||||
| **Speed** | `p1` | 30-35 | Fast turnaround |
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "No NVENC capable devices found"
|
||||
|
||||
**Causes:**
|
||||
- GPU drivers not installed
|
||||
- Container can't access GPU
|
||||
- Unsupported GPU model
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check drivers:**
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
2. **Verify container access:**
|
||||
```bash
|
||||
docker run --rm --gpus all nvidia/cuda:12.4.1-base-ubuntu22.04 nvidia-smi
|
||||
```
|
||||
|
||||
### "NVENC encoder failed to initialize"
|
||||
|
||||
**Common causes:**
|
||||
- All encode sessions in use
|
||||
- Insufficient GPU memory
|
||||
- Driver version mismatch
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Reduce concurrent jobs:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1
|
||||
```
|
||||
|
||||
2. **Check GPU memory:**
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Quality Testing**: Always test quality before bulk transcoding
|
||||
2. **Temperature Monitoring**: Keep GPU temperatures under 83°C
|
||||
3. **Driver Updates**: Update drivers regularly for bug fixes
|
||||
4. **Backup Strategy**: Keep originals until quality is verified
|
||||
|
||||
## Codec Recommendations
|
||||
|
||||
### AV1 (RTX 30/40 series)
|
||||
- Best compression
|
||||
- Slower encoding
|
||||
- Future-proof format
|
||||
- Ideal for archival
|
||||
|
||||
### HEVC
|
||||
- Excellent compression
|
||||
- Wide compatibility
|
||||
- Good encoding speed
|
||||
- Recommended for most users
|
||||
|
||||
### H.264
|
||||
- Universal compatibility
|
||||
- Fast encoding
|
||||
- Larger file sizes
|
||||
- Good for compatibility requirements
|
||||
754
docs/src/content/docs/troubleshooting.md
Normal file
754
docs/src/content/docs/troubleshooting.md
Normal file
@@ -0,0 +1,754 @@
|
||||
---
|
||||
title: Troubleshooting Guide
|
||||
description: Comprehensive guide to diagnosing and solving common Alchemist issues.
|
||||
---
|
||||
|
||||
# Troubleshooting Guide
|
||||
|
||||
This comprehensive guide helps you diagnose and resolve common issues with Alchemist. Issues are organized by category with step-by-step solutions.
|
||||
|
||||
## General Diagnostics
|
||||
|
||||
### Log Locations
|
||||
|
||||
**Default log locations:**
|
||||
- **Linux/macOS**: `~/.config/alchemist/logs/`
|
||||
- **Windows**: `%APPDATA%\alchemist\logs\`
|
||||
- **Docker**: `/app/data/logs/` (if mounted)
|
||||
|
||||
**View recent logs:**
|
||||
```bash
|
||||
# Real-time log monitoring
|
||||
tail -f ~/.config/alchemist/logs/alchemist.log
|
||||
|
||||
# Last 100 lines
|
||||
tail -100 ~/.config/alchemist/logs/alchemist.log
|
||||
|
||||
# Search for errors
|
||||
grep -i error ~/.config/alchemist/logs/alchemist.log
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable detailed logging:
|
||||
```bash
|
||||
# Environment variable
|
||||
RUST_LOG=debug ./alchemist
|
||||
|
||||
# Or in Docker
|
||||
docker run -e RUST_LOG=debug ghcr.io/bybrooklyn/alchemist:latest
|
||||
```
|
||||
|
||||
### System Health Check
|
||||
|
||||
Verify your setup:
|
||||
```bash
|
||||
# Check FFmpeg
|
||||
ffmpeg -version
|
||||
|
||||
# Test hardware encoders
|
||||
ffmpeg -encoders | grep -E "nvenc|qsv|vaapi|amf|videotoolbox"
|
||||
|
||||
# Check disk space
|
||||
df -h
|
||||
|
||||
# Monitor system resources
|
||||
htop # or top on basic systems
|
||||
```
|
||||
|
||||
## Authentication & Access Issues
|
||||
|
||||
### "Failed to load settings" / 401 Unauthorized
|
||||
|
||||
**Symptoms:**
|
||||
- Can't access web interface
|
||||
- "Authentication failed" messages
|
||||
- Redirected to login repeatedly
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Clear browser storage:**
|
||||
```javascript
|
||||
// Open browser dev tools (F12), then Console tab
|
||||
localStorage.clear();
|
||||
sessionStorage.clear();
|
||||
location.reload();
|
||||
```
|
||||
|
||||
2. **Reset admin password:**
|
||||
```bash
|
||||
# Stop Alchemist, then reset database auth
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "DELETE FROM user_sessions;"
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "UPDATE users SET password_hash = '' WHERE username = 'admin';"
|
||||
```
|
||||
|
||||
3. **Check configuration:**
|
||||
```toml
|
||||
[system]
|
||||
https_only = false # Ensure this is false unless using HTTPS
|
||||
```
|
||||
|
||||
### Can't Access Web Interface
|
||||
|
||||
**Symptoms:**
|
||||
- Browser shows "connection refused"
|
||||
- Timeout errors
|
||||
- Blank page loads
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Verify Alchemist is running:**
|
||||
```bash
|
||||
# Check process
|
||||
ps aux | grep alchemist
|
||||
|
||||
# Check port binding
|
||||
ss -tlnp | grep 3000   # use `netstat -tlnp | grep 3000` on older systems
|
||||
```
|
||||
|
||||
2. **Check firewall settings:**
|
||||
```bash
|
||||
# Linux (ufw)
|
||||
sudo ufw allow 3000
|
||||
|
||||
# Linux (firewall-cmd)
|
||||
sudo firewall-cmd --add-port=3000/tcp --permanent
|
||||
sudo firewall-cmd --reload
|
||||
```
|
||||
|
||||
3. **Try different browsers/incognito mode**
|
||||
|
||||
4. **Check Docker port mapping:**
|
||||
```yaml
|
||||
# Ensure correct port mapping in docker-compose.yml
|
||||
ports:
|
||||
- "3000:3000" # Host:Container
|
||||
```
|
||||
|
||||
## Hardware Detection Issues
|
||||
|
||||
### "No hardware encoder detected"
|
||||
|
||||
**Symptoms:**
|
||||
- Only CPU encoding available
|
||||
- Hardware shows as "Not detected"
|
||||
- Slow transcoding speeds
|
||||
|
||||
**Diagnosis steps:**
|
||||
|
||||
1. **Check hardware detection:**
|
||||
```bash
|
||||
# NVIDIA
|
||||
nvidia-smi
|
||||
|
||||
# Intel
|
||||
ls -la /dev/dri/
|
||||
|
||||
# AMD
|
||||
lspci | grep -i amd
|
||||
|
||||
# Apple (macOS)
|
||||
system_profiler SPHardwareDataType
|
||||
```
|
||||
|
||||
2. **Verify FFmpeg support:**
|
||||
```bash
|
||||
ffmpeg -encoders | grep -E "nvenc|qsv|vaapi|amf|videotoolbox"
|
||||
```
|
||||
|
||||
**Solutions by vendor:**
|
||||
|
||||
#### NVIDIA Issues
|
||||
```bash
|
||||
# Install NVIDIA Container Toolkit (Docker)
|
||||
# Ubuntu/Debian:
|
||||
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
||||
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
|
||||
curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
|
||||
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
|
||||
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
|
||||
sudo systemctl restart docker
|
||||
|
||||
# Test GPU access in container
|
||||
docker run --rm --gpus all nvidia/cuda:12.0.0-base-ubuntu22.04 nvidia-smi
|
||||
```
|
||||
|
||||
#### Intel Issues
|
||||
```bash
|
||||
# Ensure iGPU is enabled in BIOS
|
||||
# Add user to video/render groups
|
||||
sudo usermod -a -G video,render $USER
|
||||
|
||||
# Install Intel media drivers (Ubuntu/Debian)
|
||||
sudo apt install intel-media-va-driver libva-utils
|
||||
|
||||
# Test VAAPI
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
```
|
||||
|
||||
#### AMD Issues
|
||||
```bash
|
||||
# Install Mesa drivers (Ubuntu/Debian)
|
||||
sudo apt install mesa-va-drivers libva-utils
|
||||
|
||||
# Add user to video/render groups
|
||||
sudo usermod -a -G video,render $USER
|
||||
|
||||
# Test VAAPI
|
||||
vainfo --display drm --device /dev/dri/renderD128
|
||||
```
|
||||
|
||||
### Hardware Detected But Encoding Fails
|
||||
|
||||
**Symptoms:**
|
||||
- Hardware shows as detected
|
||||
- Encoding jobs fail with GPU errors
|
||||
- Falls back to CPU
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check GPU memory:**
|
||||
```bash
|
||||
# NVIDIA
|
||||
nvidia-smi
|
||||
|
||||
# Intel/AMD - check system memory if integrated
|
||||
free -h
|
||||
```
|
||||
|
||||
2. **Reduce concurrent jobs:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Start with 1
|
||||
```
|
||||
|
||||
3. **Update drivers:**
|
||||
- NVIDIA: Download from nvidia.com
|
||||
- Intel: Update through Windows Update or Intel Driver Assistant
|
||||
- AMD: Download from amd.com/support
|
||||
|
||||
4. **Check for conflicting processes:**
|
||||
```bash
|
||||
# See what's using the GPU
|
||||
nvidia-smi # NVIDIA
|
||||
intel_gpu_top # Intel
|
||||
```
|
||||
|
||||
## Processing Issues
|
||||
|
||||
### Jobs Stuck in "Queued" State
|
||||
|
||||
**Symptoms:**
|
||||
- Jobs never start processing
|
||||
- Queue doesn't advance
|
||||
- Dashboard shows "paused" or "idle"
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check engine status:**
|
||||
- Navigate to Dashboard
|
||||
- Look for "Paused" indicator
|
||||
- Click "Resume" if available
|
||||
|
||||
2. **Check system resources:**
|
||||
```bash
|
||||
# CPU usage
|
||||
top
|
||||
|
||||
# Memory usage
|
||||
free -h
|
||||
|
||||
# Disk space
|
||||
df -h
|
||||
```
|
||||
|
||||
3. **Restart the processor:**
|
||||
```bash
|
||||
# Binary installation
|
||||
pkill alchemist
|
||||
./alchemist
|
||||
|
||||
# Docker
|
||||
docker restart alchemist
|
||||
```
|
||||
|
||||
4. **Check scheduling windows:**
|
||||
```toml
|
||||
# Ensure schedule allows current time
|
||||
[schedule]
|
||||
[[schedule.windows]]
|
||||
start_time = "00:00" # 24/7 operation
|
||||
end_time = "23:59"
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Jobs Fail Immediately
|
||||
|
||||
**Symptoms:**
|
||||
- Jobs start but fail within seconds
|
||||
- "Encoding failed" messages in logs
|
||||
- No output files created
|
||||
|
||||
**Diagnosis:**
|
||||
|
||||
1. **Check specific error in logs:**
|
||||
```bash
|
||||
grep -A5 -B5 "failed" ~/.config/alchemist/logs/alchemist.log
|
||||
```
|
||||
|
||||
2. **Test FFmpeg command manually:**
|
||||
```bash
|
||||
# Extract the failed command from logs and test
|
||||
ffmpeg -i input.mkv -c:v libx264 -crf 23 test_output.mkv
|
||||
```
|
||||
|
||||
**Common solutions:**
|
||||
|
||||
1. **File permission issues:**
|
||||
```bash
|
||||
# Check file permissions
|
||||
ls -la /path/to/media/
|
||||
|
||||
# Fix permissions if needed
|
||||
chmod 644 /path/to/media/*
|
||||
```
|
||||
|
||||
2. **Corrupt source files:**
|
||||
```bash
|
||||
# Test source file
|
||||
ffmpeg -v error -i input.mkv -f null -
|
||||
```
|
||||
|
||||
3. **Insufficient disk space:**
|
||||
```bash
|
||||
# Check available space
|
||||
df -h
|
||||
|
||||
# Clean up if needed
|
||||
du -sh ~/.config/alchemist/logs/* | sort -h
|
||||
```
|
||||
|
||||
### Poor Quality Output
|
||||
|
||||
**Symptoms:**
|
||||
- Encoded files look worse than originals
|
||||
- Artifacts or blocking visible
|
||||
- Low VMAF scores
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Adjust quality settings:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "quality" # Use highest quality
|
||||
|
||||
# Or manually adjust CRF
|
||||
[transcode.encoder_args]
|
||||
extra_args = ["-crf", "20"] # Lower = better quality
|
||||
```
|
||||
|
||||
2. **Check source file quality:**
|
||||
```bash
|
||||
# Analyze source with ffprobe
|
||||
ffprobe -v quiet -show_format -show_streams input.mkv
|
||||
```
|
||||
|
||||
3. **Enable quality verification:**
|
||||
```toml
|
||||
[quality]
|
||||
enable_vmaf = true
|
||||
min_vmaf_score = 92.0 # Reject low quality transcodes
|
||||
revert_on_low_quality = true
|
||||
```
|
||||
|
||||
4. **Use appropriate codec:**
|
||||
```toml
|
||||
# For maximum quality
|
||||
[transcode]
|
||||
output_codec = "hevc" # Better than H.264
|
||||
quality_profile = "quality"
|
||||
```
|
||||
|
||||
## Performance Issues
|
||||
|
||||
### High CPU Usage During Encoding
|
||||
|
||||
**Symptoms:**
|
||||
- System becomes unresponsive
|
||||
- High CPU temperatures
|
||||
- Fan noise increases
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Verify hardware acceleration:**
|
||||
```bash
|
||||
# Check if GPU is being used
|
||||
nvidia-smi # Should show ffmpeg processes
|
||||
```
|
||||
|
||||
2. **Reduce CPU load:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1
|
||||
threads = 4 # Limit CPU threads
|
||||
|
||||
[system]
|
||||
engine_mode = "background" # Minimal resource usage
|
||||
```
|
||||
|
||||
3. **Enable hardware acceleration:**
|
||||
```toml
|
||||
[hardware]
|
||||
preferred_vendor = "nvidia" # or intel/amd/apple
|
||||
allow_cpu_fallback = false # Force hardware encoding
|
||||
```
|
||||
|
||||
### Slow Encoding Speeds
|
||||
|
||||
**Expected speeds** for reference:
|
||||
- **GPU encoding**: 1-5x realtime
|
||||
- **CPU encoding**: 0.1-2x realtime
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check hardware utilization:**
|
||||
```bash
|
||||
# GPU usage
|
||||
nvidia-smi -l 1 # NVIDIA
|
||||
intel_gpu_top # Intel
|
||||
|
||||
# CPU usage
|
||||
htop
|
||||
```
|
||||
|
||||
2. **Optimize settings for speed:**
|
||||
```toml
|
||||
[transcode]
|
||||
quality_profile = "speed"
|
||||
output_codec = "h264" # Fastest encoding
|
||||
|
||||
[hardware]
|
||||
cpu_preset = "fast" # If using CPU fallback
|
||||
```
|
||||
|
||||
3. **Check thermal throttling:**
|
||||
```bash
|
||||
# Linux
|
||||
sensors
|
||||
|
||||
# macOS
|
||||
sudo powermetrics --samplers smc -n 1 | grep -i temp
|
||||
```
|
||||
|
||||
### High Memory Usage
|
||||
|
||||
**Symptoms:**
|
||||
- System uses excessive RAM
|
||||
- Out of memory errors
|
||||
- System becomes unstable
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Reduce memory usage:**
|
||||
```toml
|
||||
[transcode]
|
||||
concurrent_jobs = 1 # Reduce parallel processing
|
||||
threads = 4 # Lower thread count
|
||||
```
|
||||
|
||||
2. **Check for memory leaks:**
|
||||
```bash
|
||||
# Monitor Alchemist memory usage
|
||||
ps aux | grep alchemist
|
||||
|
||||
# Monitor over time
|
||||
while true; do ps -p $(pgrep alchemist) -o pid,ppid,cmd,%mem,%cpu; sleep 30; done
|
||||
```
|
||||
|
||||
3. **Restart periodically:**
|
||||
```bash
|
||||
# Set up log rotation and periodic restart
|
||||
# Add to crontab for daily restart
|
||||
0 6 * * * docker restart alchemist
|
||||
```
|
||||
|
||||
## Database Issues
|
||||
|
||||
### Database Locked Errors
|
||||
|
||||
**Symptoms:**
|
||||
- "Database is locked" in logs
|
||||
- Web interface becomes unresponsive
|
||||
- Jobs don't update status
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Stop all Alchemist processes:**
|
||||
```bash
|
||||
# Kill all instances
|
||||
pkill -f alchemist
|
||||
|
||||
# Or for Docker
|
||||
docker stop alchemist
|
||||
```
|
||||
|
||||
2. **Check for database corruption:**
|
||||
```bash
|
||||
# Test database integrity
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "PRAGMA integrity_check;"
|
||||
```
|
||||
|
||||
3. **Backup and reset if needed:**
|
||||
```bash
|
||||
# Backup current database
|
||||
cp ~/.config/alchemist/alchemist.db ~/.config/alchemist/alchemist.db.backup
|
||||
|
||||
# If corrupted, reset (loses job history)
|
||||
rm ~/.config/alchemist/alchemist.db
|
||||
# Alchemist will recreate on next start
|
||||
```
|
||||
|
||||
### Migration Errors
|
||||
|
||||
**Symptoms:**
|
||||
- "Migration failed" on startup
|
||||
- Database version mismatch errors
|
||||
- Unable to start after update
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Backup database before fixes:**
|
||||
```bash
|
||||
cp ~/.config/alchemist/alchemist.db ~/.config/alchemist/alchemist.db.pre-fix
|
||||
```
|
||||
|
||||
2. **Check database version:**
|
||||
```bash
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "PRAGMA user_version;"
|
||||
```
|
||||
|
||||
3. **Force migration recovery:**
|
||||
```bash
|
||||
# Stop Alchemist first
|
||||
# Then try manual schema fix (advanced users only)
|
||||
sqlite3 ~/.config/alchemist/alchemist.db
|
||||
# Run appropriate CREATE TABLE statements from migration files
|
||||
```
|
||||
|
||||
## Network & Connectivity
|
||||
|
||||
### API Timeout Errors
|
||||
|
||||
**Symptoms:**
|
||||
- Web interface loads slowly
|
||||
- "Request timeout" errors
|
||||
- Incomplete data loading
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check system load:**
|
||||
```bash
|
||||
uptime
|
||||
htop
|
||||
```
|
||||
|
||||
2. **Increase timeout values:**
|
||||
```toml
|
||||
[system]
|
||||
monitoring_poll_interval = 5.0 # Slower polling
|
||||
```
|
||||
|
||||
3. **Optimize database:**
|
||||
```bash
|
||||
# Vacuum database
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "VACUUM;"
|
||||
|
||||
# Reindex
|
||||
sqlite3 ~/.config/alchemist/alchemist.db "REINDEX;"
|
||||
```
|
||||
|
||||
### Notification Delivery Issues
|
||||
|
||||
**Symptoms:**
|
||||
- Discord/Gotify notifications not received
|
||||
- Webhook timeouts
|
||||
- "Failed to send notification" in logs
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Test webhook manually:**
|
||||
```bash
|
||||
# Test Discord webhook
|
||||
curl -X POST -H "Content-Type: application/json" \
|
||||
-d '{"content":"Test from Alchemist"}' \
|
||||
"YOUR_DISCORD_WEBHOOK_URL"
|
||||
```
|
||||
|
||||
2. **Check firewall/network:**
|
||||
```bash
|
||||
# Test external connectivity
|
||||
curl -I https://discord.com
|
||||
|
||||
# Check DNS resolution
|
||||
nslookup discord.com
|
||||
```
|
||||
|
||||
3. **Verify notification config:**
|
||||
```toml
|
||||
[[notifications.targets]]
|
||||
name = "discord"
|
||||
target_type = "discord"
|
||||
endpoint_url = "https://discord.com/api/webhooks/..."
|
||||
enabled = true
|
||||
events = ["job_complete", "job_failed"]
|
||||
```
|
||||
|
||||
## File System Issues
|
||||
|
||||
### Permission Denied Errors
|
||||
|
||||
**Symptoms:**
|
||||
- Can't read source files
|
||||
- Can't write output files
|
||||
- "Permission denied" in logs
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Check file ownership:**
|
||||
```bash
|
||||
ls -la /path/to/media/
|
||||
|
||||
# Fix ownership if needed
|
||||
sudo chown -R $USER:$USER /path/to/media/
|
||||
```
|
||||
|
||||
2. **Docker user mapping:**
|
||||
```yaml
|
||||
services:
|
||||
alchemist:
|
||||
user: "${UID}:${GID}" # Match host user
|
||||
# or
|
||||
environment:
|
||||
- PUID=1000
|
||||
- PGID=1000
|
||||
```
|
||||
|
||||
3. **SELinux/AppArmor issues (Linux):**
|
||||
```bash
|
||||
# Check SELinux
|
||||
getenforce
|
||||
|
||||
# Temporarily disable for testing
|
||||
sudo setenforce 0
|
||||
|
||||
# Check AppArmor
|
||||
sudo aa-status
|
||||
```
|
||||
|
||||
### Files Not Found
|
||||
|
||||
**Symptoms:**
|
||||
- "File not found" despite file existing
|
||||
- Scan doesn't find media files
|
||||
- Empty libraries
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Verify paths in config:**
|
||||
```toml
|
||||
[scanner]
|
||||
directories = ["/correct/path/to/media"] # Check this path
|
||||
```
|
||||
|
||||
2. **Check file extensions:**
|
||||
```bash
|
||||
# See what files exist
|
||||
find /path/to/media -name "*.mkv" -o -name "*.mp4" -o -name "*.avi" | head -10
|
||||
```
|
||||
|
||||
3. **Test file access:**
|
||||
```bash
|
||||
# Can Alchemist user access the file?
|
||||
stat /path/to/media/movie.mkv
|
||||
```
|
||||
|
||||
## Recovery Procedures
|
||||
|
||||
### Complete Reset
|
||||
|
||||
If all else fails, reset Alchemist to fresh state:
|
||||
|
||||
1. **Backup important data:**
|
||||
```bash
|
||||
cp ~/.config/alchemist/config.toml ~/alchemist-config-backup.toml
|
||||
```
|
||||
|
||||
2. **Stop Alchemist:**
|
||||
```bash
|
||||
pkill -f alchemist
|
||||
# or
|
||||
docker stop alchemist
|
||||
```
|
||||
|
||||
3. **Reset database and config:**
|
||||
```bash
|
||||
rm -rf ~/.config/alchemist/
|
||||
# Alchemist will run setup wizard on next start
|
||||
```
|
||||
|
||||
### Partial Recovery
|
||||
|
||||
Keep configuration but reset job history:
|
||||
|
||||
```bash
|
||||
# Stop Alchemist
|
||||
pkill -f alchemist
|
||||
|
||||
# Reset only job-related tables
|
||||
sqlite3 ~/.config/alchemist/alchemist.db << EOF
|
||||
DELETE FROM jobs;
|
||||
DELETE FROM job_progress;
|
||||
DELETE FROM encoding_sessions;
|
||||
VACUUM;
|
||||
EOF
|
||||
```
|
||||
|
||||
## Getting Help
|
||||
|
||||
When seeking support:
|
||||
|
||||
1. **Gather system information:**
|
||||
```bash
|
||||
# Create debug info file
|
||||
{
|
||||
echo "=== System Info ==="
|
||||
uname -a
|
||||
echo ""
|
||||
echo "=== FFmpeg Version ==="
|
||||
ffmpeg -version
|
||||
echo ""
|
||||
echo "=== Hardware Info ==="
|
||||
lscpu
|
||||
lspci | grep -i vga
|
||||
echo ""
|
||||
echo "=== Recent Logs ==="
|
||||
tail -50 ~/.config/alchemist/logs/alchemist.log
|
||||
} > alchemist-debug.txt
|
||||
```
|
||||
|
||||
2. **Include configuration** (remove sensitive data):
|
||||
```bash
|
||||
# Sanitize config
|
||||
sed -E 's/(password|token|secret|api_key|webhook)[^=]*=.*/\1 = "REDACTED"/' ~/.config/alchemist/config.toml > config-sanitized.toml
|
||||
```
|
||||
|
||||
3. **Describe the issue:**
|
||||
- What were you trying to do?
|
||||
- What happened instead?
|
||||
- When did it start happening?
|
||||
- What changed recently?
|
||||
|
||||
4. **Report issues at:**
|
||||
- GitHub: [github.com/bybrooklyn/alchemist/issues](https://github.com/bybrooklyn/alchemist/issues)
|
||||
- Include debug info and sanitized config
|
||||
@@ -456,6 +456,9 @@ pub struct SystemConfig {
|
||||
pub log_retention_days: Option<u32>,
|
||||
#[serde(default)]
|
||||
pub engine_mode: EngineMode,
|
||||
/// Enable HSTS header (only enable if running behind HTTPS)
|
||||
#[serde(default)]
|
||||
pub https_only: bool,
|
||||
}
|
||||
|
||||
fn default_true() -> bool {
|
||||
@@ -481,6 +484,7 @@ impl Default for SystemConfig {
|
||||
enable_telemetry: default_telemetry(),
|
||||
log_retention_days: default_log_retention_days(),
|
||||
engine_mode: EngineMode::default(),
|
||||
https_only: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -595,6 +599,7 @@ impl Default for Config {
|
||||
enable_telemetry: default_telemetry(),
|
||||
log_retention_days: default_log_retention_days(),
|
||||
engine_mode: EngineMode::default(),
|
||||
https_only: false,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
788
src/db.rs
788
src/db.rs
@@ -1,4 +1,4 @@
|
||||
use crate::error::Result;
|
||||
use crate::error::{AlchemistError, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
@@ -9,6 +9,7 @@ use sqlx::{
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
use tracing::info;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, sqlx::Type)]
|
||||
@@ -68,6 +69,130 @@ pub enum AlchemistEvent {
|
||||
},
|
||||
}
|
||||
|
||||
// New typed event channels for separating high-volume vs low-volume events

/// Per-job events. This is the high-volume stream: `Progress` in particular
/// is emitted repeatedly while a job runs, which is why the jobs channel is
/// given the largest broadcast capacity (see `EventChannels`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "data")]
pub enum JobEvent {
    /// A job transitioned to a new `JobState`.
    StateChanged {
        job_id: i64,
        status: JobState,
    },
    /// Incremental progress for a running job.
    Progress {
        job_id: i64,
        // Presumably 0.0..=100.0 — confirm against the emitter.
        percentage: f64,
        // Progress timestamp in string form; format is set by the emitter.
        time: String,
    },
    /// A recorded decision (action plus human-readable reason) for a job.
    Decision {
        job_id: i64,
        action: String,
        reason: String,
    },
    /// A log line, optionally tied to a specific job.
    Log {
        level: String,
        job_id: Option<i64>,
        message: String,
    },
}
|
||||
|
||||
/// Configuration-change events. Low volume: these fire only when settings
/// or watch folders change, so the channel capacity is small (see
/// `EventChannels`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "data")]
pub enum ConfigEvent {
    /// The full configuration was replaced (boxed to keep the variant small).
    Updated(Box<crate::config::Config>),
    /// A watch-folder path was added.
    WatchFolderAdded(String),
    /// A watch-folder path was removed.
    WatchFolderRemoved(String),
}
|
||||
|
||||
/// System-level events (scans, engine, hardware). Medium volume — more
/// frequent than config changes but far less than job progress.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", content = "data")]
pub enum SystemEvent {
    /// A library scan began.
    ScanStarted,
    /// A library scan finished.
    ScanCompleted,
    // NOTE(review): variants below carry no payload; subscribers are
    // expected to re-query the current state — confirm against consumers.
    /// The engine's status changed.
    EngineStatusChanged,
    /// The hardware state changed.
    HardwareStateChanged,
}
|
||||
|
||||
/// Broadcast senders for the three event streams, split by volume so a
/// burst of job progress events cannot lag config/system subscribers.
/// Capacities noted per field are applied at construction (construction
/// site not shown in this file section).
pub struct EventChannels {
    pub jobs: tokio::sync::broadcast::Sender<JobEvent>, // 1000 capacity - high volume
    pub config: tokio::sync::broadcast::Sender<ConfigEvent>, // 50 capacity - rare
    pub system: tokio::sync::broadcast::Sender<SystemEvent>, // 100 capacity - medium
}
|
||||
|
||||
// Convert JobEvent to legacy AlchemistEvent for backwards compatibility
|
||||
impl From<JobEvent> for AlchemistEvent {
|
||||
fn from(job_event: JobEvent) -> Self {
|
||||
match job_event {
|
||||
JobEvent::StateChanged { job_id, status } => {
|
||||
AlchemistEvent::JobStateChanged { job_id, status }
|
||||
}
|
||||
JobEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
} => AlchemistEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
},
|
||||
JobEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
} => AlchemistEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
},
|
||||
JobEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
} => AlchemistEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert AlchemistEvent to JobEvent for migration
|
||||
impl From<AlchemistEvent> for JobEvent {
|
||||
fn from(alchemist_event: AlchemistEvent) -> Self {
|
||||
match alchemist_event {
|
||||
AlchemistEvent::JobStateChanged { job_id, status } => {
|
||||
JobEvent::StateChanged { job_id, status }
|
||||
}
|
||||
AlchemistEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
} => JobEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
},
|
||||
AlchemistEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
} => JobEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
},
|
||||
AlchemistEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
} => JobEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for JobState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let s = match self {
|
||||
@@ -453,6 +578,24 @@ pub struct Decision {
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// Default timeout for potentially slow database queries
|
||||
const QUERY_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
|
||||
/// Execute a query with a timeout to prevent blocking the job loop
|
||||
async fn timed_query<T, F, Fut>(operation: &str, f: F) -> Result<T>
|
||||
where
|
||||
F: FnOnce() -> Fut,
|
||||
Fut: std::future::Future<Output = Result<T>>,
|
||||
{
|
||||
match timeout(QUERY_TIMEOUT, f()).await {
|
||||
Ok(result) => result,
|
||||
Err(_) => Err(AlchemistError::QueryTimeout(
|
||||
QUERY_TIMEOUT.as_secs(),
|
||||
operation.to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Db {
|
||||
pool: SqlitePool,
|
||||
@@ -657,22 +800,26 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_all_jobs(&self) -> Result<Vec<Job>> {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.archived = 0
|
||||
ORDER BY j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_all_jobs", || async {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.archived = 0
|
||||
ORDER BY j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
Ok(jobs)
|
||||
Ok(jobs)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_job_decision(&self, job_id: i64) -> Result<Option<Decision>> {
|
||||
@@ -687,19 +834,23 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_stats(&self) -> Result<serde_json::Value> {
|
||||
let stats = sqlx::query("SELECT status, count(*) as count FROM jobs GROUP BY status")
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_stats", || async {
|
||||
let stats = sqlx::query("SELECT status, count(*) as count FROM jobs GROUP BY status")
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut map = serde_json::Map::new();
|
||||
for row in stats {
|
||||
use sqlx::Row;
|
||||
let status: String = row.get("status");
|
||||
let count: i64 = row.get("count");
|
||||
map.insert(status, serde_json::Value::Number(count.into()));
|
||||
}
|
||||
let mut map = serde_json::Map::new();
|
||||
for row in stats {
|
||||
use sqlx::Row;
|
||||
let status: String = row.get("status");
|
||||
let count: i64 = row.get("count");
|
||||
map.insert(status, serde_json::Value::Number(count.into()));
|
||||
}
|
||||
|
||||
Ok(serde_json::Value::Object(map))
|
||||
Ok(serde_json::Value::Object(map))
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Update job progress (for resume support)
|
||||
@@ -800,83 +951,91 @@ impl Db {
|
||||
|
||||
/// Get jobs by status
|
||||
pub async fn get_jobs_by_status(&self, status: JobState) -> Result<Vec<Job>> {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.status = ? AND j.archived = 0
|
||||
ORDER BY j.priority DESC, j.created_at ASC",
|
||||
)
|
||||
.bind(status)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_jobs_by_status", || async {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.status = ? AND j.archived = 0
|
||||
ORDER BY j.priority DESC, j.created_at ASC",
|
||||
)
|
||||
.bind(status)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
Ok(jobs)
|
||||
Ok(jobs)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Get jobs with filtering, sorting and pagination
|
||||
pub async fn get_jobs_filtered(&self, query: JobFilterQuery) -> Result<Vec<Job>> {
|
||||
let mut qb = sqlx::QueryBuilder::<sqlx::Sqlite>::new(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE 1 = 1 "
|
||||
);
|
||||
let pool = &self.pool;
|
||||
timed_query("get_jobs_filtered", || async {
|
||||
let mut qb = sqlx::QueryBuilder::<sqlx::Sqlite>::new(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE 1 = 1 "
|
||||
);
|
||||
|
||||
match query.archived {
|
||||
Some(true) => {
|
||||
qb.push(" AND j.archived = 1 ");
|
||||
}
|
||||
Some(false) => {
|
||||
qb.push(" AND j.archived = 0 ");
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
|
||||
if let Some(statuses) = query.statuses {
|
||||
if !statuses.is_empty() {
|
||||
qb.push(" AND j.status IN (");
|
||||
let mut separated = qb.separated(", ");
|
||||
for status in statuses {
|
||||
separated.push_bind(status);
|
||||
match query.archived {
|
||||
Some(true) => {
|
||||
qb.push(" AND j.archived = 1 ");
|
||||
}
|
||||
separated.push_unseparated(") ");
|
||||
Some(false) => {
|
||||
qb.push(" AND j.archived = 0 ");
|
||||
}
|
||||
None => {}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(search) = query.search {
|
||||
qb.push(" AND j.input_path LIKE ");
|
||||
qb.push_bind(format!("%{}%", search));
|
||||
}
|
||||
if let Some(ref statuses) = query.statuses {
|
||||
if !statuses.is_empty() {
|
||||
qb.push(" AND j.status IN (");
|
||||
let mut separated = qb.separated(", ");
|
||||
for status in statuses {
|
||||
separated.push_bind(*status);
|
||||
}
|
||||
separated.push_unseparated(") ");
|
||||
}
|
||||
}
|
||||
|
||||
qb.push(" ORDER BY ");
|
||||
let sort_col = match query.sort_by.as_deref() {
|
||||
Some("created_at") => "j.created_at",
|
||||
Some("updated_at") => "j.updated_at",
|
||||
Some("input_path") => "j.input_path",
|
||||
Some("size") => "(SELECT input_size_bytes FROM encode_stats WHERE job_id = j.id)",
|
||||
_ => "j.updated_at",
|
||||
};
|
||||
qb.push(sort_col);
|
||||
qb.push(if query.sort_desc { " DESC" } else { " ASC" });
|
||||
if let Some(ref search) = query.search {
|
||||
qb.push(" AND j.input_path LIKE ");
|
||||
qb.push_bind(format!("%{}%", search));
|
||||
}
|
||||
|
||||
qb.push(" LIMIT ");
|
||||
qb.push_bind(query.limit);
|
||||
qb.push(" OFFSET ");
|
||||
qb.push_bind(query.offset);
|
||||
qb.push(" ORDER BY ");
|
||||
let sort_col = match query.sort_by.as_deref() {
|
||||
Some("created_at") => "j.created_at",
|
||||
Some("updated_at") => "j.updated_at",
|
||||
Some("input_path") => "j.input_path",
|
||||
Some("size") => "(SELECT input_size_bytes FROM encode_stats WHERE job_id = j.id)",
|
||||
_ => "j.updated_at",
|
||||
};
|
||||
qb.push(sort_col);
|
||||
qb.push(if query.sort_desc { " DESC" } else { " ASC" });
|
||||
|
||||
let jobs = qb.build_query_as::<Job>().fetch_all(&self.pool).await?;
|
||||
Ok(jobs)
|
||||
qb.push(" LIMIT ");
|
||||
qb.push_bind(query.limit);
|
||||
qb.push(" OFFSET ");
|
||||
qb.push_bind(query.offset);
|
||||
|
||||
let jobs = qb.build_query_as::<Job>().fetch_all(pool).await?;
|
||||
Ok(jobs)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn batch_cancel_jobs(&self, ids: &[i64]) -> Result<u64> {
|
||||
@@ -1590,156 +1749,173 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_aggregated_stats(&self) -> Result<AggregatedStats> {
|
||||
let row = sqlx::query(
|
||||
"SELECT
|
||||
(SELECT COUNT(*) FROM jobs) as total_jobs,
|
||||
(SELECT COUNT(*) FROM jobs WHERE status = 'completed') as completed_jobs,
|
||||
COALESCE(SUM(input_size_bytes), 0) as total_input_size,
|
||||
COALESCE(SUM(output_size_bytes), 0) as total_output_size,
|
||||
AVG(vmaf_score) as avg_vmaf,
|
||||
COALESCE(SUM(encode_time_seconds), 0.0) as total_encode_time
|
||||
FROM encode_stats",
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_aggregated_stats", || async {
|
||||
let row = sqlx::query(
|
||||
"SELECT
|
||||
(SELECT COUNT(*) FROM jobs) as total_jobs,
|
||||
(SELECT COUNT(*) FROM jobs WHERE status = 'completed') as completed_jobs,
|
||||
COALESCE(SUM(input_size_bytes), 0) as total_input_size,
|
||||
COALESCE(SUM(output_size_bytes), 0) as total_output_size,
|
||||
AVG(vmaf_score) as avg_vmaf,
|
||||
COALESCE(SUM(encode_time_seconds), 0.0) as total_encode_time
|
||||
FROM encode_stats",
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
Ok(AggregatedStats {
|
||||
total_jobs: row.get("total_jobs"),
|
||||
completed_jobs: row.get("completed_jobs"),
|
||||
total_input_size: row.get("total_input_size"),
|
||||
total_output_size: row.get("total_output_size"),
|
||||
avg_vmaf: row.get("avg_vmaf"),
|
||||
total_encode_time_seconds: row.get("total_encode_time"),
|
||||
Ok(AggregatedStats {
|
||||
total_jobs: row.get("total_jobs"),
|
||||
completed_jobs: row.get("completed_jobs"),
|
||||
total_input_size: row.get("total_input_size"),
|
||||
total_output_size: row.get("total_output_size"),
|
||||
avg_vmaf: row.get("avg_vmaf"),
|
||||
total_encode_time_seconds: row.get("total_encode_time"),
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Get daily statistics for the last N days (for time-series charts)
|
||||
pub async fn get_daily_stats(&self, days: i32) -> Result<Vec<DailyStats>> {
|
||||
let rows = sqlx::query(
|
||||
"SELECT
|
||||
DATE(e.created_at) as date,
|
||||
COUNT(*) as jobs_completed,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved,
|
||||
COALESCE(SUM(e.input_size_bytes), 0) as total_input_bytes,
|
||||
COALESCE(SUM(e.output_size_bytes), 0) as total_output_bytes
|
||||
FROM encode_stats e
|
||||
WHERE e.created_at >= DATE('now', ? || ' days')
|
||||
GROUP BY DATE(e.created_at)
|
||||
ORDER BY date ASC",
|
||||
)
|
||||
.bind(format!("-{}", days))
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
let days_str = format!("-{}", days);
|
||||
timed_query("get_daily_stats", || async {
|
||||
let rows = sqlx::query(
|
||||
"SELECT
|
||||
DATE(e.created_at) as date,
|
||||
COUNT(*) as jobs_completed,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved,
|
||||
COALESCE(SUM(e.input_size_bytes), 0) as total_input_bytes,
|
||||
COALESCE(SUM(e.output_size_bytes), 0) as total_output_bytes
|
||||
FROM encode_stats e
|
||||
WHERE e.created_at >= DATE('now', ? || ' days')
|
||||
GROUP BY DATE(e.created_at)
|
||||
ORDER BY date ASC",
|
||||
)
|
||||
.bind(&days_str)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let stats = rows
|
||||
.iter()
|
||||
.map(|row| DailyStats {
|
||||
date: row.get("date"),
|
||||
jobs_completed: row.get("jobs_completed"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
total_input_bytes: row.get("total_input_bytes"),
|
||||
total_output_bytes: row.get("total_output_bytes"),
|
||||
})
|
||||
.collect();
|
||||
let stats = rows
|
||||
.iter()
|
||||
.map(|row| DailyStats {
|
||||
date: row.get("date"),
|
||||
jobs_completed: row.get("jobs_completed"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
total_input_bytes: row.get("total_input_bytes"),
|
||||
total_output_bytes: row.get("total_output_bytes"),
|
||||
})
|
||||
.collect();
|
||||
|
||||
Ok(stats)
|
||||
Ok(stats)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Get detailed per-job encoding statistics (most recent first)
|
||||
pub async fn get_detailed_encode_stats(&self, limit: i32) -> Result<Vec<DetailedEncodeStats>> {
|
||||
let stats = sqlx::query_as::<_, DetailedEncodeStats>(
|
||||
"SELECT
|
||||
e.job_id,
|
||||
j.input_path,
|
||||
e.input_size_bytes,
|
||||
e.output_size_bytes,
|
||||
e.compression_ratio,
|
||||
e.encode_time_seconds,
|
||||
e.encode_speed,
|
||||
e.avg_bitrate_kbps,
|
||||
e.vmaf_score,
|
||||
e.created_at
|
||||
FROM encode_stats e
|
||||
JOIN jobs j ON e.job_id = j.id
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT ?",
|
||||
)
|
||||
.bind(limit)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_detailed_encode_stats", || async {
|
||||
let stats = sqlx::query_as::<_, DetailedEncodeStats>(
|
||||
"SELECT
|
||||
e.job_id,
|
||||
j.input_path,
|
||||
e.input_size_bytes,
|
||||
e.output_size_bytes,
|
||||
e.compression_ratio,
|
||||
e.encode_time_seconds,
|
||||
e.encode_speed,
|
||||
e.avg_bitrate_kbps,
|
||||
e.vmaf_score,
|
||||
e.created_at
|
||||
FROM encode_stats e
|
||||
JOIN jobs j ON e.job_id = j.id
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT ?",
|
||||
)
|
||||
.bind(limit)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
Ok(stats)
|
||||
Ok(stats)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_savings_summary(&self) -> Result<SavingsSummary> {
|
||||
let totals = sqlx::query(
|
||||
"SELECT
|
||||
COALESCE(SUM(input_size_bytes), 0) as total_input_bytes,
|
||||
COALESCE(SUM(output_size_bytes), 0) as total_output_bytes,
|
||||
COUNT(*) as job_count
|
||||
FROM encode_stats
|
||||
WHERE output_size_bytes IS NOT NULL",
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_savings_summary", || async {
|
||||
let totals = sqlx::query(
|
||||
"SELECT
|
||||
COALESCE(SUM(input_size_bytes), 0) as total_input_bytes,
|
||||
COALESCE(SUM(output_size_bytes), 0) as total_output_bytes,
|
||||
COUNT(*) as job_count
|
||||
FROM encode_stats
|
||||
WHERE output_size_bytes IS NOT NULL",
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
let total_input_bytes: i64 = totals.get("total_input_bytes");
|
||||
let total_output_bytes: i64 = totals.get("total_output_bytes");
|
||||
let job_count: i64 = totals.get("job_count");
|
||||
let total_bytes_saved = (total_input_bytes - total_output_bytes).max(0);
|
||||
let savings_percent = if total_input_bytes > 0 {
|
||||
(total_bytes_saved as f64 / total_input_bytes as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
let total_input_bytes: i64 = totals.get("total_input_bytes");
|
||||
let total_output_bytes: i64 = totals.get("total_output_bytes");
|
||||
let job_count: i64 = totals.get("job_count");
|
||||
let total_bytes_saved = (total_input_bytes - total_output_bytes).max(0);
|
||||
let savings_percent = if total_input_bytes > 0 {
|
||||
(total_bytes_saved as f64 / total_input_bytes as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
|
||||
let savings_by_codec = sqlx::query(
|
||||
"SELECT
|
||||
COALESCE(NULLIF(TRIM(e.output_codec), ''), 'unknown') as codec,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved
|
||||
FROM encode_stats e
|
||||
JOIN jobs j ON j.id = e.job_id
|
||||
WHERE e.output_size_bytes IS NOT NULL
|
||||
GROUP BY codec
|
||||
ORDER BY bytes_saved DESC, codec ASC",
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|row| CodecSavings {
|
||||
codec: row.get("codec"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let savings_over_time = sqlx::query(
|
||||
"SELECT
|
||||
DATE(e.created_at) as date,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved
|
||||
FROM encode_stats e
|
||||
WHERE e.output_size_bytes IS NOT NULL
|
||||
AND e.created_at >= datetime('now', '-30 days')
|
||||
GROUP BY DATE(e.created_at)
|
||||
ORDER BY date ASC",
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|row| DailySavings {
|
||||
date: row.get("date"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(SavingsSummary {
|
||||
total_input_bytes,
|
||||
total_output_bytes,
|
||||
total_bytes_saved,
|
||||
savings_percent,
|
||||
job_count,
|
||||
savings_by_codec,
|
||||
savings_over_time,
|
||||
let savings_by_codec = sqlx::query(
|
||||
"SELECT
|
||||
COALESCE(NULLIF(TRIM(e.output_codec), ''), 'unknown') as codec,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved
|
||||
FROM encode_stats e
|
||||
JOIN jobs j ON j.id = e.job_id
|
||||
WHERE e.output_size_bytes IS NOT NULL
|
||||
GROUP BY codec
|
||||
ORDER BY bytes_saved DESC, codec ASC",
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|row| CodecSavings {
|
||||
codec: row.get("codec"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let savings_over_time = sqlx::query(
|
||||
"SELECT
|
||||
DATE(e.created_at) as date,
|
||||
COALESCE(SUM(e.input_size_bytes - e.output_size_bytes), 0) as bytes_saved
|
||||
FROM encode_stats e
|
||||
WHERE e.output_size_bytes IS NOT NULL
|
||||
AND e.created_at >= datetime('now', '-30 days')
|
||||
GROUP BY DATE(e.created_at)
|
||||
ORDER BY date ASC",
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|row| DailySavings {
|
||||
date: row.get("date"),
|
||||
bytes_saved: row.get("bytes_saved"),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(SavingsSummary {
|
||||
total_input_bytes,
|
||||
total_output_bytes,
|
||||
total_bytes_saved,
|
||||
savings_percent,
|
||||
job_count,
|
||||
savings_by_codec,
|
||||
savings_over_time,
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Batch update job statuses (for batch operations)
|
||||
@@ -1822,26 +1998,30 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_job_stats(&self) -> Result<JobStats> {
|
||||
let rows = sqlx::query("SELECT status, COUNT(*) as count FROM jobs GROUP BY status")
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_job_stats", || async {
|
||||
let rows = sqlx::query("SELECT status, COUNT(*) as count FROM jobs GROUP BY status")
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
|
||||
let mut stats = JobStats::default();
|
||||
for row in rows {
|
||||
let status_str: String = row.get("status");
|
||||
let count: i64 = row.get("count");
|
||||
let mut stats = JobStats::default();
|
||||
for row in rows {
|
||||
let status_str: String = row.get("status");
|
||||
let count: i64 = row.get("count");
|
||||
|
||||
// Map status string to JobStats fields
|
||||
// Assuming JobState serialization matches stored strings ("queued", "active", etc)
|
||||
match status_str.as_str() {
|
||||
"queued" => stats.queued += count,
|
||||
"encoding" | "analyzing" | "remuxing" | "resuming" => stats.active += count,
|
||||
"completed" => stats.completed += count,
|
||||
"failed" | "cancelled" => stats.failed += count,
|
||||
_ => {}
|
||||
// Map status string to JobStats fields
|
||||
// Assuming JobState serialization matches stored strings ("queued", "active", etc)
|
||||
match status_str.as_str() {
|
||||
"queued" => stats.queued += count,
|
||||
"encoding" | "analyzing" | "remuxing" | "resuming" => stats.active += count,
|
||||
"completed" => stats.completed += count,
|
||||
"failed" | "cancelled" => stats.failed += count,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(stats)
|
||||
Ok(stats)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn add_log(&self, level: &str, job_id: Option<i64>, message: &str) -> Result<()> {
|
||||
@@ -2021,22 +2201,26 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_health_summary(&self) -> Result<HealthSummary> {
|
||||
let row = sqlx::query(
|
||||
"SELECT
|
||||
(SELECT COUNT(*) FROM jobs WHERE last_health_check IS NOT NULL) as total_checked,
|
||||
(SELECT COUNT(*)
|
||||
FROM jobs
|
||||
WHERE health_issues IS NOT NULL AND TRIM(health_issues) != '') as issues_found,
|
||||
(SELECT MAX(started_at) FROM health_scan_runs) as last_run",
|
||||
)
|
||||
.fetch_one(&self.pool)
|
||||
.await?;
|
||||
let pool = &self.pool;
|
||||
timed_query("get_health_summary", || async {
|
||||
let row = sqlx::query(
|
||||
"SELECT
|
||||
(SELECT COUNT(*) FROM jobs WHERE last_health_check IS NOT NULL) as total_checked,
|
||||
(SELECT COUNT(*)
|
||||
FROM jobs
|
||||
WHERE health_issues IS NOT NULL AND TRIM(health_issues) != '') as issues_found,
|
||||
(SELECT MAX(started_at) FROM health_scan_runs) as last_run",
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
Ok(HealthSummary {
|
||||
total_checked: row.get("total_checked"),
|
||||
issues_found: row.get("issues_found"),
|
||||
last_run: row.get("last_run"),
|
||||
Ok(HealthSummary {
|
||||
total_checked: row.get("total_checked"),
|
||||
issues_found: row.get("issues_found"),
|
||||
last_run: row.get("last_run"),
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn create_health_scan_run(&self) -> Result<i64> {
|
||||
@@ -2069,46 +2253,54 @@ impl Db {
|
||||
}
|
||||
|
||||
pub async fn get_jobs_needing_health_check(&self) -> Result<Vec<Job>> {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.status = 'completed'
|
||||
AND (
|
||||
j.last_health_check IS NULL
|
||||
OR j.last_health_check < datetime('now', '-7 days')
|
||||
)
|
||||
ORDER BY COALESCE(j.last_health_check, '1970-01-01') ASC, j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
Ok(jobs)
|
||||
let pool = &self.pool;
|
||||
timed_query("get_jobs_needing_health_check", || async {
|
||||
let jobs = sqlx::query_as::<_, Job>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at
|
||||
FROM jobs j
|
||||
WHERE j.status = 'completed'
|
||||
AND (
|
||||
j.last_health_check IS NULL
|
||||
OR j.last_health_check < datetime('now', '-7 days')
|
||||
)
|
||||
ORDER BY COALESCE(j.last_health_check, '1970-01-01') ASC, j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
Ok(jobs)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_jobs_with_health_issues(&self) -> Result<Vec<JobWithHealthIssueRow>> {
|
||||
let jobs = sqlx::query_as::<_, JobWithHealthIssueRow>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at,
|
||||
j.health_issues
|
||||
FROM jobs j
|
||||
WHERE j.archived = 0
|
||||
AND j.health_issues IS NOT NULL
|
||||
AND TRIM(j.health_issues) != ''
|
||||
ORDER BY j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(&self.pool)
|
||||
.await?;
|
||||
Ok(jobs)
|
||||
let pool = &self.pool;
|
||||
timed_query("get_jobs_with_health_issues", || async {
|
||||
let jobs = sqlx::query_as::<_, JobWithHealthIssueRow>(
|
||||
"SELECT j.id, j.input_path, j.output_path, j.status,
|
||||
(SELECT reason FROM decisions WHERE job_id = j.id ORDER BY created_at DESC LIMIT 1) as decision_reason,
|
||||
COALESCE(j.priority, 0) as priority,
|
||||
COALESCE(CAST(j.progress AS REAL), 0.0) as progress,
|
||||
COALESCE(j.attempt_count, 0) as attempt_count,
|
||||
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
|
||||
j.created_at, j.updated_at,
|
||||
j.health_issues
|
||||
FROM jobs j
|
||||
WHERE j.archived = 0
|
||||
AND j.health_issues IS NOT NULL
|
||||
AND TRIM(j.health_issues) != ''
|
||||
ORDER BY j.updated_at DESC",
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?;
|
||||
Ok(jobs)
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn reset_auth(&self) -> Result<()> {
|
||||
@@ -2150,6 +2342,20 @@ pub struct Session {
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
|
||||
/// Hash a session token using SHA256 for secure storage.
|
||||
///
|
||||
/// # Security: Timing Attack Resistance
|
||||
///
|
||||
/// Session tokens are hashed before storage and lookup. Token validation uses
|
||||
/// SQL `WHERE token = ?` with the hashed value, so the comparison occurs in
|
||||
/// SQLite rather than in Rust code. This is inherently constant-time from the
|
||||
/// application's perspective because:
|
||||
/// 1. The database performs the comparison, not our code
|
||||
/// 2. Database query time doesn't leak information about partial matches
|
||||
/// 3. No early-exit comparison in application code
|
||||
///
|
||||
/// This design makes timing attacks infeasible without requiring the `subtle`
|
||||
/// crate for constant-time comparison.
|
||||
fn hash_session_token(token: &str) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(token.as_bytes());
|
||||
|
||||
@@ -41,6 +41,9 @@ pub enum AlchemistError {
|
||||
#[error("Job paused")]
|
||||
Paused,
|
||||
|
||||
#[error("Query timeout after {0}s: {1}")]
|
||||
QueryTimeout(u64, String),
|
||||
|
||||
#[error("Unknown error: {0}")]
|
||||
Unknown(String),
|
||||
}
|
||||
|
||||
27
src/main.rs
27
src/main.rs
@@ -1,3 +1,4 @@
|
||||
use alchemist::db::EventChannels;
|
||||
use alchemist::error::Result;
|
||||
use alchemist::system::hardware;
|
||||
use alchemist::{Agent, Transcoder, config, db, runtime};
|
||||
@@ -376,8 +377,21 @@ async fn run() -> Result<()> {
|
||||
}
|
||||
info!("");
|
||||
|
||||
// 3. Initialize Broadcast Channel, Orchestrator, and Processor
|
||||
// 3. Initialize Broadcast Channels, Orchestrator, and Processor
|
||||
let services_start = Instant::now();
|
||||
|
||||
// Create separate event channels by type and volume
|
||||
let (jobs_tx, _jobs_rx) = broadcast::channel(1000); // High volume - job events
|
||||
let (config_tx, _config_rx) = broadcast::channel(50); // Low volume - config events
|
||||
let (system_tx, _system_rx) = broadcast::channel(100); // Medium volume - system events
|
||||
|
||||
let event_channels = Arc::new(EventChannels {
|
||||
jobs: jobs_tx,
|
||||
config: config_tx,
|
||||
system: system_tx,
|
||||
});
|
||||
|
||||
// Keep legacy channel for transition compatibility
|
||||
let (tx, _rx) = broadcast::channel(100);
|
||||
|
||||
// Initialize Notification Manager
|
||||
@@ -425,6 +439,7 @@ async fn run() -> Result<()> {
|
||||
config.clone(),
|
||||
hardware_state.clone(),
|
||||
tx.clone(),
|
||||
event_channels.clone(),
|
||||
args.dry_run,
|
||||
)
|
||||
.await,
|
||||
@@ -591,6 +606,7 @@ async fn run() -> Result<()> {
|
||||
agent,
|
||||
transcoder,
|
||||
scheduler: scheduler_handle,
|
||||
event_channels,
|
||||
tx,
|
||||
setup_required: setup_mode,
|
||||
config_path: config_path.clone(),
|
||||
@@ -731,6 +747,14 @@ mod tests {
|
||||
let hardware_probe_log = Arc::new(RwLock::new(hardware::HardwareProbeLog::default()));
|
||||
let transcoder = Arc::new(Transcoder::new());
|
||||
let (tx, _rx) = broadcast::channel(8);
|
||||
let (jobs_tx, _) = broadcast::channel(100);
|
||||
let (config_tx, _) = broadcast::channel(10);
|
||||
let (system_tx, _) = broadcast::channel(10);
|
||||
let event_channels = Arc::new(EventChannels {
|
||||
jobs: jobs_tx,
|
||||
config: config_tx,
|
||||
system: system_tx,
|
||||
});
|
||||
let agent = Arc::new(
|
||||
Agent::new(
|
||||
db.clone(),
|
||||
@@ -738,6 +762,7 @@ mod tests {
|
||||
config_state.clone(),
|
||||
hardware_state.clone(),
|
||||
tx,
|
||||
event_channels,
|
||||
true,
|
||||
)
|
||||
.await,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use crate::db::{AlchemistEvent, Db, Job};
|
||||
use crate::db::{AlchemistEvent, Db, EventChannels, Job, JobEvent};
|
||||
use crate::error::Result;
|
||||
use crate::media::pipeline::{
|
||||
Encoder, ExecutionResult, ExecutionStats, Executor, MediaAnalysis, TranscodePlan,
|
||||
@@ -17,6 +17,7 @@ pub struct FfmpegExecutor {
|
||||
db: Arc<Db>,
|
||||
hw_info: Option<HardwareInfo>,
|
||||
event_tx: Arc<broadcast::Sender<AlchemistEvent>>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
dry_run: bool,
|
||||
}
|
||||
|
||||
@@ -26,6 +27,7 @@ impl FfmpegExecutor {
|
||||
db: Arc<Db>,
|
||||
hw_info: Option<HardwareInfo>,
|
||||
event_tx: Arc<broadcast::Sender<AlchemistEvent>>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
dry_run: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -33,6 +35,7 @@ impl FfmpegExecutor {
|
||||
db,
|
||||
hw_info,
|
||||
event_tx,
|
||||
event_channels,
|
||||
dry_run,
|
||||
}
|
||||
}
|
||||
@@ -42,15 +45,22 @@ struct JobExecutionObserver {
|
||||
job_id: i64,
|
||||
db: Arc<Db>,
|
||||
event_tx: Arc<broadcast::Sender<AlchemistEvent>>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
last_progress: Mutex<Option<(f64, Instant)>>,
|
||||
}
|
||||
|
||||
impl JobExecutionObserver {
|
||||
fn new(job_id: i64, db: Arc<Db>, event_tx: Arc<broadcast::Sender<AlchemistEvent>>) -> Self {
|
||||
fn new(
|
||||
job_id: i64,
|
||||
db: Arc<Db>,
|
||||
event_tx: Arc<broadcast::Sender<AlchemistEvent>>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
) -> Self {
|
||||
Self {
|
||||
job_id,
|
||||
db,
|
||||
event_tx,
|
||||
event_channels,
|
||||
last_progress: Mutex::new(None),
|
||||
}
|
||||
}
|
||||
@@ -58,6 +68,13 @@ impl JobExecutionObserver {
|
||||
|
||||
impl AsyncExecutionObserver for JobExecutionObserver {
|
||||
async fn on_log(&self, message: String) {
|
||||
// Send to typed channel
|
||||
let _ = self.event_channels.jobs.send(JobEvent::Log {
|
||||
level: "info".to_string(),
|
||||
job_id: Some(self.job_id),
|
||||
message: message.clone(),
|
||||
});
|
||||
// Also send to legacy channel for backwards compatibility
|
||||
let _ = self.event_tx.send(AlchemistEvent::Log {
|
||||
level: "info".to_string(),
|
||||
job_id: Some(self.job_id),
|
||||
@@ -100,6 +117,13 @@ impl AsyncExecutionObserver for JobExecutionObserver {
|
||||
}
|
||||
}
|
||||
|
||||
// Send to typed channel
|
||||
let _ = self.event_channels.jobs.send(JobEvent::Progress {
|
||||
job_id: self.job_id,
|
||||
percentage,
|
||||
time: progress.time.clone(),
|
||||
});
|
||||
// Also send to legacy channel for backwards compatibility
|
||||
let _ = self.event_tx.send(AlchemistEvent::Progress {
|
||||
job_id: self.job_id,
|
||||
percentage,
|
||||
@@ -132,6 +156,7 @@ impl Executor for FfmpegExecutor {
|
||||
job.id,
|
||||
self.db.clone(),
|
||||
self.event_tx.clone(),
|
||||
self.event_channels.clone(),
|
||||
));
|
||||
|
||||
self.transcoder
|
||||
@@ -350,7 +375,15 @@ mod tests {
|
||||
.await?;
|
||||
let job = db.get_job_by_input_path("input.mkv").await?.expect("job");
|
||||
let (tx, mut rx) = broadcast::channel(8);
|
||||
let observer = JobExecutionObserver::new(job.id, db.clone(), Arc::new(tx));
|
||||
let (jobs_tx, _) = broadcast::channel(100);
|
||||
let (config_tx, _) = broadcast::channel(10);
|
||||
let (system_tx, _) = broadcast::channel(10);
|
||||
let event_channels = Arc::new(crate::db::EventChannels {
|
||||
jobs: jobs_tx,
|
||||
config: config_tx,
|
||||
system: system_tx,
|
||||
});
|
||||
let observer = JobExecutionObserver::new(job.id, db.clone(), Arc::new(tx), event_channels);
|
||||
|
||||
LocalExecutionObserver::on_log(&observer, "ffmpeg line".to_string()).await;
|
||||
LocalExecutionObserver::on_progress(
|
||||
|
||||
@@ -424,6 +424,7 @@ pub struct Pipeline {
|
||||
config: Arc<RwLock<crate::config::Config>>,
|
||||
hardware_state: HardwareState,
|
||||
tx: Arc<broadcast::Sender<crate::db::AlchemistEvent>>,
|
||||
event_channels: Arc<crate::db::EventChannels>,
|
||||
dry_run: bool,
|
||||
}
|
||||
|
||||
@@ -452,6 +453,7 @@ impl Pipeline {
|
||||
config: Arc<RwLock<crate::config::Config>>,
|
||||
hardware_state: HardwareState,
|
||||
tx: Arc<broadcast::Sender<crate::db::AlchemistEvent>>,
|
||||
event_channels: Arc<crate::db::EventChannels>,
|
||||
dry_run: bool,
|
||||
) -> Self {
|
||||
Self {
|
||||
@@ -460,6 +462,7 @@ impl Pipeline {
|
||||
config,
|
||||
hardware_state,
|
||||
tx,
|
||||
event_channels,
|
||||
dry_run,
|
||||
}
|
||||
}
|
||||
@@ -552,6 +555,11 @@ async fn skip_reason_for_discovered_path(
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Creates a temporary output path for encoding.
|
||||
/// Uses a predictable `.alchemist.tmp` suffix - this is acceptable because:
|
||||
/// 1. The suffix is unique to Alchemist and unlikely to conflict
|
||||
/// 2. Files are created in user-owned media directories
|
||||
/// 3. Same-file concurrent transcodes are prevented at the job level
|
||||
fn temp_output_path_for(path: &Path) -> PathBuf {
|
||||
let parent = path.parent().unwrap_or_else(|| Path::new(""));
|
||||
let filename = path
|
||||
@@ -800,6 +808,7 @@ impl Pipeline {
|
||||
self.db.clone(),
|
||||
hw_info.clone(),
|
||||
self.tx.clone(),
|
||||
self.event_channels.clone(),
|
||||
self.dry_run,
|
||||
);
|
||||
|
||||
@@ -1427,12 +1436,21 @@ mod tests {
|
||||
detection_notes: Vec::new(),
|
||||
}));
|
||||
let (tx, _rx) = broadcast::channel(8);
|
||||
let (jobs_tx, _) = broadcast::channel(100);
|
||||
let (config_tx, _) = broadcast::channel(10);
|
||||
let (system_tx, _) = broadcast::channel(10);
|
||||
let event_channels = Arc::new(crate::db::EventChannels {
|
||||
jobs: jobs_tx,
|
||||
config: config_tx,
|
||||
system: system_tx,
|
||||
});
|
||||
let pipeline = Pipeline::new(
|
||||
db.clone(),
|
||||
Arc::new(Transcoder::new()),
|
||||
config.clone(),
|
||||
hardware_state,
|
||||
Arc::new(tx),
|
||||
event_channels,
|
||||
true,
|
||||
);
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::Transcoder;
|
||||
use crate::config::Config;
|
||||
use crate::db::{AlchemistEvent, Db};
|
||||
use crate::db::{AlchemistEvent, Db, EventChannels, JobEvent, SystemEvent};
|
||||
use crate::error::Result;
|
||||
use crate::media::pipeline::Pipeline;
|
||||
use crate::media::scanner::Scanner;
|
||||
@@ -17,6 +17,7 @@ pub struct Agent {
|
||||
config: Arc<RwLock<Config>>,
|
||||
hardware_state: HardwareState,
|
||||
tx: Arc<broadcast::Sender<AlchemistEvent>>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
semaphore: Arc<Semaphore>,
|
||||
semaphore_limit: Arc<AtomicUsize>,
|
||||
held_permits: Arc<Mutex<Vec<OwnedSemaphorePermit>>>,
|
||||
@@ -35,6 +36,7 @@ impl Agent {
|
||||
config: Arc<RwLock<Config>>,
|
||||
hardware_state: HardwareState,
|
||||
tx: broadcast::Sender<AlchemistEvent>,
|
||||
event_channels: Arc<EventChannels>,
|
||||
dry_run: bool,
|
||||
) -> Self {
|
||||
// Read config asynchronously to avoid blocking atomic in async runtime
|
||||
@@ -49,6 +51,7 @@ impl Agent {
|
||||
config,
|
||||
hardware_state,
|
||||
tx: Arc::new(tx),
|
||||
event_channels,
|
||||
semaphore: Arc::new(Semaphore::new(concurrent_jobs)),
|
||||
semaphore_limit: Arc::new(AtomicUsize::new(concurrent_jobs)),
|
||||
held_permits: Arc::new(Mutex::new(Vec::new())),
|
||||
@@ -63,6 +66,10 @@ impl Agent {
|
||||
|
||||
pub async fn scan_and_enqueue(&self, directories: Vec<PathBuf>) -> Result<()> {
|
||||
info!("Starting manual scan of directories: {:?}", directories);
|
||||
|
||||
// Notify scan started via typed channel
|
||||
let _ = self.event_channels.system.send(SystemEvent::ScanStarted);
|
||||
|
||||
let files = tokio::task::spawn_blocking(move || {
|
||||
let scanner = Scanner::new();
|
||||
scanner.scan(directories)
|
||||
@@ -79,10 +86,20 @@ impl Agent {
|
||||
}
|
||||
}
|
||||
|
||||
// Notify via typed channel
|
||||
let _ = self.event_channels.jobs.send(JobEvent::StateChanged {
|
||||
job_id: 0,
|
||||
status: crate::db::JobState::Queued,
|
||||
});
|
||||
// Also send to legacy channel for backwards compatibility
|
||||
let _ = self.tx.send(AlchemistEvent::JobStateChanged {
|
||||
job_id: 0,
|
||||
status: crate::db::JobState::Queued,
|
||||
}); // Trigger UI refresh
|
||||
});
|
||||
|
||||
// Notify scan completed
|
||||
let _ = self.event_channels.system.send(SystemEvent::ScanCompleted);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -286,7 +303,50 @@ impl Agent {
|
||||
self.config.clone(),
|
||||
self.hardware_state.clone(),
|
||||
self.tx.clone(),
|
||||
self.event_channels.clone(),
|
||||
self.dry_run,
|
||||
)
|
||||
}
|
||||
|
||||
/// Gracefully shutdown the agent.
|
||||
/// Drains active jobs and waits up to `timeout` for them to complete.
|
||||
/// After timeout, forcefully cancels remaining jobs.
|
||||
pub async fn graceful_shutdown(&self, timeout: std::time::Duration) {
|
||||
info!("Initiating graceful shutdown...");
|
||||
|
||||
// Stop accepting new jobs
|
||||
self.pause();
|
||||
self.drain();
|
||||
|
||||
// Wait for active jobs to complete (with timeout)
|
||||
let start = std::time::Instant::now();
|
||||
let check_interval = std::time::Duration::from_millis(500);
|
||||
|
||||
while start.elapsed() < timeout {
|
||||
let active = self.orchestrator.active_job_count();
|
||||
if active == 0 {
|
||||
info!("All jobs completed gracefully.");
|
||||
return;
|
||||
}
|
||||
info!(
|
||||
"Waiting for {} active job(s) to complete... ({:.0}s remaining)",
|
||||
active,
|
||||
(timeout - start.elapsed()).as_secs_f64()
|
||||
);
|
||||
tokio::time::sleep(check_interval).await;
|
||||
}
|
||||
|
||||
// Timeout reached - force cancel remaining jobs
|
||||
let cancelled = self.orchestrator.cancel_all_jobs();
|
||||
if cancelled > 0 {
|
||||
tracing::warn!(
|
||||
"Shutdown timeout reached. Forcefully cancelled {} job(s).",
|
||||
cancelled
|
||||
);
|
||||
// Give FFmpeg processes a moment to terminate
|
||||
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
|
||||
}
|
||||
|
||||
info!("Graceful shutdown complete.");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -112,6 +112,34 @@ impl Transcoder {
|
||||
}
|
||||
}
|
||||
|
||||
/// Cancel all currently running jobs. Used during graceful shutdown.
|
||||
pub fn cancel_all_jobs(&self) -> usize {
|
||||
let mut channels = match self.cancel_channels.lock() {
|
||||
Ok(channels) => channels,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Cancel channels lock poisoned during shutdown, recovering: {}",
|
||||
e
|
||||
);
|
||||
e.into_inner()
|
||||
}
|
||||
};
|
||||
let count = channels.len();
|
||||
for (job_id, tx) in channels.drain() {
|
||||
info!("Cancelling job {} for shutdown", job_id);
|
||||
let _ = tx.send(());
|
||||
}
|
||||
count
|
||||
}
|
||||
|
||||
/// Returns the number of currently active transcode jobs.
|
||||
pub fn active_job_count(&self) -> usize {
|
||||
match self.cancel_channels.lock() {
|
||||
Ok(channels) => channels.len(),
|
||||
Err(e) => e.into_inner().len(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn transcode_media(&self, request: TranscodeRequest<'_>) -> Result<()> {
|
||||
if request.dry_run {
|
||||
info!(
|
||||
|
||||
4727
src/server.rs
4727
src/server.rs
File diff suppressed because it is too large
Load Diff
128
src/server/auth.rs
Normal file
128
src/server/auth.rs
Normal file
@@ -0,0 +1,128 @@
|
||||
//! Authentication handlers: login, logout, session management.
|
||||
|
||||
use super::AppState;
|
||||
use super::middleware::{allow_login_attempt, get_cookie_value};
|
||||
use argon2::{
|
||||
Argon2,
|
||||
password_hash::{PasswordHash, PasswordVerifier},
|
||||
};
|
||||
use axum::{
|
||||
extract::{ConnectInfo, Request, State},
|
||||
http::{StatusCode, header},
|
||||
response::IntoResponse,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use rand::Rng;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// JSON body for the login endpoint.
#[derive(serde::Deserialize)]
pub(crate) struct LoginPayload {
    username: String,
    password: String,
}

/// Handle a login request.
///
/// Rate-limits attempts per client IP, verifies the password against the
/// stored Argon2 hash, then creates a 30-day session and returns the token
/// as an HttpOnly cookie.
pub(crate) async fn login_handler(
    State(state): State<Arc<AppState>>,
    ConnectInfo(addr): ConnectInfo<SocketAddr>,
    axum::Json(payload): axum::Json<LoginPayload>,
) -> impl IntoResponse {
    if !allow_login_attempt(&state, addr.ip()).await {
        return (StatusCode::TOO_MANY_REQUESTS, "Too many requests").into_response();
    }

    // Unknown user and DB error deliberately collapse into the same 401 so
    // the response body does not reveal which usernames exist.
    // NOTE(review): this early return skips the Argon2 verification, so
    // response *timing* may still distinguish unknown users — consider
    // verifying against a dummy hash; confirm before relying on this for
    // enumeration resistance.
    let user = match state.db.get_user_by_username(&payload.username).await {
        Ok(Some(u)) => u,
        _ => return (StatusCode::UNAUTHORIZED, "Invalid credentials").into_response(),
    };

    // The stored hash must be a parseable PHC-format string.
    let parsed_hash = match PasswordHash::new(&user.password_hash) {
        Ok(h) => h,
        Err(_) => {
            return (StatusCode::INTERNAL_SERVER_ERROR, "Invalid hash format").into_response();
        }
    };

    if Argon2::default()
        .verify_password(payload.password.as_bytes(), &parsed_hash)
        .is_err()
    {
        return (StatusCode::UNAUTHORIZED, "Invalid credentials").into_response();
    }

    // Create session: 64 random alphanumeric characters as the token.
    let token: String = rand::rng()
        .sample_iter(rand::distr::Alphanumeric)
        .take(64)
        .map(char::from)
        .collect();

    // 30 days, matching the cookie's Max-Age in build_session_cookie.
    let expires_at = Utc::now() + chrono::Duration::days(30);

    if let Err(e) = state.db.create_session(user.id, &token, expires_at).await {
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to create session: {}", e),
        )
            .into_response();
    }

    let cookie = build_session_cookie(&token);
    (
        [(header::SET_COOKIE, cookie)],
        axum::Json(serde_json::json!({ "status": "ok" })),
    )
        .into_response()
}
|
||||
|
||||
pub(crate) async fn logout_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
req: Request,
|
||||
) -> impl IntoResponse {
|
||||
let token = req
|
||||
.headers()
|
||||
.get("Authorization")
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.and_then(|auth_str| auth_str.strip_prefix("Bearer ").map(str::to_string))
|
||||
.or_else(|| get_cookie_value(req.headers(), "alchemist_session"));
|
||||
|
||||
if let Some(t) = token {
|
||||
let _ = state.db.delete_session(&t).await;
|
||||
}
|
||||
|
||||
let cookie = build_clear_session_cookie();
|
||||
(
|
||||
[(header::SET_COOKIE, cookie)],
|
||||
axum::Json(serde_json::json!({ "status": "ok" })),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) fn build_session_cookie(token: &str) -> String {
|
||||
let mut cookie = format!(
|
||||
"alchemist_session={}; HttpOnly; SameSite=Lax; Path=/; Max-Age=2592000",
|
||||
token
|
||||
);
|
||||
if secure_cookie_enabled() {
|
||||
cookie.push_str("; Secure");
|
||||
}
|
||||
cookie
|
||||
}
|
||||
|
||||
pub(crate) fn build_clear_session_cookie() -> String {
|
||||
let mut cookie = "alchemist_session=; HttpOnly; SameSite=Lax; Path=/; Max-Age=0".to_string();
|
||||
if secure_cookie_enabled() {
|
||||
cookie.push_str("; Secure");
|
||||
}
|
||||
cookie
|
||||
}
|
||||
|
||||
/// Whether the `Secure` attribute should be appended to session cookies.
///
/// Controlled by `ALCHEMIST_COOKIE_SECURE` ("1"/"true"/"yes"/"on" enable it,
/// anything else disables it, case-insensitive). When unset, release builds
/// default to secure cookies and debug builds do not.
fn secure_cookie_enabled() -> bool {
    if let Ok(value) = std::env::var("ALCHEMIST_COOKIE_SECURE") {
        let normalized = value.trim().to_ascii_lowercase();
        matches!(normalized.as_str(), "1" | "true" | "yes" | "on")
    } else {
        !cfg!(debug_assertions)
    }
}
|
||||
512
src/server/jobs.rs
Normal file
512
src/server/jobs.rs
Normal file
@@ -0,0 +1,512 @@
|
||||
//! Job CRUD, batch operations, queue control handlers.
|
||||
|
||||
use super::{AppState, is_row_not_found};
|
||||
use crate::db::{Job, JobState};
|
||||
use crate::error::Result;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Minimal job view included in a 409 "blocked" response.
#[derive(Serialize)]
struct BlockedJob {
    id: i64,
    status: JobState,
}

/// Body of a 409 CONFLICT response listing the jobs that block an action.
#[derive(Serialize)]
struct BlockedJobsResponse {
    message: String,
    blocked: Vec<BlockedJob>,
}

/// Build a 409 CONFLICT response naming each job that prevented the
/// requested action (e.g. delete/restart while jobs are still active).
pub(crate) fn blocked_jobs_response(message: impl Into<String>, blocked: &[Job]) -> Response {
    let payload = BlockedJobsResponse {
        message: message.into(),
        blocked: blocked
            .iter()
            .map(|job| BlockedJob {
                id: job.id,
                status: job.status,
            })
            .collect(),
    };
    (StatusCode::CONFLICT, axum::Json(payload)).into_response()
}
|
||||
|
||||
/// Try to cancel `job` according to its current state.
///
/// Returns `Ok(true)` when a cancellation was initiated, `Ok(false)` when
/// the job cannot be cancelled (terminal state, or the transcoder no longer
/// tracks it). Propagates database errors.
pub(crate) async fn request_job_cancel(state: &AppState, job: &Job) -> Result<bool> {
    match job.status {
        // Queued: no process is running yet; flip the DB status directly.
        JobState::Queued => {
            state
                .db
                .update_job_status(job.id, JobState::Cancelled)
                .await?;
            Ok(true)
        }
        // Analyzing/Resuming: signal the transcoder first; only mark the
        // row cancelled if the transcoder actually knew about the job.
        JobState::Analyzing | JobState::Resuming => {
            if !state.transcoder.cancel_job(job.id) {
                return Ok(false);
            }
            state
                .db
                .update_job_status(job.id, JobState::Cancelled)
                .await?;
            Ok(true)
        }
        // Encoding/Remuxing: signal only — the transcoder updates the DB
        // itself when the process stops (see job lifecycle handling).
        JobState::Encoding | JobState::Remuxing => Ok(state.transcoder.cancel_job(job.id)),
        // Terminal states cannot be cancelled.
        _ => Ok(false),
    }
}
|
||||
|
||||
/// Query parameters for the jobs table endpoint.
///
/// `sort` is a legacy alias for `sort_by` (`sort_by` wins when both are
/// present). `status` is a comma-separated list of job state names.
/// `archived` accepts "true"/"false"; any other value (or absence) is
/// treated as "false".
#[derive(Deserialize)]
pub(crate) struct JobTableParams {
    limit: Option<i64>,
    page: Option<i64>,
    status: Option<String>,
    search: Option<String>,
    sort: Option<String>,
    sort_by: Option<String>,
    sort_desc: Option<bool>,
    archived: Option<String>,
}

/// List jobs with paging, status/search filtering, and sorting.
pub(crate) async fn jobs_table_handler(
    State(state): State<Arc<AppState>>,
    axum::extract::Query(params): axum::extract::Query<JobTableParams>,
) -> impl IntoResponse {
    let JobTableParams {
        limit,
        page,
        status,
        search,
        sort,
        sort_by,
        sort_desc,
        archived,
    } = params;

    // Clamp paging: 1..=200 rows per page, 1-based page numbers.
    let limit = limit.unwrap_or(50).clamp(1, 200);
    let page = page.unwrap_or(1).max(1);
    let offset = (page - 1) * limit;

    // Parse the status filter via serde so the accepted spellings match the
    // JobState serialization exactly. Unknown names are silently dropped;
    // an empty/all-invalid list means "no status filter".
    let statuses = if let Some(s) = status {
        let list: Vec<JobState> = s
            .split(',')
            .filter_map(|s| serde_json::from_value(serde_json::Value::String(s.to_string())).ok())
            .collect();
        if list.is_empty() { None } else { Some(list) }
    } else {
        None
    };

    // Archived jobs are hidden unless archived=true was explicitly sent.
    let archived = match archived.as_deref() {
        Some("true") => Some(true),
        Some("false") => Some(false),
        Some(_) | None => Some(false),
    };

    match state
        .db
        .get_jobs_filtered(crate::db::JobFilterQuery {
            limit,
            offset,
            statuses,
            search,
            // Legacy `sort` is the fallback when `sort_by` is absent.
            sort_by: sort_by.or(sort),
            sort_desc: sort_desc.unwrap_or(false),
            archived,
        })
        .await
    {
        Ok(jobs) => axum::Json(jobs).into_response(),
        Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    }
}
|
||||
|
||||
/// JSON body for the batch jobs endpoint: an action name plus target ids.
#[derive(Deserialize)]
pub(crate) struct BatchActionPayload {
    action: String,
    ids: Vec<i64>,
}

/// Apply `cancel`, `delete`, or `restart` to a set of jobs at once.
///
/// `cancel` is applied per job and reports how many actually cancelled.
/// `delete`/`restart` are all-or-nothing: if any targeted job is still
/// active the whole request is rejected with 409 listing the blockers.
pub(crate) async fn batch_jobs_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<BatchActionPayload>,
) -> impl IntoResponse {
    let jobs = match state.db.get_jobs_by_ids(&payload.ids).await {
        Ok(jobs) => jobs,
        Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    };

    match payload.action.as_str() {
        "cancel" => {
            let mut count = 0_u64;
            for job in &jobs {
                match request_job_cancel(&state, job).await {
                    Ok(true) => count += 1,
                    // Job not cancellable (terminal state) — skip silently.
                    Ok(false) => {}
                    // Job vanished between fetch and cancel — skip silently.
                    Err(e) if is_row_not_found(&e) => {}
                    Err(e) => {
                        return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response();
                    }
                }
            }
            axum::Json(serde_json::json!({ "count": count })).into_response()
        }
        "delete" | "restart" => {
            // NOTE(review): this check races with job state changes — a job
            // could become active between this check and the batch DB call;
            // confirm whether that window is acceptable.
            let blocked: Vec<_> = jobs.iter().filter(|job| job.is_active()).cloned().collect();
            if !blocked.is_empty() {
                return blocked_jobs_response(
                    format!("{} is blocked while jobs are active", payload.action),
                    &blocked,
                );
            }

            let result = if payload.action == "delete" {
                state.db.batch_delete_jobs(&payload.ids).await
            } else {
                state.db.batch_restart_jobs(&payload.ids).await
            };

            match result {
                Ok(count) => axum::Json(serde_json::json!({ "count": count })).into_response(),
                Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
            }
        }
        _ => (StatusCode::BAD_REQUEST, "Invalid action").into_response(),
    }
}
|
||||
|
||||
pub(crate) async fn cancel_job_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(id): Path<i64>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_job_by_id(id).await {
|
||||
Ok(Some(job)) => match request_job_cancel(&state, &job).await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) if is_row_not_found(&e) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
},
|
||||
Ok(None) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn restart_failed_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.restart_failed_jobs().await {
|
||||
Ok(count) => {
|
||||
let message = if count == 0 {
|
||||
"No failed or cancelled jobs were waiting to be retried.".to_string()
|
||||
} else if count == 1 {
|
||||
"Queued 1 failed or cancelled job for retry.".to_string()
|
||||
} else {
|
||||
format!("Queued {count} failed or cancelled jobs for retry.")
|
||||
};
|
||||
axum::Json(serde_json::json!({ "count": count, "message": message })).into_response()
|
||||
}
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn clear_completed_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.clear_completed_jobs().await {
|
||||
Ok(count) => {
|
||||
let message = if count == 0 {
|
||||
"No completed jobs were waiting to be cleared.".to_string()
|
||||
} else if count == 1 {
|
||||
"Cleared 1 completed job from the queue. Historical stats were preserved."
|
||||
.to_string()
|
||||
} else {
|
||||
format!(
|
||||
"Cleared {count} completed jobs from the queue. Historical stats were preserved."
|
||||
)
|
||||
};
|
||||
axum::Json(serde_json::json!({ "count": count, "message": message })).into_response()
|
||||
}
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn restart_job_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(id): Path<i64>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_job_by_id(id).await {
|
||||
Ok(Some(job)) => {
|
||||
if job.is_active() {
|
||||
return blocked_jobs_response("restart is blocked while the job is active", &[job]);
|
||||
}
|
||||
if let Err(e) = state.db.batch_restart_jobs(&[job.id]).await {
|
||||
return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response();
|
||||
}
|
||||
StatusCode::OK.into_response()
|
||||
}
|
||||
Ok(None) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn delete_job_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(id): Path<i64>,
|
||||
) -> impl IntoResponse {
|
||||
let job = match state.db.get_job_by_id(id).await {
|
||||
Ok(Some(job)) => job,
|
||||
Ok(None) => return StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
};
|
||||
|
||||
if job.is_active() {
|
||||
return blocked_jobs_response("delete is blocked while the job is active", &[job]);
|
||||
}
|
||||
|
||||
match state.db.delete_job(id).await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) if is_row_not_found(&e) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub(crate) struct UpdateJobPriorityPayload {
|
||||
priority: i32,
|
||||
}
|
||||
|
||||
pub(crate) async fn update_job_priority_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(id): Path<i64>,
|
||||
axum::Json(payload): axum::Json<UpdateJobPriorityPayload>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.set_job_priority(id, payload.priority).await {
|
||||
Ok(_) => axum::Json(serde_json::json!({ "id": id, "priority": payload.priority }))
|
||||
.into_response(),
|
||||
Err(e) if is_row_not_found(&e) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Full job detail payload: the job row plus optional probe metadata,
/// encode statistics, recent logs, and a failure summary.
#[derive(Serialize)]
pub(crate) struct JobDetailResponse {
    job: Job,
    metadata: Option<crate::media::pipeline::MediaMetadata>,
    encode_stats: Option<crate::db::DetailedEncodeStats>,
    job_logs: Vec<crate::db::LogEntry>,
    job_failure_summary: Option<String>,
}

/// Fetch a single job together with derived detail information.
pub(crate) async fn get_job_detail_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
) -> impl IntoResponse {
    let job = match state.db.get_job_by_id(id).await {
        Ok(Some(j)) => j,
        Ok(None) => return StatusCode::NOT_FOUND.into_response(),
        Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    };

    // Avoid long probes while the job is still active.
    let metadata = match job.status {
        JobState::Queued | JobState::Analyzing | JobState::Encoding | JobState::Remuxing => None,
        _ => {
            let analyzer = crate::media::analyzer::FfmpegAnalyzer;
            use crate::media::pipeline::Analyzer;
            // Probe failures are non-fatal; the field just stays None.
            analyzer
                .analyze(std::path::Path::new(&job.input_path))
                .await
                .ok()
                .map(|analysis| analysis.metadata)
        }
    };

    // Try to get encode stats (using the subquery result or a specific query)
    // For now we'll just query the encode_stats table if completed
    let encode_stats = if job.status == JobState::Completed {
        state.db.get_encode_stats_by_job_id(id).await.ok()
    } else {
        None
    };

    // Most recent log entries for this job (capped at 200).
    let job_logs = match state.db.get_logs_for_job(id, 200).await {
        Ok(logs) => logs,
        Err(err) => return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    };

    // For failed jobs, surface the last error-level log line as a summary.
    let job_failure_summary = if job.status == JobState::Failed {
        job_logs
            .iter()
            .rev()
            .find(|entry| entry.level.eq_ignore_ascii_case("error"))
            .map(|entry| entry.message.clone())
    } else {
        None
    };

    axum::Json(JobDetailResponse {
        job,
        metadata,
        encode_stats,
        job_logs,
        job_failure_summary,
    })
    .into_response()
}
|
||||
|
||||
// Engine control handlers

/// Manually pause job scheduling.
pub(crate) async fn pause_engine_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    state.agent.pause();
    axum::Json(serde_json::json!({ "status": "paused" }))
}

/// Resume job scheduling after a manual pause.
pub(crate) async fn resume_engine_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    state.agent.resume();
    axum::Json(serde_json::json!({ "status": "running" }))
}

/// Enter drain mode: stop picking up new jobs (running ones are untouched
/// here — drain semantics are implemented by the agent).
pub(crate) async fn drain_engine_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    state.agent.drain();
    axum::Json(serde_json::json!({ "status": "draining" }))
}

/// Leave drain mode and return to normal running.
pub(crate) async fn stop_drain_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    state.agent.stop_drain();
    axum::Json(serde_json::json!({ "status": "running" }))
}
|
||||
|
||||
pub(crate) async fn engine_status_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
axum::Json(serde_json::json!({
|
||||
"status": if state.agent.is_draining() {
|
||||
"draining"
|
||||
} else if state.agent.is_paused() {
|
||||
"paused"
|
||||
} else {
|
||||
"running"
|
||||
},
|
||||
"manual_paused": state.agent.is_manual_paused(),
|
||||
"scheduler_paused": state.agent.is_scheduler_paused(),
|
||||
"draining": state.agent.is_draining(),
|
||||
"mode": state.agent.current_mode().await.as_str(),
|
||||
"concurrent_limit": state.agent.concurrent_jobs_limit(),
|
||||
"is_manual_override": state.agent.is_manual_override(),
|
||||
}))
|
||||
}
|
||||
|
||||
pub(crate) async fn get_engine_mode_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
let cpu_count = {
|
||||
let sys = state.sys.lock().await;
|
||||
sys.cpus().len()
|
||||
};
|
||||
drop(config);
|
||||
axum::Json(serde_json::json!({
|
||||
"mode": state.agent.current_mode().await.as_str(),
|
||||
"is_manual_override": state.agent.is_manual_override(),
|
||||
"concurrent_limit": state.agent.concurrent_jobs_limit(),
|
||||
"cpu_count": cpu_count,
|
||||
"computed_limits": {
|
||||
"background": crate::config::EngineMode::Background
|
||||
.concurrent_jobs_for_cpu_count(cpu_count),
|
||||
"balanced": crate::config::EngineMode::Balanced
|
||||
.concurrent_jobs_for_cpu_count(cpu_count),
|
||||
"throughput": crate::config::EngineMode::Throughput
|
||||
.concurrent_jobs_for_cpu_count(cpu_count),
|
||||
}
|
||||
}))
|
||||
}
|
||||
|
||||
/// JSON body for changing the engine mode.
#[derive(Deserialize)]
pub(crate) struct SetEngineModePayload {
    mode: crate::config::EngineMode,
    // Optional manual override of concurrent jobs.
    // If provided, bypasses mode auto-computation.
    concurrent_jobs_override: Option<usize>,
    // Optional manual thread override (0 = auto).
    threads_override: Option<usize>,
}

/// Change the engine mode, optionally overriding the concurrency limit and
/// transcode thread count, then persist the new mode to the config file.
pub(crate) async fn set_engine_mode_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<SetEngineModePayload>,
) -> impl IntoResponse {
    // CPU count drives the auto-computed concurrency for each mode.
    let cpu_count = {
        let sys = state.sys.lock().await;
        sys.cpus().len()
    };

    if let Some(override_jobs) = payload.concurrent_jobs_override {
        if override_jobs == 0 {
            return (
                StatusCode::BAD_REQUEST,
                "concurrent_jobs_override must be > 0",
            )
                .into_response();
        }
        // Manual override: pin the limit and record the mode without
        // recomputing from the CPU count.
        state.agent.set_manual_override(true);
        state.agent.set_concurrent_jobs(override_jobs).await;
        *state.agent.engine_mode.write().await = payload.mode;
    } else {
        state.agent.apply_mode(payload.mode, cpu_count).await;
    }

    // Apply thread override to config if provided
    if let Some(threads) = payload.threads_override {
        let mut config = state.config.write().await;
        config.transcode.threads = threads;
    }

    // Persist mode to config.
    // NOTE(review): the write lock is released before the read lock below
    // is taken for saving, so a concurrent config change could be persisted
    // together with this one — confirm whether that window matters.
    {
        let mut config = state.config.write().await;
        config.system.engine_mode = payload.mode;
    }
    let config = state.config.read().await;
    if let Err(e) = super::save_config_or_response(&state, &config).await {
        return *e;
    }

    axum::Json(serde_json::json!({
        "status": "ok",
        "mode": payload.mode.as_str(),
        "concurrent_limit": state.agent.concurrent_jobs_limit(),
        "is_manual_override": state.agent.is_manual_override(),
    }))
    .into_response()
}
|
||||
|
||||
// Logs handlers
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub(crate) struct LogParams {
|
||||
page: Option<i64>,
|
||||
limit: Option<i64>,
|
||||
}
|
||||
|
||||
pub(crate) async fn logs_history_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::extract::Query(params): axum::extract::Query<LogParams>,
|
||||
) -> impl IntoResponse {
|
||||
let limit = params.limit.unwrap_or(50).clamp(1, 200);
|
||||
let page = params.page.unwrap_or(1).max(1);
|
||||
let offset = (page - 1) * limit;
|
||||
|
||||
match state.db.get_logs(limit, offset).await {
|
||||
Ok(logs) => axum::Json(logs).into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn clear_logs_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match state.db.clear_logs().await {
|
||||
Ok(_) => StatusCode::OK.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
210
src/server/middleware.rs
Normal file
210
src/server/middleware.rs
Normal file
@@ -0,0 +1,210 @@
|
||||
//! Authentication, rate limiting, and security middleware.
|
||||
|
||||
use super::AppState;
|
||||
use axum::{
|
||||
extract::{ConnectInfo, Request, State},
|
||||
http::{StatusCode, header},
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::time::Instant;
|
||||
use tokio::time::Duration;
|
||||
|
||||
/// Per-IP token-bucket state used by the rate limiters.
pub(crate) struct RateLimitEntry {
    // Fractional tokens currently available; one request costs 1.0.
    pub(crate) tokens: f64,
    // Instant of the last refill; also used to expire idle entries.
    pub(crate) last_refill: Instant,
}

// Login attempts: burst of 10, refilling at 1 token/second per IP.
pub(crate) const LOGIN_RATE_LIMIT_CAPACITY: f64 = 10.0;
pub(crate) const LOGIN_RATE_LIMIT_REFILL_PER_SEC: f64 = 1.0;
// All API requests: burst of 120, refilling at 60 tokens/second per IP.
pub(crate) const GLOBAL_RATE_LIMIT_CAPACITY: f64 = 120.0;
pub(crate) const GLOBAL_RATE_LIMIT_REFILL_PER_SEC: f64 = 60.0;
|
||||
|
||||
/// Middleware to add security headers to all responses.
|
||||
pub(crate) async fn security_headers_middleware(request: Request, next: Next) -> Response {
|
||||
let mut response = next.run(request).await;
|
||||
let headers = response.headers_mut();
|
||||
|
||||
// Prevent clickjacking
|
||||
headers.insert(header::X_FRAME_OPTIONS, "DENY".parse().unwrap());
|
||||
|
||||
// Prevent MIME type sniffing
|
||||
headers.insert(header::X_CONTENT_TYPE_OPTIONS, "nosniff".parse().unwrap());
|
||||
|
||||
// XSS protection (legacy but still useful)
|
||||
headers.insert(
|
||||
"X-XSS-Protection"
|
||||
.parse::<axum::http::HeaderName>()
|
||||
.unwrap(),
|
||||
"1; mode=block".parse().unwrap(),
|
||||
);
|
||||
|
||||
// Content Security Policy - allows inline scripts/styles for the SPA
|
||||
// This is permissive enough for the app while still providing protection
|
||||
headers.insert(
|
||||
header::CONTENT_SECURITY_POLICY,
|
||||
"default-src 'self'; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; connect-src 'self'; font-src 'self'; frame-ancestors 'none'"
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
|
||||
// Referrer policy
|
||||
headers.insert(
|
||||
header::REFERRER_POLICY,
|
||||
"strict-origin-when-cross-origin".parse().unwrap(),
|
||||
);
|
||||
|
||||
// Permissions policy (restrict browser features)
|
||||
headers.insert(
|
||||
"Permissions-Policy"
|
||||
.parse::<axum::http::HeaderName>()
|
||||
.unwrap(),
|
||||
"geolocation=(), microphone=(), camera=()".parse().unwrap(),
|
||||
);
|
||||
|
||||
response
|
||||
}
|
||||
|
||||
/// Authentication gate for `/api` routes.
///
/// Public endpoints (setup, login/logout, health/ready) pass straight
/// through. While first-run setup is pending, a few extra endpoints the
/// setup wizard needs are also open. Everything else under `/api` requires
/// a `Bearer` token or the `alchemist_session` cookie resolving to a valid
/// session. Non-API paths are never blocked here.
pub(crate) async fn auth_middleware(
    State(state): State<Arc<AppState>>,
    req: Request,
    next: Next,
) -> Response {
    let path = req.uri().path();

    // 1. API Protection: Only lock down /api routes
    if path.starts_with("/api") {
        // Public API endpoints
        if path.starts_with("/api/setup")
            || path.starts_with("/api/auth/login")
            || path.starts_with("/api/auth/logout")
            || path == "/api/health"
            || path == "/api/ready"
        {
            return next.run(req).await;
        }

        // During first-run setup, the wizard needs hardware info, the file
        // browser, and the settings bundle without a session.
        if state.setup_required.load(Ordering::Relaxed) && path == "/api/system/hardware" {
            return next.run(req).await;
        }
        if state.setup_required.load(Ordering::Relaxed) && path.starts_with("/api/fs/") {
            return next.run(req).await;
        }
        if state.setup_required.load(Ordering::Relaxed) && path == "/api/settings/bundle" {
            return next.run(req).await;
        }

        // Protected API endpoints -> Require Token
        let mut token = req
            .headers()
            .get("Authorization")
            .and_then(|h| h.to_str().ok())
            .and_then(|auth_str| auth_str.strip_prefix("Bearer ").map(str::to_string));

        // Fall back to the session cookie when no bearer token was sent.
        if token.is_none() {
            token = get_cookie_value(req.headers(), "alchemist_session");
        }

        if let Some(t) = token {
            if let Ok(Some(_session)) = state.db.get_session(&t).await {
                return next.run(req).await;
            }
        }

        return (StatusCode::UNAUTHORIZED, "Unauthorized").into_response();
    }

    // 2. Static Assets / Frontend Pages
    // Allow everything else. The frontend app (Layout.astro) handles client-side redirects
    // if the user isn't authenticated, and the backend API protects the actual data.
    next.run(req).await
}
|
||||
|
||||
pub(crate) async fn rate_limit_middleware(
|
||||
State(state): State<Arc<AppState>>,
|
||||
req: Request,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
if !req.uri().path().starts_with("/api/") {
|
||||
return next.run(req).await;
|
||||
}
|
||||
|
||||
let ip = request_ip(&req).unwrap_or(IpAddr::from([0, 0, 0, 0]));
|
||||
if !allow_global_request(&state, ip).await {
|
||||
return (StatusCode::TOO_MANY_REQUESTS, "Too many requests").into_response();
|
||||
}
|
||||
next.run(req).await
|
||||
}
|
||||
|
||||
pub(crate) async fn allow_login_attempt(state: &AppState, ip: IpAddr) -> bool {
|
||||
let mut limiter = state.login_rate_limiter.lock().await;
|
||||
let now = Instant::now();
|
||||
let cleanup_after = Duration::from_secs(60 * 60);
|
||||
limiter.retain(|_, entry| now.duration_since(entry.last_refill) <= cleanup_after);
|
||||
|
||||
let entry = limiter.entry(ip).or_insert(RateLimitEntry {
|
||||
tokens: LOGIN_RATE_LIMIT_CAPACITY,
|
||||
last_refill: now,
|
||||
});
|
||||
|
||||
let elapsed = now.duration_since(entry.last_refill).as_secs_f64();
|
||||
if elapsed > 0.0 {
|
||||
let refill = elapsed * LOGIN_RATE_LIMIT_REFILL_PER_SEC;
|
||||
entry.tokens = (entry.tokens + refill).min(LOGIN_RATE_LIMIT_CAPACITY);
|
||||
entry.last_refill = now;
|
||||
}
|
||||
|
||||
if entry.tokens >= 1.0 {
|
||||
entry.tokens -= 1.0;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
async fn allow_global_request(state: &AppState, ip: IpAddr) -> bool {
|
||||
let mut limiter = state.global_rate_limiter.lock().await;
|
||||
let now = Instant::now();
|
||||
let cleanup_after = Duration::from_secs(60 * 60);
|
||||
limiter.retain(|_, entry| now.duration_since(entry.last_refill) <= cleanup_after);
|
||||
let entry = limiter.entry(ip).or_insert(RateLimitEntry {
|
||||
tokens: GLOBAL_RATE_LIMIT_CAPACITY,
|
||||
last_refill: now,
|
||||
});
|
||||
|
||||
let elapsed = now.duration_since(entry.last_refill).as_secs_f64();
|
||||
if elapsed > 0.0 {
|
||||
let refill = elapsed * GLOBAL_RATE_LIMIT_REFILL_PER_SEC;
|
||||
entry.tokens = (entry.tokens + refill).min(GLOBAL_RATE_LIMIT_CAPACITY);
|
||||
entry.last_refill = now;
|
||||
}
|
||||
|
||||
if entry.tokens >= 1.0 {
|
||||
entry.tokens -= 1.0;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn get_cookie_value(headers: &axum::http::HeaderMap, name: &str) -> Option<String> {
|
||||
let cookie_header = headers.get(header::COOKIE)?.to_str().ok()?;
|
||||
for part in cookie_header.split(';') {
|
||||
let mut iter = part.trim().splitn(2, '=');
|
||||
let key = iter.next()?.trim();
|
||||
let value = iter.next()?.trim();
|
||||
if key == name {
|
||||
return Some(value.to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
pub(crate) fn request_ip(req: &Request) -> Option<IpAddr> {
|
||||
req.extensions()
|
||||
.get::<ConnectInfo<SocketAddr>>()
|
||||
.map(|info| info.0.ip())
|
||||
}
|
||||
780
src/server/mod.rs
Normal file
780
src/server/mod.rs
Normal file
@@ -0,0 +1,780 @@
|
||||
//! HTTP server module: routes, state, middleware, and API handlers.
|
||||
|
||||
pub mod auth;
|
||||
pub mod jobs;
|
||||
pub mod middleware;
|
||||
pub mod scan;
|
||||
pub mod settings;
|
||||
pub mod sse;
|
||||
pub mod stats;
|
||||
pub mod system;
|
||||
pub mod wizard;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use crate::Agent;
|
||||
use crate::Transcoder;
|
||||
use crate::config::Config;
|
||||
use crate::db::{AlchemistEvent, Db, EventChannels};
|
||||
use crate::error::{AlchemistError, Result};
|
||||
use crate::system::hardware::{HardwareInfo, HardwareProbeLog, HardwareState};
|
||||
use axum::{
|
||||
Router,
|
||||
http::{StatusCode, Uri, header},
|
||||
middleware as axum_middleware,
|
||||
response::{IntoResponse, Response},
|
||||
routing::{delete, get, post},
|
||||
};
|
||||
#[cfg(feature = "embed-web")]
|
||||
use rust_embed::RustEmbed;
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::{Path as FsPath, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::time::Instant;
|
||||
use tokio::net::lookup_host;
|
||||
use tokio::sync::{Mutex, RwLock, broadcast};
|
||||
use tokio::time::Duration;
|
||||
use tracing::{error, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
use middleware::RateLimitEntry;
|
||||
|
||||
#[cfg(feature = "embed-web")]
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "web/dist/"]
|
||||
struct Assets;
|
||||
|
||||
/// Load a static web asset by relative path.
///
/// The path is first validated by `sanitize_asset_path` (defined elsewhere
/// in this module); rejected paths return `None`. With the `embed-web`
/// feature the asset is served from the binary-embedded `web/dist` bundle;
/// otherwise — or when the asset is missing from the embed — it falls back
/// to reading `web/dist/<path>` from disk.
fn load_static_asset(path: &str) -> Option<Vec<u8>> {
    // Reject unsafe paths before touching the embed or the filesystem.
    sanitize_asset_path(path)?;

    #[cfg(feature = "embed-web")]
    if let Some(content) = Assets::get(path) {
        return Some(content.data.into_owned());
    }

    // Disk fallback, used in dev builds without the embed feature.
    let full_path = PathBuf::from("web/dist").join(path);
    fs::read(full_path).ok()
}
|
||||
|
||||
/// Shared application state injected into every handler as `Arc<AppState>`.
pub struct AppState {
    /// Database handle.
    pub db: Arc<Db>,
    /// Live application configuration, mutable at runtime.
    pub config: Arc<RwLock<Config>>,
    /// Engine agent (pause/resume/drain, mode and concurrency control).
    pub agent: Arc<Agent>,
    /// Transcode orchestrator (job cancellation, active-job counts).
    pub transcoder: Arc<Transcoder>,
    /// Handle to the background scheduler.
    pub scheduler: crate::scheduler::SchedulerHandle,
    /// Typed broadcast channels for job/config/system events.
    pub event_channels: Arc<EventChannels>,
    pub tx: broadcast::Sender<AlchemistEvent>, // Legacy channel for transition
    /// True until first-run setup has completed.
    pub setup_required: Arc<AtomicBool>,
    /// Process start time (used for uptime reporting).
    pub start_time: Instant,
    /// Random id generated per process run — presumably for telemetry
    /// correlation; confirm against the telemetry sender.
    pub telemetry_runtime_id: String,
    /// Outbound notification dispatcher.
    pub notification_manager: Arc<crate::notifications::NotificationManager>,
    /// sysinfo snapshot; behind a mutex because refreshes mutate it.
    pub sys: Mutex<sysinfo::System>,
    /// Filesystem watcher.
    pub file_watcher: Arc<crate::system::watcher::FileWatcher>,
    /// On-demand library scanner.
    pub library_scanner: Arc<crate::system::scanner::LibraryScanner>,
    /// Path the config was loaded from (used when persisting changes).
    pub config_path: PathBuf,
    /// Whether the config file may be written back to disk.
    pub config_mutable: bool,
    /// Detected hardware capabilities.
    pub hardware_state: HardwareState,
    /// Log of hardware probe attempts, for diagnostics.
    pub hardware_probe_log: Arc<tokio::sync::RwLock<HardwareProbeLog>>,
    /// Cached resources JSON plus the instant it was computed.
    pub resources_cache: Arc<tokio::sync::Mutex<Option<(serde_json::Value, std::time::Instant)>>>,
    /// Per-IP token buckets for login attempts.
    pub(crate) login_rate_limiter: Mutex<HashMap<IpAddr, RateLimitEntry>>,
    /// Per-IP token buckets for all API requests.
    pub(crate) global_rate_limiter: Mutex<HashMap<IpAddr, RateLimitEntry>>,
}
|
||||
|
||||
/// Bundle of dependencies for `run_server`, grouped into one struct so the
/// call site stays readable as the dependency list grows.
pub struct RunServerArgs {
    pub db: Arc<Db>,
    pub config: Arc<RwLock<Config>>,
    pub agent: Arc<Agent>,
    pub transcoder: Arc<Transcoder>,
    pub scheduler: crate::scheduler::SchedulerHandle,
    pub event_channels: Arc<EventChannels>,
    pub tx: broadcast::Sender<AlchemistEvent>, // Legacy channel for transition
    /// True when first-run setup still needs to run.
    pub setup_required: bool,
    pub config_path: PathBuf,
    pub config_mutable: bool,
    pub hardware_state: HardwareState,
    pub hardware_probe_log: Arc<tokio::sync::RwLock<HardwareProbeLog>>,
    pub notification_manager: Arc<crate::notifications::NotificationManager>,
    pub file_watcher: Arc<crate::system::watcher::FileWatcher>,
}
|
||||
|
||||
/// Boots the HTTP server: builds `AppState`, binds the listener, and serves
/// the router until Ctrl+C (or SIGTERM on Unix) triggers a graceful shutdown.
///
/// Port selection: `ALCHEMIST_SERVER_PORT` env var when set and non-empty,
/// otherwise 3000. Returns an error on invalid port, bind failure, or a
/// server-level failure.
pub async fn run_server(args: RunServerArgs) -> Result<()> {
    let RunServerArgs {
        db,
        config,
        agent,
        transcoder,
        scheduler,
        event_channels,
        tx,
        setup_required,
        config_path,
        config_mutable,
        hardware_state,
        hardware_probe_log,
        notification_manager,
        file_watcher,
    } = args;
    // When assets are not embedded, warn early if the frontend build is missing
    // so operators see a clear hint instead of bare 404s.
    #[cfg(not(feature = "embed-web"))]
    {
        let web_dist = PathBuf::from("web/dist");
        if !web_dist.exists() {
            let cwd = std::env::current_dir()
                .map(|p| format!("{}/", p.display()))
                .unwrap_or_default();
            warn!(
                "web/dist not found at {}web/dist — frontend will not be served. \
                Build it first with `just web-build` or run from the repo root.",
                cwd
            );
        }
    }

    // Initialize sysinfo
    // Prime CPU/memory counters once so the first /api/system/resources call
    // has baseline data.
    let mut sys = sysinfo::System::new();
    sys.refresh_cpu_usage();
    sys.refresh_memory();

    let library_scanner = Arc::new(crate::system::scanner::LibraryScanner::new(
        db.clone(),
        config.clone(),
    ));

    let state = Arc::new(AppState {
        db,
        config,
        agent,
        transcoder,
        scheduler,
        event_channels,
        tx,
        setup_required: Arc::new(AtomicBool::new(setup_required)),
        start_time: std::time::Instant::now(),
        telemetry_runtime_id: Uuid::new_v4().to_string(),
        notification_manager,
        sys: Mutex::new(sys),
        file_watcher,
        library_scanner,
        config_path,
        config_mutable,
        hardware_state,
        hardware_probe_log,
        resources_cache: Arc::new(tokio::sync::Mutex::new(None)),
        login_rate_limiter: Mutex::new(HashMap::new()),
        global_rate_limiter: Mutex::new(HashMap::new()),
    });

    // Clone agent for shutdown handler before moving state into router
    let shutdown_agent = state.agent.clone();

    let app = app_router(state);

    // Env override for the listen port; empty/whitespace values are ignored,
    // non-numeric values are a hard config error.
    let port = std::env::var("ALCHEMIST_SERVER_PORT")
        .ok()
        .filter(|value| !value.trim().is_empty())
        .map(|value| {
            value.trim().parse::<u16>().map_err(|_| {
                AlchemistError::Config("ALCHEMIST_SERVER_PORT must be a valid u16".to_string())
            })
        })
        .transpose()?
        .unwrap_or(3000);
    let addr = format!("0.0.0.0:{port}");
    info!("listening on http://{}", addr);
    let listener = tokio::net::TcpListener::bind(&addr)
        .await
        .map_err(AlchemistError::Io)?;

    // Run server with graceful shutdown on Ctrl+C
    axum::serve(
        listener,
        // connect info is required by the per-IP rate limiting middleware
        app.into_make_service_with_connect_info::<SocketAddr>(),
    )
    .with_graceful_shutdown(async move {
        // Wait for shutdown signal
        let ctrl_c = async {
            tokio::signal::ctrl_c()
                .await
                .expect("failed to install Ctrl+C handler");
        };

        #[cfg(unix)]
        let terminate = async {
            tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
                .expect("failed to install signal handler")
                .recv()
                .await;
        };

        // On non-Unix targets there is no SIGTERM; a pending future keeps the
        // select! shape identical.
        #[cfg(not(unix))]
        let terminate = std::future::pending::<()>();

        tokio::select! {
            _ = ctrl_c => {
                info!("Received Ctrl+C, initiating graceful shutdown...");
            }
            _ = terminate => {
                info!("Received SIGTERM, initiating graceful shutdown...");
            }
        }

        // Give active jobs up to 5 minutes to complete
        shutdown_agent
            .graceful_shutdown(std::time::Duration::from_secs(300))
            .await;
    })
    .await
    .map_err(|e| AlchemistError::Unknown(format!("Server error: {}", e)))?;

    Ok(())
}
|
||||
|
||||
/// Builds the full Axum router: API routes, health checks, static assets, and
/// the middleware stack.
///
/// Layer ordering note: layers run outermost-last, so requests pass through
/// rate limiting, then auth, then security headers, before reaching a route.
fn app_router(state: Arc<AppState>) -> Router {
    use auth::*;
    use jobs::*;
    use scan::*;
    use settings::*;
    use sse::*;
    use stats::*;
    use system::*;
    use wizard::*;

    Router::new()
        // API Routes
        .route("/api/scan/start", post(start_scan_handler))
        .route("/api/scan/status", get(get_scan_status_handler))
        .route("/api/scan", post(scan_handler))
        .route("/api/stats", get(stats_handler))
        .route("/api/stats/aggregated", get(aggregated_stats_handler))
        .route("/api/stats/daily", get(daily_stats_handler))
        .route("/api/stats/detailed", get(detailed_stats_handler))
        .route("/api/stats/savings", get(savings_summary_handler))
        // Canonical job list endpoint.
        .route("/api/jobs", get(jobs_table_handler))
        // Alias kept for older clients; same handler as /api/jobs.
        .route("/api/jobs/table", get(jobs_table_handler))
        .route("/api/jobs/batch", post(batch_jobs_handler))
        .route("/api/logs/history", get(logs_history_handler))
        .route("/api/logs", delete(clear_logs_handler))
        .route("/api/jobs/restart-failed", post(restart_failed_handler))
        .route("/api/jobs/clear-completed", post(clear_completed_handler))
        .route("/api/jobs/:id/cancel", post(cancel_job_handler))
        .route("/api/jobs/:id/priority", post(update_job_priority_handler))
        .route("/api/jobs/:id/restart", post(restart_job_handler))
        .route("/api/jobs/:id/delete", post(delete_job_handler))
        .route("/api/jobs/:id/details", get(get_job_detail_handler))
        .route("/api/events", get(sse_handler))
        .route("/api/engine/pause", post(pause_engine_handler))
        .route("/api/engine/resume", post(resume_engine_handler))
        .route("/api/engine/drain", post(drain_engine_handler))
        .route("/api/engine/stop-drain", post(stop_drain_handler))
        .route(
            "/api/engine/mode",
            get(get_engine_mode_handler).post(set_engine_mode_handler),
        )
        .route("/api/engine/status", get(engine_status_handler))
        .route(
            "/api/settings/transcode",
            get(get_transcode_settings_handler).post(update_transcode_settings_handler),
        )
        .route(
            "/api/settings/system",
            get(get_system_settings_handler).post(update_system_settings_handler),
        )
        .route(
            "/api/settings/bundle",
            get(get_settings_bundle_handler).put(update_settings_bundle_handler),
        )
        .route(
            "/api/settings/preferences",
            post(set_setting_preference_handler),
        )
        .route(
            "/api/settings/preferences/:key",
            get(get_setting_preference_handler),
        )
        .route(
            "/api/settings/config",
            get(get_settings_config_handler).put(update_settings_config_handler),
        )
        .route(
            "/api/settings/watch-dirs",
            get(get_watch_dirs_handler).post(add_watch_dir_handler),
        )
        .route(
            "/api/settings/watch-dirs/:id",
            delete(remove_watch_dir_handler),
        )
        .route(
            "/api/watch-dirs/:id/profile",
            axum::routing::patch(assign_watch_dir_profile_handler),
        )
        .route("/api/profiles/presets", get(get_profile_presets_handler))
        .route(
            "/api/profiles",
            get(list_profiles_handler).post(create_profile_handler),
        )
        .route(
            "/api/profiles/:id",
            axum::routing::put(update_profile_handler).delete(delete_profile_handler),
        )
        .route(
            "/api/settings/notifications",
            get(get_notifications_handler).post(add_notification_handler),
        )
        .route(
            "/api/settings/notifications/:id",
            delete(delete_notification_handler),
        )
        .route(
            "/api/settings/notifications/test",
            post(test_notification_handler),
        )
        .route(
            "/api/settings/files",
            get(get_file_settings_handler).post(update_file_settings_handler),
        )
        .route(
            "/api/settings/schedule",
            get(get_schedule_handler).post(add_schedule_handler),
        )
        .route(
            "/api/settings/hardware",
            get(get_hardware_settings_handler).post(update_hardware_settings_handler),
        )
        .route(
            "/api/settings/schedule/:id",
            delete(delete_schedule_handler),
        )
        // Health Check Routes
        .route("/api/health", get(health_handler))
        .route("/api/ready", get(ready_handler))
        // System Routes
        .route("/api/system/resources", get(system_resources_handler))
        .route("/api/system/info", get(get_system_info_handler))
        .route("/api/system/hardware", get(get_hardware_info_handler))
        .route(
            "/api/system/hardware/probe-log",
            get(get_hardware_probe_log_handler),
        )
        .route("/api/library/health", get(library_health_handler))
        .route(
            "/api/library/health/scan",
            post(start_library_health_scan_handler),
        )
        .route(
            "/api/library/health/scan/:id",
            post(rescan_library_health_issue_handler),
        )
        .route(
            "/api/library/health/issues",
            get(get_library_health_issues_handler),
        )
        .route("/api/fs/browse", get(fs_browse_handler))
        .route("/api/fs/recommendations", get(fs_recommendations_handler))
        .route("/api/fs/preview", post(fs_preview_handler))
        .route("/api/telemetry/payload", get(telemetry_payload_handler))
        // Setup Routes
        .route("/api/setup/status", get(setup_status_handler))
        .route("/api/setup/complete", post(setup_complete_handler))
        .route("/api/auth/login", post(login_handler))
        .route("/api/auth/logout", post(logout_handler))
        .route(
            "/api/ui/preferences",
            get(get_preferences_handler).post(update_preferences_handler),
        )
        // Static Asset Routes
        .route("/", get(index_handler))
        .route("/*file", get(static_handler))
        .layer(axum_middleware::from_fn(
            middleware::security_headers_middleware,
        ))
        .layer(axum_middleware::from_fn_with_state(
            state.clone(),
            middleware::auth_middleware,
        ))
        .layer(axum_middleware::from_fn_with_state(
            state.clone(),
            middleware::rate_limit_middleware,
        ))
        .with_state(state)
}
|
||||
|
||||
// Helper functions used by multiple modules
|
||||
|
||||
pub(crate) async fn refresh_file_watcher(state: &AppState) {
|
||||
let config = state.config.read().await.clone();
|
||||
if let Err(e) = crate::system::watcher::refresh_from_sources(
|
||||
state.file_watcher.as_ref(),
|
||||
state.db.as_ref(),
|
||||
&config,
|
||||
state.setup_required.load(Ordering::Relaxed),
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!("Failed to update file watcher: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn replace_runtime_hardware(
|
||||
state: &AppState,
|
||||
hardware_info: HardwareInfo,
|
||||
probe_log: HardwareProbeLog,
|
||||
) {
|
||||
state.hardware_state.replace(Some(hardware_info)).await;
|
||||
*state.hardware_probe_log.write().await = probe_log;
|
||||
}
|
||||
|
||||
pub(crate) fn config_write_blocked_response(config_path: &FsPath) -> Response {
|
||||
(
|
||||
StatusCode::CONFLICT,
|
||||
format!(
|
||||
"Configuration updates are disabled (ALCHEMIST_CONFIG_MUTABLE=false). \
|
||||
Set ALCHEMIST_CONFIG_MUTABLE=true and ensure {:?} is writable.",
|
||||
config_path
|
||||
),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) fn config_save_error_to_response(config_path: &FsPath, err: &anyhow::Error) -> Response {
|
||||
if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
|
||||
let read_only = io_err
|
||||
.to_string()
|
||||
.to_ascii_lowercase()
|
||||
.contains("read-only");
|
||||
if io_err.kind() == std::io::ErrorKind::PermissionDenied || read_only {
|
||||
return (
|
||||
StatusCode::CONFLICT,
|
||||
format!(
|
||||
"Configuration file {:?} is not writable: {}",
|
||||
config_path, io_err
|
||||
),
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
}
|
||||
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Failed to save config at {:?}: {}", config_path, err),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// Persists `config` to disk and the database, or returns the HTTP error
/// response the caller should send instead.
///
/// The error `Response` is boxed to keep the `Result` small on the hot path.
pub(crate) async fn save_config_or_response(
    state: &AppState,
    config: &Config,
) -> std::result::Result<(), Box<Response>> {
    // Respect deployments that disable config writes entirely.
    if !state.config_mutable {
        return Err(Box::new(config_write_blocked_response(&state.config_path)));
    }

    // Create the parent directory on first save (fresh installs).
    if let Some(parent) = state.config_path.parent() {
        if !parent.as_os_str().is_empty() && !parent.exists() {
            if let Err(err) = std::fs::create_dir_all(parent) {
                return Err(config_save_error_to_response(
                    &state.config_path,
                    &anyhow::Error::new(err),
                )
                .into());
            }
        }
    }

    // Write the TOML file and mirror the settings into the DB in one step.
    if let Err(err) = crate::settings::save_config_and_project(
        state.db.as_ref(),
        state.config_path.as_path(),
        config,
    )
    .await
    {
        return Err(config_save_error_to_response(
            &state.config_path,
            &anyhow::Error::msg(err.to_string()),
        )
        .into());
    }

    Ok(())
}
|
||||
|
||||
pub(crate) fn config_read_error_response(context: &str, err: &AlchemistError) -> Response {
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Failed to {context}: {err}"),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) fn hardware_error_response(err: &AlchemistError) -> Response {
|
||||
let status = match err {
|
||||
AlchemistError::Config(_) | AlchemistError::Hardware(_) => StatusCode::BAD_REQUEST,
|
||||
_ => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
};
|
||||
(status, err.to_string()).into_response()
|
||||
}
|
||||
|
||||
pub(crate) fn validate_transcode_payload(
|
||||
payload: &settings::TranscodeSettingsPayload,
|
||||
) -> std::result::Result<(), &'static str> {
|
||||
if payload.concurrent_jobs == 0 {
|
||||
return Err("concurrent_jobs must be > 0");
|
||||
}
|
||||
if !(0.0..=1.0).contains(&payload.size_reduction_threshold) {
|
||||
return Err("size_reduction_threshold must be 0.0-1.0");
|
||||
}
|
||||
if payload.min_bpp_threshold < 0.0 {
|
||||
return Err("min_bpp_threshold must be >= 0.0");
|
||||
}
|
||||
if payload.threads > 512 {
|
||||
return Err("threads must be <= 512");
|
||||
}
|
||||
if !(50.0..=1000.0).contains(&payload.tonemap_peak) {
|
||||
return Err("tonemap_peak must be between 50 and 1000");
|
||||
}
|
||||
if !(0.0..=1.0).contains(&payload.tonemap_desat) {
|
||||
return Err("tonemap_desat must be between 0.0 and 1.0");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validates a user-supplied directory string and resolves it to a canonical
/// absolute path.
///
/// Rejects empty/whitespace values, embedded NUL bytes, and paths that are
/// not existing directories; `field_name` is used in the error messages.
pub(crate) fn canonicalize_directory_path(
    value: &str,
    field_name: &str,
) -> std::result::Result<PathBuf, String> {
    let cleaned = value.trim();
    if cleaned.is_empty() {
        return Err(format!("{field_name} must not be empty"));
    }
    if cleaned.contains('\0') {
        return Err(format!("{field_name} must not contain null bytes"));
    }

    let candidate = PathBuf::from(cleaned);
    if !candidate.is_dir() {
        return Err(format!("{field_name} must be an existing directory"));
    }

    fs::canonicalize(&candidate).map_err(|_| format!("{field_name} must be canonicalizable"))
}
|
||||
|
||||
pub(crate) fn normalize_optional_directory(
|
||||
value: Option<&str>,
|
||||
field_name: &str,
|
||||
) -> std::result::Result<Option<String>, String> {
|
||||
let Some(value) = value else {
|
||||
return Ok(None);
|
||||
};
|
||||
let trimmed = value.trim();
|
||||
if trimmed.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
canonicalize_directory_path(trimmed, field_name)
|
||||
.map(|path| Some(path.to_string_lossy().to_string()))
|
||||
}
|
||||
|
||||
/// Normalizes an optional path field (file or directory).
///
/// `None`/empty/whitespace input yields `Ok(None)`; NUL bytes are rejected.
/// On Linux the path must exist and canonicalize; on other platforms the
/// trimmed value is accepted verbatim.
pub(crate) fn normalize_optional_path(
    value: Option<&str>,
    field_name: &str,
) -> std::result::Result<Option<String>, String> {
    let trimmed = match value.map(str::trim) {
        None | Some("") => return Ok(None),
        Some(rest) => rest,
    };
    if trimmed.contains('\0') {
        return Err(format!("{field_name} must not contain null bytes"));
    }

    if cfg!(target_os = "linux") {
        let candidate = PathBuf::from(trimmed);
        if !candidate.exists() {
            return Err(format!("{field_name} must exist"));
        }
        fs::canonicalize(candidate)
            .map(|path| Some(path.to_string_lossy().to_string()))
            .map_err(|_| format!("{field_name} must be canonicalizable"))
    } else {
        Ok(Some(trimmed.to_string()))
    }
}
|
||||
|
||||
pub(crate) fn is_row_not_found(err: &AlchemistError) -> bool {
|
||||
matches!(err, AlchemistError::Database(sqlx::Error::RowNotFound))
|
||||
}
|
||||
|
||||
/// True when `value` contains a Unix (`/`) or Windows (`\`) path separator.
pub(crate) fn has_path_separator(value: &str) -> bool {
    value.contains('/') || value.contains('\\')
}
|
||||
|
||||
/// Parses an "H:M" wall-clock value (any zero-padding, surrounding
/// whitespace allowed) into the canonical "HH:MM" form.
/// Returns `None` for anything that is not exactly two numeric parts in
/// 0-23 / 0-59 range.
pub(crate) fn normalize_schedule_time(value: &str) -> Option<String> {
    let mut pieces = value.trim().split(':');
    let hour: u32 = pieces.next()?.parse().ok()?;
    let minute: u32 = pieces.next()?.parse().ok()?;
    if pieces.next().is_some() {
        // More than one ':' — reject, matching the strict two-part rule.
        return None;
    }
    (hour <= 23 && minute <= 59).then(|| format!("{hour:02}:{minute:02}"))
}
|
||||
|
||||
/// Validates a user-supplied notification endpoint URL and guards against
/// SSRF: only http/https, no embedded credentials or fragments, and the host
/// must not be (or resolve to) a loopback / private / link-local address.
pub(crate) async fn validate_notification_url(raw: &str) -> std::result::Result<(), String> {
    let url =
        reqwest::Url::parse(raw).map_err(|_| "endpoint_url must be a valid URL".to_string())?;
    match url.scheme() {
        "http" | "https" => {}
        _ => return Err("endpoint_url must use http or https".to_string()),
    }
    // Credentials embedded in the URL would be stored and could leak in logs.
    if !url.username().is_empty() || url.password().is_some() {
        return Err("endpoint_url must not contain embedded credentials".to_string());
    }
    if url.fragment().is_some() {
        return Err("endpoint_url must not include a URL fragment".to_string());
    }

    let host = url
        .host_str()
        .ok_or_else(|| "endpoint_url must include a host".to_string())?;

    if host.eq_ignore_ascii_case("localhost") {
        return Err("endpoint_url host is not allowed".to_string());
    }

    if let Ok(ip) = host.parse::<IpAddr>() {
        // Literal IP: check it directly, no DNS involved.
        if is_private_ip(ip) {
            return Err("endpoint_url host is not allowed".to_string());
        }
    } else {
        // Hostname: resolve it (bounded to 3s) and reject if ANY returned
        // address is internal, so a DNS name pointing at a private IP fails.
        let port = url
            .port_or_known_default()
            .ok_or_else(|| "endpoint_url must include a port".to_string())?;
        let host_port = format!("{}:{}", host, port);
        let mut resolved = false;
        let addrs = tokio::time::timeout(Duration::from_secs(3), lookup_host(host_port))
            .await
            .map_err(|_| "endpoint_url host resolution timed out".to_string())?
            .map_err(|_| "endpoint_url host could not be resolved".to_string())?;
        for addr in addrs {
            resolved = true;
            if is_private_ip(addr.ip()) {
                return Err("endpoint_url host is not allowed".to_string());
            }
        }
        // An empty answer set counts as unresolvable.
        if !resolved {
            return Err("endpoint_url host could not be resolved".to_string());
        }
    }

    Ok(())
}
|
||||
|
||||
/// True when `ip` must not be reachable from user-configurable outbound
/// requests: loopback, RFC 1918 private, link-local, multicast, unspecified,
/// broadcast, and IPv6 unique-local ranges.
///
/// IPv4-mapped IPv6 addresses (e.g. `::ffff:127.0.0.1`) are unwrapped and
/// checked as IPv4 — otherwise they would bypass every V6 predicate here
/// (`Ipv6Addr::is_loopback` is false for `::ffff:127.0.0.1`), reopening the
/// SSRF hole this function exists to close.
fn is_private_ip(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(v4) => {
            v4.is_private()
                || v4.is_loopback()
                || v4.is_link_local()
                || v4.is_multicast()
                || v4.is_unspecified()
                || v4.is_broadcast()
        }
        IpAddr::V6(v6) => {
            // Unwrap ::ffff:a.b.c.d and re-check under the IPv4 rules.
            if let Some(mapped) = v6.to_ipv4_mapped() {
                return is_private_ip(IpAddr::V4(mapped));
            }
            v6.is_loopback()
                || v6.is_unique_local()
                || v6.is_unicast_link_local()
                || v6.is_multicast()
                || v6.is_unspecified()
        }
    }
}
|
||||
|
||||
/// Normalizes a request path into a safe, relative static-asset path.
///
/// Backslashes are treated as separators, empty and `.` segments are dropped,
/// and any `..` segment rejects the whole path (no directory traversal).
/// An empty result maps to `"index.html"`.
fn sanitize_asset_path(raw: &str) -> Option<String> {
    let unified = raw.replace('\\', "/");
    let mut kept: Vec<&str> = Vec::new();

    for piece in unified.split('/') {
        match piece {
            "" | "." => {}
            ".." => return None,
            other => kept.push(other),
        }
    }

    if kept.is_empty() {
        return Some("index.html".to_string());
    }
    Some(kept.join("/"))
}
|
||||
|
||||
// Static asset handlers
|
||||
|
||||
async fn index_handler() -> impl IntoResponse {
|
||||
static_handler(Uri::from_static("/index.html")).await
|
||||
}
|
||||
|
||||
/// Serves frontend assets with SPA-style fallbacks.
///
/// Resolution order: exact file → `<path>/index.html` for extension-less
/// paths → a built-in "UI not built" page for the root → `404.html` →
/// plain 404.
async fn static_handler(uri: Uri) -> impl IntoResponse {
    let raw_path = uri.path().trim_start_matches('/');
    // Reject traversal attempts before touching the asset store.
    let path = match sanitize_asset_path(raw_path) {
        Some(path) => path,
        None => return StatusCode::NOT_FOUND.into_response(),
    };

    if let Some(content) = load_static_asset(&path) {
        let mime = mime_guess::from_path(&path).first_or_octet_stream();
        return ([(header::CONTENT_TYPE, mime.as_ref())], content).into_response();
    }

    // Attempt to serve index.html for directory paths (e.g. /jobs -> jobs/index.html)
    if !path.contains('.') {
        let index_path = format!("{}/index.html", path);
        if let Some(content) = load_static_asset(&index_path) {
            let mime = mime_guess::from_path("index.html").first_or_octet_stream();
            return ([(header::CONTENT_TYPE, mime.as_ref())], content).into_response();
        }
    }

    // Root request with no built assets at all: show an actionable hint
    // instead of a bare 404.
    if path == "index.html" {
        const MISSING_WEB_BUILD_PAGE: &str = r#"<!doctype html>
<html lang="en">
<head><meta charset="utf-8"><title>Alchemist UI Not Built</title></head>
<body>
<h1>Alchemist UI is not built</h1>
<p>The backend is running, but frontend assets are missing.</p>
<p>Run <code>cd web && bun install && bun run build</code>, then restart Alchemist.</p>
</body>
</html>"#;
        return (
            StatusCode::SERVICE_UNAVAILABLE,
            [(header::CONTENT_TYPE, "text/html; charset=utf-8")],
            MISSING_WEB_BUILD_PAGE,
        )
            .into_response();
    }

    // Extension-less unknown routes get the custom 404 page when available.
    if !path.contains('.') {
        if let Some(content) = load_static_asset("404.html") {
            let mime = mime_guess::from_path("404.html").first_or_octet_stream();
            return (
                StatusCode::NOT_FOUND,
                [(header::CONTENT_TYPE, mime.as_ref())],
                content,
            )
                .into_response();
        }
    }

    // Default fallback to 404 for missing files.
    StatusCode::NOT_FOUND.into_response()
}
|
||||
555
src/server/scan.rs
Normal file
555
src/server/scan.rs
Normal file
@@ -0,0 +1,555 @@
|
||||
//! Library scanning and watch folder handlers.
|
||||
|
||||
use super::{AppState, is_row_not_found, refresh_file_watcher, save_config_or_response};
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use futures::{FutureExt, StreamExt, stream};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::Path as FsPath;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::error;
|
||||
|
||||
pub(crate) async fn scan_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
let mut dirs: Vec<std::path::PathBuf> = config
|
||||
.scanner
|
||||
.directories
|
||||
.iter()
|
||||
.map(std::path::PathBuf::from)
|
||||
.collect();
|
||||
drop(config);
|
||||
|
||||
if let Ok(watch_dirs) = state.db.get_watch_dirs().await {
|
||||
for wd in watch_dirs {
|
||||
dirs.push(std::path::PathBuf::from(wd.path));
|
||||
}
|
||||
}
|
||||
|
||||
let _ = state.agent.scan_and_enqueue(dirs).await;
|
||||
StatusCode::OK
|
||||
}
|
||||
|
||||
pub(crate) async fn start_scan_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match state.library_scanner.start_scan().await {
|
||||
Ok(_) => StatusCode::ACCEPTED.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn get_scan_status_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
axum::Json::<crate::system::scanner::ScanStatus>(state.library_scanner.get_status().await)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
// Library health handlers
|
||||
|
||||
/// JSON shape returned per health issue: the affected job plus its
/// structured health report.
#[derive(Serialize)]
struct LibraryHealthIssueResponse {
    // Job row the issue belongs to.
    job: crate::db::Job,
    // Parsed (or re-categorized) health report for that job's output file.
    report: crate::media::health::HealthIssueReport,
}
|
||||
|
||||
pub(crate) async fn library_health_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_health_summary().await {
|
||||
Ok(summary) => axum::Json(summary).into_response(),
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn get_library_health_issues_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_jobs_with_health_issues().await {
|
||||
Ok(jobs) => {
|
||||
let issues = jobs
|
||||
.into_iter()
|
||||
.map(|row| {
|
||||
let (job, raw_health_issue) = row.into_parts();
|
||||
let report = serde_json::from_str::<crate::media::health::HealthIssueReport>(
|
||||
&raw_health_issue,
|
||||
)
|
||||
.unwrap_or_else(|_| {
|
||||
crate::media::health::categorize_health_output(&raw_health_issue)
|
||||
});
|
||||
LibraryHealthIssueResponse { job, report }
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
axum::Json(issues).into_response()
|
||||
}
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs one full library health scan in the background.
///
/// Creates a scan-run row, checks every job needing a health check with at
/// most 2 concurrent ffmpeg probes (bounded by the semaphore), records each
/// result, then finalizes the run with checked/issue counts. The whole body
/// is wrapped in `catch_unwind` so a panic in a probe can't kill the task
/// silently.
async fn run_library_health_scan(db: Arc<crate::db::Db>) {
    let result = std::panic::AssertUnwindSafe({
        let db = db.clone();
        async move {
            let created_run_id = match db.create_health_scan_run().await {
                Ok(id) => id,
                Err(err) => {
                    error!("Failed to create library health scan run: {}", err);
                    return;
                }
            };

            let jobs = match db.get_jobs_needing_health_check().await {
                Ok(jobs) => jobs,
                Err(err) => {
                    error!("Failed to load jobs for library health scan: {}", err);
                    // Close the run with zero counts so it doesn't dangle.
                    let _ = db.complete_health_scan_run(created_run_id, 0, 0).await;
                    return;
                }
            };

            // (files_checked, issues_found) shared across workers.
            let counters = Arc::new(Mutex::new((0_i64, 0_i64)));
            // Caps concurrent file probes at 2 regardless of stream width.
            let semaphore = Arc::new(tokio::sync::Semaphore::new(2));

            stream::iter(jobs)
                .for_each_concurrent(None, {
                    let db = db.clone();
                    let counters = counters.clone();
                    let semaphore = semaphore.clone();

                    move |job| {
                        let db = db.clone();
                        let counters = counters.clone();
                        let semaphore = semaphore.clone();
                        async move {
                            let Ok(permit) = semaphore.acquire_owned().await else {
                                error!("Library health scan semaphore closed unexpectedly");
                                return;
                            };
                            // Held for the duration of this probe.
                            let _permit = permit;

                            match crate::media::health::HealthChecker::check_file(FsPath::new(
                                &job.output_path,
                            ))
                            .await
                            {
                                Ok(issues) => {
                                    if let Err(err) =
                                        db.record_health_check(job.id, issues.as_ref()).await
                                    {
                                        error!(
                                            "Failed to record library health result for job {}: {}",
                                            job.id, err
                                        );
                                        // Not counted: the result was not persisted.
                                        return;
                                    }

                                    let mut guard = counters.lock().await;
                                    guard.0 += 1;
                                    if issues.is_some() {
                                        guard.1 += 1;
                                    }
                                }
                                Err(err) => {
                                    // Inconclusive checks are logged but do not
                                    // count as checked or as issues.
                                    error!(
                                        "Library health check was inconclusive for job {} ({}): {}",
                                        job.id, job.output_path, err
                                    );
                                }
                            }
                        }
                    }
                })
                .await;

            let (files_checked, issues_found) = *counters.lock().await;
            if let Err(err) = db
                .complete_health_scan_run(created_run_id, files_checked, issues_found)
                .await
            {
                error!(
                    "Failed to complete library health scan run {}: {}",
                    created_run_id, err
                );
            }
        }
    })
    .catch_unwind()
    .await;

    if result.is_err() {
        error!("Library health scan panicked");
    }
}
|
||||
|
||||
pub(crate) async fn start_library_health_scan_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let db = state.db.clone();
|
||||
tokio::spawn(async move {
|
||||
run_library_health_scan(db).await;
|
||||
});
|
||||
|
||||
(
|
||||
StatusCode::ACCEPTED,
|
||||
axum::Json(serde_json::json!({ "status": "accepted" })),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) async fn rescan_library_health_issue_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(id): Path<i64>,
|
||||
) -> impl IntoResponse {
|
||||
let job = match state.db.get_job_by_id(id).await {
|
||||
Ok(Some(job)) => job,
|
||||
Ok(None) => return StatusCode::NOT_FOUND.into_response(),
|
||||
Err(err) => return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
};
|
||||
|
||||
match crate::media::health::HealthChecker::check_file(FsPath::new(&job.output_path)).await {
|
||||
Ok(issue) => {
|
||||
if let Err(err) = state.db.record_health_check(job.id, issue.as_ref()).await {
|
||||
return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response();
|
||||
}
|
||||
axum::Json(serde_json::json!({
|
||||
"job_id": job.id,
|
||||
"issue_found": issue.is_some(),
|
||||
}))
|
||||
.into_response()
|
||||
}
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
// Watch directories handlers
|
||||
|
||||
/// Request body for POST /api/settings/watch-dirs.
#[derive(Deserialize)]
pub(crate) struct AddWatchDirPayload {
    // Directory to watch; validated and canonicalized by the handler.
    path: String,
    // Watch subdirectories too; defaults to true when omitted.
    is_recursive: Option<bool>,
}
|
||||
|
||||
pub(crate) async fn get_watch_dirs_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_watch_dirs().await {
|
||||
Ok(dirs) => axum::Json(dirs).into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a watch folder: validates/canonicalizes the path, rejects duplicates,
/// persists the updated config, commits it in memory, and refreshes the
/// filesystem watcher.
///
/// Ordering matters: the config is saved to disk BEFORE the in-memory copy is
/// replaced, so a failed save leaves runtime state untouched.
pub(crate) async fn add_watch_dir_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<AddWatchDirPayload>,
) -> impl IntoResponse {
    let normalized_path = match super::canonicalize_directory_path(&payload.path, "path") {
        Ok(path) => path,
        Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
    };

    let normalized_path = normalized_path.to_string_lossy().to_string();
    // Work on a clone; the live config is only swapped after a successful save.
    let mut next_config = state.config.read().await.clone();
    if next_config
        .scanner
        .extra_watch_dirs
        .iter()
        .any(|watch_dir| watch_dir.path == normalized_path)
    {
        return (StatusCode::CONFLICT, "watch folder already exists").into_response();
    }
    next_config
        .scanner
        .extra_watch_dirs
        .push(crate::config::WatchDirConfig {
            path: normalized_path.clone(),
            is_recursive: payload.is_recursive.unwrap_or(true),
        });
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    // Commit the saved config in memory.
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    refresh_file_watcher(&state).await;

    // Echo back the freshly created row when it can be found; plain 200 otherwise.
    match state.db.get_watch_dirs().await {
        Ok(dirs) => dirs
            .into_iter()
            .find(|dir| dir.path == normalized_path)
            .map(|dir| axum::Json(dir).into_response())
            .unwrap_or_else(|| StatusCode::OK.into_response()),
        Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    }
}
|
||||
|
||||
/// Removes a watch folder by its DB id: looks up the path, drops it from the
/// config, persists, commits in memory, and refreshes the watcher.
/// Same save-before-commit ordering as `add_watch_dir_handler`.
pub(crate) async fn remove_watch_dir_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
) -> impl IntoResponse {
    // Resolve the id to a path; config entries are keyed by path, not id.
    let dir = match state.db.get_watch_dirs().await {
        Ok(dirs) => dirs.into_iter().find(|dir| dir.id == id),
        Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    };
    let Some(dir) = dir else {
        return StatusCode::NOT_FOUND.into_response();
    };

    let mut next_config = state.config.read().await.clone();
    next_config
        .scanner
        .extra_watch_dirs
        .retain(|watch_dir| watch_dir.path != dir.path);
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    // Commit the saved config in memory.
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    refresh_file_watcher(&state).await;
    StatusCode::OK.into_response()
}
|
||||
|
||||
// Library profiles handlers
|
||||
|
||||
/// JSON shape returned to clients for a library profile, including whether it
/// is one of the compiled-in (read-only) presets.
#[derive(Serialize)]
struct LibraryProfileResponse {
    id: i64,
    name: String,
    preset: String,
    codec: String,
    quality_profile: String,
    hdr_mode: String,
    audio_mode: String,
    // Optional per-profile CRF override; None means no override is set.
    crf_override: Option<i32>,
    notes: Option<String>,
    created_at: chrono::DateTime<Utc>,
    updated_at: chrono::DateTime<Utc>,
    // Derived at response time from the built-in preset ids (see
    // is_builtin_profile_id); not a stored column.
    builtin: bool,
}
|
||||
|
||||
/// Request body for creating or updating a library profile. Text fields are
/// validated as non-blank (see validate_library_profile_payload) and
/// normalized (trimmed / lower-cased) before insertion.
#[derive(Deserialize)]
pub(crate) struct LibraryProfilePayload {
    name: String,
    preset: String,
    codec: String,
    quality_profile: String,
    hdr_mode: String,
    audio_mode: String,
    crf_override: Option<i32>,
    notes: Option<String>,
}
|
||||
|
||||
/// Request body for assigning a profile to a watch folder.
/// `profile_id: None` clears the assignment.
#[derive(Deserialize)]
pub(crate) struct AssignWatchDirProfilePayload {
    profile_id: Option<i64>,
}
|
||||
|
||||
fn is_builtin_profile_id(id: i64) -> bool {
|
||||
crate::config::BUILT_IN_LIBRARY_PROFILES
|
||||
.iter()
|
||||
.any(|profile| profile.id == id)
|
||||
}
|
||||
|
||||
/// Maps a DB profile row to the API response shape. `builtin` is computed
/// from the compiled-in preset ids rather than read from the row.
fn library_profile_response(profile: crate::db::LibraryProfile) -> LibraryProfileResponse {
    LibraryProfileResponse {
        id: profile.id,
        name: profile.name,
        preset: profile.preset,
        codec: profile.codec,
        quality_profile: profile.quality_profile,
        hdr_mode: profile.hdr_mode,
        audio_mode: profile.audio_mode,
        crf_override: profile.crf_override,
        notes: profile.notes,
        created_at: profile.created_at,
        updated_at: profile.updated_at,
        builtin: is_builtin_profile_id(profile.id),
    }
}
|
||||
|
||||
fn validate_library_profile_payload(
|
||||
payload: &LibraryProfilePayload,
|
||||
) -> std::result::Result<(), &'static str> {
|
||||
if payload.name.trim().is_empty() {
|
||||
return Err("name must not be empty");
|
||||
}
|
||||
if payload.preset.trim().is_empty() {
|
||||
return Err("preset must not be empty");
|
||||
}
|
||||
if payload.codec.trim().is_empty() {
|
||||
return Err("codec must not be empty");
|
||||
}
|
||||
if payload.quality_profile.trim().is_empty() {
|
||||
return Err("quality_profile must not be empty");
|
||||
}
|
||||
if payload.hdr_mode.trim().is_empty() {
|
||||
return Err("hdr_mode must not be empty");
|
||||
}
|
||||
if payload.audio_mode.trim().is_empty() {
|
||||
return Err("audio_mode must not be empty");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Converts an API payload into the DB insert struct: trims free-text fields
/// and lower-cases the enum-like string fields. Notes that are blank after
/// trimming become None.
fn to_new_library_profile(payload: LibraryProfilePayload) -> crate::db::NewLibraryProfile {
    let notes = payload.notes.and_then(|raw| {
        let trimmed = raw.trim().to_string();
        if trimmed.is_empty() { None } else { Some(trimmed) }
    });
    crate::db::NewLibraryProfile {
        name: payload.name.trim().to_string(),
        preset: payload.preset.trim().to_string(),
        codec: payload.codec.trim().to_ascii_lowercase(),
        quality_profile: payload.quality_profile.trim().to_ascii_lowercase(),
        hdr_mode: payload.hdr_mode.trim().to_ascii_lowercase(),
        audio_mode: payload.audio_mode.trim().to_ascii_lowercase(),
        crf_override: payload.crf_override,
        notes,
    }
}
|
||||
|
||||
pub(crate) async fn list_profiles_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match state.db.get_all_profiles().await {
|
||||
Ok(profiles) => axum::Json(
|
||||
profiles
|
||||
.into_iter()
|
||||
.map(library_profile_response)
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
.into_response(),
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn get_profile_presets_handler() -> impl IntoResponse {
|
||||
let presets = crate::config::BUILT_IN_LIBRARY_PROFILES
|
||||
.iter()
|
||||
.map(|preset| {
|
||||
serde_json::json!({
|
||||
"id": preset.id,
|
||||
"name": preset.name,
|
||||
"preset": preset.preset,
|
||||
"codec": preset.codec.as_str(),
|
||||
"quality_profile": preset.quality_profile.as_str(),
|
||||
"hdr_mode": preset.hdr_mode.as_str(),
|
||||
"audio_mode": preset.audio_mode.as_str(),
|
||||
"crf_override": preset.crf_override,
|
||||
"notes": preset.notes,
|
||||
"builtin": true
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
axum::Json(presets).into_response()
|
||||
}
|
||||
|
||||
/// POST: create a user-defined library profile.
///
/// Validates the payload, inserts a normalized row, then re-reads the row so
/// the response carries DB-generated fields (id, timestamps).
pub(crate) async fn create_profile_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<LibraryProfilePayload>,
) -> impl IntoResponse {
    if let Err(message) = validate_library_profile_payload(&payload) {
        return (StatusCode::BAD_REQUEST, message).into_response();
    }

    let new_profile = to_new_library_profile(payload);
    let id = match state.db.create_profile(new_profile).await {
        Ok(id) => id,
        Err(err) => return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    };

    match state.db.get_profile(id).await {
        Ok(Some(profile)) => (
            StatusCode::CREATED,
            axum::Json(library_profile_response(profile)),
        )
            .into_response(),
        // Row disappeared between insert and re-read; still report creation,
        // just without a body.
        Ok(None) => StatusCode::CREATED.into_response(),
        Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    }
}
|
||||
|
||||
/// PUT: update a user-defined library profile.
///
/// Built-in presets are rejected with 409 before any validation or DB work.
/// On success the fresh row is returned so the client sees updated timestamps.
pub(crate) async fn update_profile_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
    axum::Json(payload): axum::Json<LibraryProfilePayload>,
) -> impl IntoResponse {
    if is_builtin_profile_id(id) {
        return (StatusCode::CONFLICT, "Built-in presets are read-only").into_response();
    }
    if let Err(message) = validate_library_profile_payload(&payload) {
        return (StatusCode::BAD_REQUEST, message).into_response();
    }

    match state
        .db
        .update_profile(id, to_new_library_profile(payload))
        .await
    {
        Ok(_) => match state.db.get_profile(id).await {
            Ok(Some(profile)) => axum::Json(library_profile_response(profile)).into_response(),
            Ok(None) => StatusCode::NOT_FOUND.into_response(),
            Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
        },
        // Distinguish "no such row" from genuine DB failures.
        Err(err) if is_row_not_found(&err) => StatusCode::NOT_FOUND.into_response(),
        Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    }
}
|
||||
|
||||
/// DELETE: remove a user-defined library profile.
///
/// Rejected with 409 when the id is a built-in preset or when the profile is
/// still assigned to at least one watch folder.
pub(crate) async fn delete_profile_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
) -> impl IntoResponse {
    if is_builtin_profile_id(id) {
        return (StatusCode::CONFLICT, "Built-in presets cannot be deleted").into_response();
    }

    // Refuse deletion while any watch folder still references this profile.
    match state.db.count_watch_dirs_using_profile(id).await {
        Ok(count) if count > 0 => (
            StatusCode::CONFLICT,
            "Profile is still assigned to one or more watch folders",
        )
            .into_response(),
        Ok(_) => match state.db.delete_profile(id).await {
            Ok(_) => StatusCode::OK.into_response(),
            Err(err) if is_row_not_found(&err) => StatusCode::NOT_FOUND.into_response(),
            Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
        },
        Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    }
}
|
||||
|
||||
/// PUT: assign (or clear, with `profile_id: null`) a profile on a watch folder.
///
/// When a profile id is given it is verified to exist first, so a dangling
/// reference is never written. On success the updated watch dir row is
/// returned when it can be re-read.
pub(crate) async fn assign_watch_dir_profile_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
    axum::Json(payload): axum::Json<AssignWatchDirProfilePayload>,
) -> impl IntoResponse {
    if let Some(profile_id) = payload.profile_id {
        match state.db.get_profile(profile_id).await {
            Ok(Some(_)) => {}
            // 404 here refers to the profile, not the watch dir.
            Ok(None) => return StatusCode::NOT_FOUND.into_response(),
            Err(err) => {
                return (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response();
            }
        }
    }

    match state
        .db
        .assign_profile_to_watch_dir(id, payload.profile_id)
        .await
    {
        Ok(_) => match state.db.get_watch_dirs().await {
            Ok(dirs) => dirs
                .into_iter()
                .find(|dir| dir.id == id)
                .map(|dir| axum::Json(dir).into_response())
                // Row not found on re-read: assignment succeeded, return bare 200.
                .unwrap_or_else(|| StatusCode::OK.into_response()),
            Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
        },
        Err(err) if is_row_not_found(&err) => StatusCode::NOT_FOUND.into_response(),
        Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
    }
}
|
||||
743
src/server/settings.rs
Normal file
743
src/server/settings.rs
Normal file
@@ -0,0 +1,743 @@
|
||||
//! Configuration get/set, validation handlers.
|
||||
|
||||
use super::{
|
||||
AppState, config_read_error_response, config_save_error_to_response,
|
||||
config_write_blocked_response, hardware_error_response, has_path_separator,
|
||||
normalize_optional_directory, normalize_optional_path, normalize_schedule_time,
|
||||
refresh_file_watcher, replace_runtime_hardware, save_config_or_response,
|
||||
validate_notification_url, validate_transcode_payload,
|
||||
};
|
||||
use crate::config::Config;
|
||||
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::IntoResponse,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Transcode settings
|
||||
|
||||
/// Request/response body for the transcode settings endpoints. Fields with
/// `#[serde(default)]`/default fns are optional in requests for backwards
/// compatibility with older clients.
#[derive(Deserialize, Serialize)]
pub(crate) struct TranscodeSettingsPayload {
    pub(crate) concurrent_jobs: usize,
    pub(crate) size_reduction_threshold: f64,
    pub(crate) min_bpp_threshold: f64,
    pub(crate) min_file_size_mb: u64,
    pub(crate) output_codec: crate::config::OutputCodec,
    pub(crate) quality_profile: crate::config::QualityProfile,
    #[serde(default)]
    pub(crate) threads: usize,
    #[serde(default = "crate::config::default_allow_fallback")]
    pub(crate) allow_fallback: bool,
    #[serde(default)]
    pub(crate) hdr_mode: crate::config::HdrMode,
    #[serde(default)]
    pub(crate) tonemap_algorithm: crate::config::TonemapAlgorithm,
    #[serde(default = "crate::config::default_tonemap_peak")]
    pub(crate) tonemap_peak: f32,
    #[serde(default = "crate::config::default_tonemap_desat")]
    pub(crate) tonemap_desat: f32,
    #[serde(default)]
    pub(crate) subtitle_mode: crate::config::SubtitleMode,
    #[serde(default)]
    pub(crate) stream_rules: crate::config::StreamRules,
}
|
||||
|
||||
/// GET: current transcode settings, copied field-by-field from the shared
/// config snapshot into the API payload shape.
pub(crate) async fn get_transcode_settings_handler(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    let config = state.config.read().await;
    axum::Json(TranscodeSettingsPayload {
        concurrent_jobs: config.transcode.concurrent_jobs,
        size_reduction_threshold: config.transcode.size_reduction_threshold,
        min_bpp_threshold: config.transcode.min_bpp_threshold,
        min_file_size_mb: config.transcode.min_file_size_mb,
        output_codec: config.transcode.output_codec,
        quality_profile: config.transcode.quality_profile,
        threads: config.transcode.threads,
        allow_fallback: config.transcode.allow_fallback,
        hdr_mode: config.transcode.hdr_mode,
        tonemap_algorithm: config.transcode.tonemap_algorithm,
        tonemap_peak: config.transcode.tonemap_peak,
        tonemap_desat: config.transcode.tonemap_desat,
        subtitle_mode: config.transcode.subtitle_mode,
        stream_rules: config.transcode.stream_rules.clone(),
    })
}
|
||||
|
||||
/// PUT: update transcode settings.
///
/// Order matters here: payload-level validation, then copy into a cloned
/// config, then whole-config validation, then persist, then swap the live
/// config, and finally push the new concurrency into the agent. The agent is
/// also flagged with a manual override so automatic tuning does not
/// immediately undo the user's choice.
pub(crate) async fn update_transcode_settings_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<TranscodeSettingsPayload>,
) -> impl IntoResponse {
    if let Err(msg) = validate_transcode_payload(&payload) {
        return (StatusCode::BAD_REQUEST, msg).into_response();
    }

    let mut next_config = state.config.read().await.clone();
    next_config.transcode.concurrent_jobs = payload.concurrent_jobs;
    next_config.transcode.size_reduction_threshold = payload.size_reduction_threshold;
    next_config.transcode.min_bpp_threshold = payload.min_bpp_threshold;
    next_config.transcode.min_file_size_mb = payload.min_file_size_mb;
    next_config.transcode.output_codec = payload.output_codec;
    next_config.transcode.quality_profile = payload.quality_profile;
    next_config.transcode.threads = payload.threads;
    next_config.transcode.allow_fallback = payload.allow_fallback;
    next_config.transcode.hdr_mode = payload.hdr_mode;
    next_config.transcode.tonemap_algorithm = payload.tonemap_algorithm;
    next_config.transcode.tonemap_peak = payload.tonemap_peak;
    next_config.transcode.tonemap_desat = payload.tonemap_desat;
    next_config.transcode.subtitle_mode = payload.subtitle_mode;
    next_config.transcode.stream_rules = payload.stream_rules.clone();

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Persist first; only swap the live config after a successful save.
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }

    {
        let mut config = state.config.write().await;
        *config = next_config;
    }

    state.agent.set_manual_override(true);
    state
        .agent
        .set_concurrent_jobs(payload.concurrent_jobs)
        .await;

    StatusCode::OK.into_response()
}
|
||||
|
||||
// Hardware settings
|
||||
|
||||
/// Request/response body for the hardware settings endpoints.
#[derive(Serialize, Deserialize)]
pub(crate) struct HardwareSettingsPayload {
    allow_cpu_fallback: bool,
    allow_cpu_encoding: bool,
    // Serialized form of crate::config::CpuPreset; parsed case-insensitively
    // on update.
    cpu_preset: String,
    preferred_vendor: Option<String>,
    // Optional in requests for backwards compatibility.
    #[serde(default)]
    device_path: Option<String>,
}
|
||||
|
||||
/// GET: current hardware settings from the shared config snapshot.
pub(crate) async fn get_hardware_settings_handler(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    let config = state.config.read().await;
    axum::Json(HardwareSettingsPayload {
        allow_cpu_fallback: config.hardware.allow_cpu_fallback,
        allow_cpu_encoding: config.hardware.allow_cpu_encoding,
        cpu_preset: config.hardware.cpu_preset.to_string(),
        preferred_vendor: config.hardware.preferred_vendor.clone(),
        device_path: config.hardware.device_path.clone(),
    })
}
|
||||
|
||||
/// PUT: update hardware settings.
///
/// Validates the resulting config, re-runs hardware detection against it
/// (failing the request if detection fails), persists, swaps the live config,
/// and finally installs the freshly detected hardware info at runtime.
pub(crate) async fn update_hardware_settings_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<HardwareSettingsPayload>,
) -> impl IntoResponse {
    let mut next_config = state.config.read().await.clone();

    next_config.hardware.allow_cpu_fallback = payload.allow_cpu_fallback;
    next_config.hardware.allow_cpu_encoding = payload.allow_cpu_encoding;
    // NOTE(review): an unrecognized cpu_preset string is silently coerced to
    // Medium rather than rejected with 400 — confirm this is intentional.
    next_config.hardware.cpu_preset = match payload.cpu_preset.to_lowercase().as_str() {
        "slow" => crate::config::CpuPreset::Slow,
        "medium" => crate::config::CpuPreset::Medium,
        "fast" => crate::config::CpuPreset::Fast,
        "faster" => crate::config::CpuPreset::Faster,
        _ => crate::config::CpuPreset::Medium,
    };
    next_config.hardware.preferred_vendor = payload.preferred_vendor;
    next_config.hardware.device_path =
        match normalize_optional_path(payload.device_path.as_deref(), "device_path") {
            Ok(path) => path,
            Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
        };

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Probe hardware with the candidate config before persisting anything, so
    // a config that breaks detection is never saved.
    let (hardware_info, probe_log) =
        match crate::system::hardware::detect_hardware_with_log(&next_config).await {
            Ok(result) => result,
            Err(err) => return hardware_error_response(&err),
        };

    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }

    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    replace_runtime_hardware(state.as_ref(), hardware_info, probe_log).await;

    StatusCode::OK.into_response()
}
|
||||
|
||||
// System settings
|
||||
|
||||
/// Request/response body for the system settings endpoints. `watch_enabled`
/// lives under `scanner` in the config but is surfaced here for the UI.
#[derive(Serialize, Deserialize)]
pub(crate) struct SystemSettingsPayload {
    // Seconds between monitoring polls; accepted range is 0.5..=10.0.
    monitoring_poll_interval: f64,
    enable_telemetry: bool,
    // Optional in requests for backwards compatibility (defaults to false).
    #[serde(default)]
    watch_enabled: bool,
}
|
||||
|
||||
pub(crate) async fn get_system_settings_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
axum::Json(SystemSettingsPayload {
|
||||
monitoring_poll_interval: config.system.monitoring_poll_interval,
|
||||
enable_telemetry: config.system.enable_telemetry,
|
||||
watch_enabled: config.scanner.watch_enabled,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn update_system_settings_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::Json(payload): axum::Json<SystemSettingsPayload>,
|
||||
) -> impl IntoResponse {
|
||||
if payload.monitoring_poll_interval < 0.5 || payload.monitoring_poll_interval > 10.0 {
|
||||
return (
|
||||
StatusCode::BAD_REQUEST,
|
||||
"monitoring_poll_interval must be between 0.5 and 10.0 seconds",
|
||||
)
|
||||
.into_response();
|
||||
}
|
||||
|
||||
let mut next_config = state.config.read().await.clone();
|
||||
next_config.system.monitoring_poll_interval = payload.monitoring_poll_interval;
|
||||
next_config.system.enable_telemetry = payload.enable_telemetry;
|
||||
next_config.scanner.watch_enabled = payload.watch_enabled;
|
||||
|
||||
if let Err(e) = next_config.validate() {
|
||||
return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
|
||||
}
|
||||
|
||||
if let Err(response) = save_config_or_response(&state, &next_config).await {
|
||||
return *response;
|
||||
}
|
||||
|
||||
{
|
||||
let mut config = state.config.write().await;
|
||||
*config = next_config;
|
||||
}
|
||||
|
||||
refresh_file_watcher(&state).await;
|
||||
|
||||
(StatusCode::OK, "Settings updated").into_response()
|
||||
}
|
||||
|
||||
// Settings bundle
|
||||
|
||||
pub(crate) async fn get_settings_bundle_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await.clone();
|
||||
axum::Json(crate::settings::bundle_response(config)).into_response()
|
||||
}
|
||||
|
||||
/// PUT: replace the entire configuration with the posted `Config`.
///
/// Validates, probes hardware against the candidate config (rejecting the
/// request if detection fails), persists, swaps the live config, then fans
/// the change out to every runtime subsystem: agent override/engine
/// mode/concurrency, detected hardware, file watcher, and scheduler.
pub(crate) async fn update_settings_bundle_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<Config>,
) -> impl IntoResponse {
    if let Err(err) = payload.validate() {
        return (StatusCode::BAD_REQUEST, err.to_string()).into_response();
    }

    // Probe before persisting so a detection-breaking config is never saved.
    let (hardware_info, probe_log) =
        match crate::system::hardware::detect_hardware_with_log(&payload).await {
            Ok(result) => result,
            Err(err) => return hardware_error_response(&err),
        };

    if let Err(response) = save_config_or_response(&state, &payload).await {
        return *response;
    }

    {
        let mut config = state.config.write().await;
        *config = payload.clone();
    }

    state.agent.set_manual_override(true);
    *state.agent.engine_mode.write().await = payload.system.engine_mode;
    state
        .agent
        .set_concurrent_jobs(payload.transcode.concurrent_jobs)
        .await;
    replace_runtime_hardware(state.as_ref(), hardware_info, probe_log).await;
    refresh_file_watcher(&state).await;
    // Re-evaluate schedule windows against the new config.
    state.scheduler.trigger();

    axum::Json(crate::settings::bundle_response(payload)).into_response()
}
|
||||
|
||||
// Setting preferences
|
||||
|
||||
/// Request body for storing a single key/value preference.
#[derive(Deserialize)]
pub(crate) struct SettingPreferencePayload {
    key: String,
    value: String,
}
|
||||
|
||||
/// Response body echoing a stored key/value preference.
#[derive(Serialize)]
struct SettingPreferenceResponse {
    key: String,
    value: String,
}
|
||||
|
||||
pub(crate) async fn set_setting_preference_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
axum::Json(payload): axum::Json<SettingPreferencePayload>,
|
||||
) -> impl IntoResponse {
|
||||
let key = payload.key.trim();
|
||||
if key.is_empty() {
|
||||
return (StatusCode::BAD_REQUEST, "key must not be empty").into_response();
|
||||
}
|
||||
|
||||
match state.db.set_preference(key, payload.value.as_str()).await {
|
||||
Ok(_) => axum::Json(SettingPreferenceResponse {
|
||||
key: key.to_string(),
|
||||
value: payload.value,
|
||||
})
|
||||
.into_response(),
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn get_setting_preference_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(key): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_preference(key.as_str()).await {
|
||||
Ok(Some(value)) => axum::Json(SettingPreferenceResponse { key, value }).into_response(),
|
||||
Ok(None) => StatusCode::NOT_FOUND.into_response(),
|
||||
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
// Raw config
|
||||
|
||||
/// Request body carrying the raw TOML text of the entire config file.
#[derive(Deserialize)]
pub(crate) struct RawConfigPayload {
    raw_toml: String,
}
|
||||
|
||||
/// GET: the raw config TOML from disk plus the normalized in-memory config,
/// so the editor UI can show both the literal file and its parsed form.
pub(crate) async fn get_settings_config_handler(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    let raw_toml = match crate::settings::load_raw_config(state.config_path.as_path()) {
        Ok(raw_toml) => raw_toml,
        Err(err) => return config_read_error_response("load raw config", &err),
    };
    let normalized = state.config.read().await.clone();
    axum::Json(crate::settings::config_response(raw_toml, normalized)).into_response()
}
|
||||
|
||||
/// PUT: replace the config by uploading raw TOML.
///
/// Pipeline: parse TOML -> probe hardware against the parsed config ->
/// check the config file is writable -> ensure the parent directory exists ->
/// save to disk and DB -> swap the live config -> fan the change out to the
/// agent, runtime hardware, file watcher, and scheduler. Any failure before
/// the save leaves both disk and runtime untouched.
pub(crate) async fn update_settings_config_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<RawConfigPayload>,
) -> impl IntoResponse {
    let config = match crate::settings::parse_raw_config(&payload.raw_toml) {
        Ok(config) => config,
        Err(err) => return hardware_error_response(&err),
    };

    // Probe before persisting so a detection-breaking config is never saved.
    let (hardware_info, probe_log) =
        match crate::system::hardware::detect_hardware_with_log(&config).await {
            Ok(result) => result,
            Err(err) => return hardware_error_response(&err),
        };

    if !state.config_mutable {
        return config_write_blocked_response(state.config_path.as_path());
    }

    // Create the config directory on first save if it does not exist yet.
    if let Some(parent) = state.config_path.parent() {
        if !parent.as_os_str().is_empty() && !parent.exists() {
            if let Err(err) = std::fs::create_dir_all(parent) {
                return config_save_error_to_response(&state.config_path, &anyhow::Error::new(err));
            }
        }
    }

    if let Err(err) = crate::settings::save_config_and_project(
        state.db.as_ref(),
        state.config_path.as_path(),
        &config,
    )
    .await
    {
        return config_save_error_to_response(
            &state.config_path,
            &anyhow::Error::msg(err.to_string()),
        );
    }

    {
        let mut config_lock = state.config.write().await;
        *config_lock = config.clone();
    }

    state.agent.set_manual_override(true);
    *state.agent.engine_mode.write().await = config.system.engine_mode;
    state
        .agent
        .set_concurrent_jobs(config.transcode.concurrent_jobs)
        .await;
    replace_runtime_hardware(state.as_ref(), hardware_info, probe_log).await;
    refresh_file_watcher(&state).await;
    // Re-evaluate schedule windows against the new config.
    state.scheduler.trigger();

    // Echo back the raw TOML the client sent, paired with its parsed form.
    axum::Json(crate::settings::config_response(payload.raw_toml, config)).into_response()
}
|
||||
|
||||
// Notification settings
|
||||
|
||||
/// Request body for creating (or test-firing) a notification target.
#[derive(Deserialize)]
pub(crate) struct AddNotificationTargetPayload {
    name: String,
    target_type: String,
    // Destination URL; validated via validate_notification_url before use.
    endpoint_url: String,
    auth_token: Option<String>,
    // Event names this target subscribes to.
    events: Vec<String>,
    enabled: bool,
}
|
||||
|
||||
pub(crate) async fn get_notifications_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_notification_targets().await {
|
||||
Ok(t) => axum::Json(serde_json::json!(t)).into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// POST: add a notification target.
///
/// Validates the endpoint URL, appends the target to a cloned config,
/// validates and persists the config, swaps it in, then re-reads the DB to
/// return the stored row.
pub(crate) async fn add_notification_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<AddNotificationTargetPayload>,
) -> impl IntoResponse {
    if let Err(msg) = validate_notification_url(&payload.endpoint_url).await {
        return (StatusCode::BAD_REQUEST, msg).into_response();
    }

    let mut next_config = state.config.read().await.clone();
    next_config
        .notifications
        .targets
        .push(crate::config::NotificationTargetConfig {
            name: payload.name.clone(),
            target_type: payload.target_type.clone(),
            endpoint_url: payload.endpoint_url.clone(),
            auth_token: payload.auth_token.clone(),
            events: payload.events.clone(),
            enabled: payload.enabled,
        });

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }

    // Locate the stored row by the (name, type, url) triple.
    // NOTE(review): if duplicate triples exist this returns the first match,
    // which may be an older row — confirm duplicates cannot occur.
    match state.db.get_notification_targets().await {
        Ok(targets) => targets
            .into_iter()
            .find(|target| {
                target.name == payload.name
                    && target.target_type == payload.target_type
                    && target.endpoint_url == payload.endpoint_url
            })
            .map(|target| axum::Json(serde_json::json!(target)).into_response())
            .unwrap_or_else(|| StatusCode::OK.into_response()),
        Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    }
}
|
||||
|
||||
/// DELETE: remove a notification target by DB id.
///
/// The config stores targets without ids, so removal matches on the
/// (name, type, url) triple; every config entry matching that triple is
/// dropped, not just one.
pub(crate) async fn delete_notification_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
) -> impl IntoResponse {
    let target = match state.db.get_notification_targets().await {
        Ok(targets) => targets.into_iter().find(|target| target.id == id),
        Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    };
    let Some(target) = target else {
        return StatusCode::NOT_FOUND.into_response();
    };

    let mut next_config = state.config.read().await.clone();
    next_config.notifications.targets.retain(|candidate| {
        !(candidate.name == target.name
            && candidate.target_type == target.target_type
            && candidate.endpoint_url == target.endpoint_url)
    });
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    StatusCode::OK.into_response()
}
|
||||
|
||||
/// POST: fire a test notification without persisting anything.
///
/// Builds an ephemeral target (id 0) from the payload and sends a test
/// message through the notification manager.
pub(crate) async fn test_notification_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<AddNotificationTargetPayload>,
) -> impl IntoResponse {
    if let Err(msg) = validate_notification_url(&payload.endpoint_url).await {
        return (StatusCode::BAD_REQUEST, msg).into_response();
    }

    // Construct a temporary target
    // NOTE(review): a serialization failure here silently yields an empty
    // events string rather than an error — confirm that is acceptable for a
    // test-only send.
    let events_json = serde_json::to_string(&payload.events).unwrap_or_default();
    let target = crate::db::NotificationTarget {
        id: 0,
        name: payload.name,
        target_type: payload.target_type,
        endpoint_url: payload.endpoint_url,
        auth_token: payload.auth_token,
        events: events_json,
        enabled: payload.enabled,
        created_at: chrono::Utc::now(),
    };

    match state.notification_manager.send_test(&target).await {
        Ok(_) => StatusCode::OK.into_response(),
        Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    }
}
|
||||
|
||||
// Schedule settings
|
||||
|
||||
pub(crate) async fn get_schedule_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match state.db.get_schedule_windows().await {
|
||||
Ok(w) => axum::Json(serde_json::json!(w)).into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Request body for adding a schedule window.
#[derive(Deserialize)]
pub(crate) struct AddSchedulePayload {
    // "HH:MM" strings; normalized via normalize_schedule_time on the server.
    start_time: String,
    end_time: String,
    // Day indices 0-6; must be non-empty.
    days_of_week: Vec<i32>,
    enabled: bool,
}
|
||||
|
||||
/// POST: add a schedule window.
///
/// Validates day indices and HH:MM times, appends the window to a cloned
/// config, persists, swaps the live config, triggers the scheduler, then
/// re-reads the DB to return the stored row.
pub(crate) async fn add_schedule_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<AddSchedulePayload>,
) -> impl IntoResponse {
    // days_of_week must be non-empty and contain only 0..=6.
    if payload.days_of_week.is_empty()
        || payload.days_of_week.iter().any(|day| *day < 0 || *day > 6)
    {
        return (
            StatusCode::BAD_REQUEST,
            "days_of_week must include values 0-6",
        )
            .into_response();
    }

    let start_time = match normalize_schedule_time(&payload.start_time) {
        Some(value) => value,
        None => {
            return (StatusCode::BAD_REQUEST, "start_time must be HH:MM").into_response();
        }
    };
    let end_time = match normalize_schedule_time(&payload.end_time) {
        Some(value) => value,
        None => return (StatusCode::BAD_REQUEST, "end_time must be HH:MM").into_response(),
    };

    let mut next_config = state.config.read().await.clone();
    next_config
        .schedule
        .windows
        .push(crate::config::ScheduleWindowConfig {
            start_time: start_time.clone(),
            end_time: end_time.clone(),
            days_of_week: payload.days_of_week.clone(),
            enabled: payload.enabled,
        });

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    // Wake the scheduler so the new window is evaluated immediately.
    state.scheduler.trigger();

    // Locate the stored row to echo back.
    // NOTE(review): this match ignores days_of_week, so two windows sharing
    // start/end/enabled but differing in days could return the wrong row —
    // confirm that combination cannot occur.
    match state.db.get_schedule_windows().await {
        Ok(windows) => windows
            .into_iter()
            .find(|window| {
                window.start_time == start_time
                    && window.end_time == end_time
                    && window.enabled == payload.enabled
            })
            .map(|window| axum::Json(serde_json::json!(window)).into_response())
            .unwrap_or_else(|| StatusCode::OK.into_response()),
        Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    }
}
|
||||
|
||||
/// DELETE: remove a schedule window by DB id.
///
/// Config windows carry no id, so removal matches on the full tuple
/// (start, end, enabled, days); every matching config entry is dropped.
pub(crate) async fn delete_schedule_handler(
    State(state): State<Arc<AppState>>,
    Path(id): Path<i64>,
) -> impl IntoResponse {
    let window = match state.db.get_schedule_windows().await {
        Ok(windows) => windows.into_iter().find(|window| window.id == id),
        Err(e) => return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
    };
    let Some(window) = window else {
        return StatusCode::NOT_FOUND.into_response();
    };

    // The DB stores days_of_week as a JSON string; a parse failure yields an
    // empty vec, which then only matches config windows with no days.
    let days_of_week: Vec<i32> = serde_json::from_str(&window.days_of_week).unwrap_or_default();
    let mut next_config = state.config.read().await.clone();
    next_config.schedule.windows.retain(|candidate| {
        !(candidate.start_time == window.start_time
            && candidate.end_time == window.end_time
            && candidate.enabled == window.enabled
            && candidate.days_of_week == days_of_week)
    });
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    // Wake the scheduler so the removal is evaluated immediately.
    state.scheduler.trigger();
    StatusCode::OK.into_response()
}
|
||||
|
||||
// File settings
|
||||
|
||||
pub(crate) async fn get_file_settings_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
axum::Json(serde_json::json!({
|
||||
"id": 1,
|
||||
"delete_source": config.files.delete_source,
|
||||
"output_extension": config.files.output_extension,
|
||||
"output_suffix": config.files.output_suffix,
|
||||
"replace_strategy": config.files.replace_strategy,
|
||||
"output_root": config.files.output_root,
|
||||
}))
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// Request body for `update_file_settings_handler`.
#[derive(Deserialize)]
pub(crate) struct UpdateFileSettingsPayload {
    // Whether the source file should be deleted after processing.
    delete_source: bool,
    // Extension for output files; rejected if it contains path separators.
    output_extension: String,
    // Suffix appended to output file names; rejected if it contains path separators.
    output_suffix: String,
    // Strategy applied when an output path collides with an existing file.
    replace_strategy: String,
    // Optional root directory for outputs; normalized/validated by
    // normalize_optional_directory before use. Omitted field deserializes
    // as None.
    #[serde(default)]
    output_root: Option<String>,
}
|
||||
|
||||
/// PUT handler for file-handling settings.
///
/// Validates the payload (no path separators in extension/suffix, output_root
/// normalized), merges it into a copy of the config, validates the whole
/// config, persists it, and only then swaps the in-memory config. Responds
/// with the applied settings.
pub(crate) async fn update_file_settings_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<UpdateFileSettingsPayload>,
) -> impl IntoResponse {
    // Reject values that could escape the output directory once joined into
    // a file name.
    if has_path_separator(&payload.output_extension) || has_path_separator(&payload.output_suffix) {
        return (
            StatusCode::BAD_REQUEST,
            "output_extension and output_suffix must not contain path separators",
        )
            .into_response();
    }

    // Normalize/validate the optional output root directory.
    let output_root =
        match normalize_optional_directory(payload.output_root.as_deref(), "output_root") {
            Ok(value) => value,
            Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
        };

    let mut next_config = state.config.read().await.clone();
    next_config.files.delete_source = payload.delete_source;
    next_config.files.output_extension = payload.output_extension.clone();
    next_config.files.output_suffix = payload.output_suffix.clone();
    next_config.files.replace_strategy = payload.replace_strategy.clone();
    next_config.files.output_root = output_root.clone();

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }
    // Persist first; only swap the in-memory config after a successful save.
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    // Echo the applied settings; "id" is a fixed singleton identifier kept
    // for API-shape compatibility.
    axum::Json(serde_json::json!({
        "id": 1,
        "delete_source": payload.delete_source,
        "output_extension": payload.output_extension,
        "output_suffix": payload.output_suffix,
        "replace_strategy": payload.replace_strategy,
        "output_root": output_root,
    }))
    .into_response()
}
|
||||
|
||||
// UI Preferences
|
||||
|
||||
/// UI preference payload shared by the GET and PUT preference endpoints.
#[derive(Deserialize, Serialize)]
pub(crate) struct UiPreferences {
    // Identifier of the currently selected UI theme, if any.
    active_theme_id: Option<String>,
}
|
||||
|
||||
pub(crate) async fn get_preferences_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
axum::Json(UiPreferences {
|
||||
active_theme_id: config.appearance.active_theme_id.clone(),
|
||||
})
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// PUT handler updating UI preferences (currently just the active theme id).
///
/// NOTE(review): unlike the other config-update handlers in this file, this
/// one does not call `next_config.validate()` before saving — confirm that
/// `active_theme_id` genuinely needs no validation.
pub(crate) async fn update_preferences_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<UiPreferences>,
) -> impl IntoResponse {
    let mut next_config = state.config.read().await.clone();
    next_config.appearance.active_theme_id = payload.active_theme_id;
    // Persist first; only swap the in-memory config after a successful save.
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        let mut config = state.config.write().await;
        *config = next_config;
    }
    StatusCode::OK.into_response()
}
|
||||
264
src/server/sse.rs
Normal file
264
src/server/sse.rs
Normal file
@@ -0,0 +1,264 @@
|
||||
//! Server-sent events (SSE) streaming.
|
||||
|
||||
use crate::db::{AlchemistEvent, ConfigEvent, JobEvent, SystemEvent};
|
||||
use axum::{
|
||||
extract::State,
|
||||
response::sse::{Event as AxumEvent, Sse},
|
||||
};
|
||||
use futures::stream::{self, Stream, StreamExt};
|
||||
use std::convert::Infallible;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::broadcast;
|
||||
use tracing::warn;
|
||||
|
||||
use super::AppState;
|
||||
|
||||
/// One server-sent event: the `event:` name plus its JSON-encoded `data:` string.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct SseMessage {
    // SSE event name; &'static str because all event names are compile-time fixed.
    pub(crate) event_name: &'static str,
    // Payload, already serialized to a JSON string.
    pub(crate) data: String,
}
|
||||
|
||||
impl From<SseMessage> for AxumEvent {
|
||||
fn from(message: SseMessage) -> Self {
|
||||
AxumEvent::default()
|
||||
.event(message.event_name)
|
||||
.data(message.data)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_message_for_event(event: &AlchemistEvent) -> SseMessage {
|
||||
match event {
|
||||
AlchemistEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
} => SseMessage {
|
||||
event_name: "log",
|
||||
data: serde_json::json!({
|
||||
"level": level,
|
||||
"job_id": job_id,
|
||||
"message": message
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
AlchemistEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
} => SseMessage {
|
||||
event_name: "progress",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"percentage": percentage,
|
||||
"time": time
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
AlchemistEvent::JobStateChanged { job_id, status } => SseMessage {
|
||||
event_name: "status",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"status": status
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
AlchemistEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
} => SseMessage {
|
||||
event_name: "decision",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"action": action,
|
||||
"reason": reason
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_message_for_job_event(event: &JobEvent) -> SseMessage {
|
||||
match event {
|
||||
JobEvent::Log {
|
||||
level,
|
||||
job_id,
|
||||
message,
|
||||
} => SseMessage {
|
||||
event_name: "log",
|
||||
data: serde_json::json!({
|
||||
"level": level,
|
||||
"job_id": job_id,
|
||||
"message": message
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
JobEvent::Progress {
|
||||
job_id,
|
||||
percentage,
|
||||
time,
|
||||
} => SseMessage {
|
||||
event_name: "progress",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"percentage": percentage,
|
||||
"time": time
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
JobEvent::StateChanged { job_id, status } => SseMessage {
|
||||
event_name: "status",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"status": status
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
JobEvent::Decision {
|
||||
job_id,
|
||||
action,
|
||||
reason,
|
||||
} => SseMessage {
|
||||
event_name: "decision",
|
||||
data: serde_json::json!({
|
||||
"job_id": job_id,
|
||||
"action": action,
|
||||
"reason": reason
|
||||
})
|
||||
.to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_message_for_config_event(event: &ConfigEvent) -> SseMessage {
|
||||
match event {
|
||||
ConfigEvent::Updated(config) => SseMessage {
|
||||
event_name: "config_updated",
|
||||
data: serde_json::to_string(config).unwrap_or_else(|_| "{}".to_string()),
|
||||
},
|
||||
ConfigEvent::WatchFolderAdded(path) => SseMessage {
|
||||
event_name: "watch_folder_added",
|
||||
data: serde_json::json!({ "path": path }).to_string(),
|
||||
},
|
||||
ConfigEvent::WatchFolderRemoved(path) => SseMessage {
|
||||
event_name: "watch_folder_removed",
|
||||
data: serde_json::json!({ "path": path }).to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_message_for_system_event(event: &SystemEvent) -> SseMessage {
|
||||
match event {
|
||||
SystemEvent::ScanStarted => SseMessage {
|
||||
event_name: "scan_started",
|
||||
data: "{}".to_string(),
|
||||
},
|
||||
SystemEvent::ScanCompleted => SseMessage {
|
||||
event_name: "scan_completed",
|
||||
data: "{}".to_string(),
|
||||
},
|
||||
SystemEvent::EngineStatusChanged => SseMessage {
|
||||
event_name: "engine_status_changed",
|
||||
data: "{}".to_string(),
|
||||
},
|
||||
SystemEvent::HardwareStateChanged => SseMessage {
|
||||
event_name: "hardware_state_changed",
|
||||
data: "{}".to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_lagged_message(skipped: u64) -> SseMessage {
|
||||
SseMessage {
|
||||
event_name: "lagged",
|
||||
data: serde_json::json!({ "skipped": skipped }).to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn sse_unified_stream(
|
||||
job_rx: broadcast::Receiver<JobEvent>,
|
||||
config_rx: broadcast::Receiver<ConfigEvent>,
|
||||
system_rx: broadcast::Receiver<SystemEvent>,
|
||||
) -> impl Stream<Item = std::result::Result<SseMessage, Infallible>> {
|
||||
// Create individual streams for each event type
|
||||
let job_stream = stream::unfold(job_rx, |mut rx| async move {
|
||||
match rx.recv().await {
|
||||
Ok(event) => Some((Ok(sse_message_for_job_event(&event)), rx)),
|
||||
Err(broadcast::error::RecvError::Lagged(skipped)) => {
|
||||
warn!("SSE subscriber lagged on job events; skipped {skipped} events");
|
||||
Some((Ok(sse_lagged_message(skipped)), rx))
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed) => None,
|
||||
}
|
||||
});
|
||||
|
||||
let config_stream = stream::unfold(config_rx, |mut rx| async move {
|
||||
match rx.recv().await {
|
||||
Ok(event) => Some((Ok(sse_message_for_config_event(&event)), rx)),
|
||||
Err(broadcast::error::RecvError::Lagged(skipped)) => {
|
||||
warn!("SSE subscriber lagged on config events; skipped {skipped} events");
|
||||
Some((Ok(sse_lagged_message(skipped)), rx))
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed) => None,
|
||||
}
|
||||
});
|
||||
|
||||
let system_stream = stream::unfold(system_rx, |mut rx| async move {
|
||||
match rx.recv().await {
|
||||
Ok(event) => Some((Ok(sse_message_for_system_event(&event)), rx)),
|
||||
Err(broadcast::error::RecvError::Lagged(skipped)) => {
|
||||
warn!("SSE subscriber lagged on system events; skipped {skipped} events");
|
||||
Some((Ok(sse_lagged_message(skipped)), rx))
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed) => None,
|
||||
}
|
||||
});
|
||||
|
||||
// Merge all streams - this will interleave events from all channels
|
||||
futures::stream::select_all([
|
||||
job_stream.boxed(),
|
||||
config_stream.boxed(),
|
||||
system_stream.boxed(),
|
||||
])
|
||||
}
|
||||
|
||||
pub(crate) fn sse_message_stream(
|
||||
rx: broadcast::Receiver<AlchemistEvent>,
|
||||
) -> impl Stream<Item = std::result::Result<SseMessage, Infallible>> {
|
||||
stream::unfold(rx, |mut rx| async move {
|
||||
match rx.recv().await {
|
||||
Ok(event) => Some((Ok(sse_message_for_event(&event)), rx)),
|
||||
Err(broadcast::error::RecvError::Lagged(skipped)) => {
|
||||
warn!("SSE subscriber lagged; skipped {skipped} events");
|
||||
Some((Ok(sse_lagged_message(skipped)), rx))
|
||||
}
|
||||
Err(broadcast::error::RecvError::Closed) => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// GET /events — unified SSE endpoint.
///
/// Merges the three typed broadcast channels (jobs, config, system) with the
/// legacy untyped channel so older clients keep receiving events, and applies
/// the default keep-alive so idle connections are not dropped by proxies.
pub(crate) async fn sse_handler(
    State(state): State<Arc<AppState>>,
) -> Sse<impl Stream<Item = std::result::Result<AxumEvent, Infallible>>> {
    // Subscribe to all channels
    let job_rx = state.event_channels.jobs.subscribe();
    let config_rx = state.event_channels.config.subscribe();
    let system_rx = state.event_channels.system.subscribe();
    let legacy_rx = state.tx.subscribe();

    // Create unified stream from new typed channels
    let unified_stream = sse_unified_stream(job_rx, config_rx, system_rx);

    // Create legacy stream for backwards compatibility
    let legacy_stream = sse_message_stream(legacy_rx);

    // Merge both streams. The Err arm is unreachable (Infallible); the empty
    // match proves that to the compiler.
    let combined_stream =
        futures::stream::select(unified_stream, legacy_stream).map(|message| match message {
            Ok(message) => Ok(message.into()),
            Err(never) => match never {},
        });

    Sse::new(combined_stream).keep_alive(axum::response::sse::KeepAlive::default())
}
|
||||
103
src/server/stats.rs
Normal file
103
src/server/stats.rs
Normal file
@@ -0,0 +1,103 @@
|
||||
//! Statistics and savings dashboard handlers.
|
||||
|
||||
use super::{AppState, config_read_error_response};
|
||||
use crate::db::Db;
|
||||
use crate::error::Result;
|
||||
use axum::{extract::State, response::IntoResponse};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Aggregate job counters served by the stats endpoint.
pub(crate) struct StatsData {
    // Sum of the counts across all job states.
    pub(crate) total: i64,
    // Jobs in the "completed" state.
    pub(crate) completed: i64,
    // Jobs in an in-flight state (encoding/analyzing/remuxing/resuming).
    pub(crate) active: i64,
    // Jobs in the "failed" state.
    pub(crate) failed: i64,
    // Configured maximum number of concurrent jobs.
    pub(crate) concurrent_limit: usize,
}
|
||||
|
||||
pub(crate) async fn get_stats_data(db: &Db, concurrent_limit: usize) -> Result<StatsData> {
|
||||
let s = db.get_stats().await?;
|
||||
let total = s
|
||||
.as_object()
|
||||
.map(|m| m.values().filter_map(|v| v.as_i64()).sum::<i64>())
|
||||
.unwrap_or(0);
|
||||
let completed = s.get("completed").and_then(|v| v.as_i64()).unwrap_or(0);
|
||||
let active = s
|
||||
.as_object()
|
||||
.map(|m| {
|
||||
m.iter()
|
||||
.filter(|(k, _)| {
|
||||
["encoding", "analyzing", "remuxing", "resuming"].contains(&k.as_str())
|
||||
})
|
||||
.map(|(_, v)| v.as_i64().unwrap_or(0))
|
||||
.sum::<i64>()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
let failed = s.get("failed").and_then(|v| v.as_i64()).unwrap_or(0);
|
||||
|
||||
Ok(StatsData {
|
||||
total,
|
||||
completed,
|
||||
active,
|
||||
failed,
|
||||
concurrent_limit,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) async fn stats_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match get_stats_data(&state.db, state.agent.concurrent_jobs_limit()).await {
|
||||
Ok(stats) => axum::Json(serde_json::json!({
|
||||
"total": stats.total,
|
||||
"completed": stats.completed,
|
||||
"active": stats.active,
|
||||
"failed": stats.failed,
|
||||
"concurrent_limit": stats.concurrent_limit
|
||||
}))
|
||||
.into_response(),
|
||||
Err(err) => config_read_error_response("load job stats", &err),
|
||||
}
|
||||
}
|
||||
|
||||
/// GET handler for lifetime aggregated encode statistics.
pub(crate) async fn aggregated_stats_handler(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    match state.db.get_aggregated_stats().await {
        Ok(stats) => {
            // NOTE(review): savings goes negative if outputs ended up larger
            // than inputs overall — confirm the frontend handles that.
            let savings = stats.total_input_size - stats.total_output_size;
            axum::Json(serde_json::json!({
                "total_input_bytes": stats.total_input_size,
                "total_output_bytes": stats.total_output_size,
                "total_savings_bytes": savings,
                "total_time_seconds": stats.total_encode_time_seconds,
                "total_jobs": stats.completed_jobs,
                // Missing VMAF data is reported as 0.0 rather than null.
                "avg_vmaf": stats.avg_vmaf.unwrap_or(0.0)
            }))
            .into_response()
        }
        Err(err) => config_read_error_response("load aggregated stats", &err),
    }
}
|
||||
|
||||
pub(crate) async fn daily_stats_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
match state.db.get_daily_stats(30).await {
|
||||
Ok(stats) => axum::Json(serde_json::json!(stats)).into_response(),
|
||||
Err(err) => config_read_error_response("load daily stats", &err),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn detailed_stats_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_detailed_encode_stats(50).await {
|
||||
Ok(stats) => axum::Json(serde_json::json!(stats)).into_response(),
|
||||
Err(err) => config_read_error_response("load detailed stats", &err),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn savings_summary_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.db.get_savings_summary().await {
|
||||
Ok(summary) => axum::Json(summary).into_response(),
|
||||
Err(err) => config_read_error_response("load storage savings summary", &err),
|
||||
}
|
||||
}
|
||||
321
src/server/system.rs
Normal file
321
src/server/system.rs
Normal file
@@ -0,0 +1,321 @@
|
||||
//! System information, hardware info, resources, health handlers.
|
||||
|
||||
use super::{AppState, config_read_error_response};
|
||||
use axum::{
|
||||
extract::State,
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use serde::Serialize;
|
||||
use std::process::{Command, Stdio};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tracing::error;
|
||||
|
||||
/// Live host resource snapshot served by the resources endpoint.
#[derive(Serialize)]
struct SystemResources {
    // Average usage across all logical CPUs, in percent.
    cpu_percent: f32,
    memory_used_mb: u64,
    memory_total_mb: u64,
    memory_percent: f32,
    // Seconds since the server process started.
    uptime_seconds: u64,
    active_jobs: i64,
    concurrent_limit: usize,
    // Number of logical CPUs.
    cpu_count: usize,
    // GPU metrics from nvidia-smi; None when no NVIDIA GPU/tool is available.
    gpu_utilization: Option<f32>,
    gpu_memory_percent: Option<f32>,
}
|
||||
|
||||
/// GET handler reporting live host resource usage (CPU, memory, GPU, jobs).
///
/// Results are cached for 500 ms so multiple dashboard widgets polling at
/// once don't each refresh sysinfo and shell out to nvidia-smi. The cache
/// mutex is held for the entire refresh, which also serializes concurrent
/// refreshes.
pub(crate) async fn system_resources_handler(State(state): State<Arc<AppState>>) -> Response {
    // Serve from cache when the last sample is fresh enough.
    let mut cache = state.resources_cache.lock().await;
    if let Some((value, cached_at)) = cache.as_ref() {
        if cached_at.elapsed() < Duration::from_millis(500) {
            return axum::Json(value.clone()).into_response();
        }
    }

    // Sample CPU/memory under the sysinfo lock, releasing it before the
    // slower DB and GPU queries below.
    let (cpu_percent, memory_used_mb, memory_total_mb, memory_percent, cpu_count) = {
        let mut sys = state.sys.lock().await;
        sys.refresh_all();

        // Average across all logical CPUs; max(1) guards division by zero
        // if no CPUs are reported.
        let cpu_percent =
            sys.cpus().iter().map(|c| c.cpu_usage()).sum::<f32>() / sys.cpus().len().max(1) as f32;
        let cpu_count = sys.cpus().len();
        let memory_used_mb = (sys.used_memory() / 1024 / 1024) as u64;
        let memory_total_mb = (sys.total_memory() / 1024 / 1024) as u64;
        let memory_percent = if memory_total_mb > 0 {
            (memory_used_mb as f32 / memory_total_mb as f32) * 100.0
        } else {
            0.0
        };

        (
            cpu_percent,
            memory_used_mb,
            memory_total_mb,
            memory_percent,
            cpu_count,
        )
    };

    let uptime_seconds = state.start_time.elapsed().as_secs();
    let stats = match state.db.get_job_stats().await {
        Ok(stats) => stats,
        Err(err) => return config_read_error_response("load system resource stats", &err),
    };
    // nvidia-smi is a blocking subprocess; run it off the async runtime.
    let (gpu_utilization, gpu_memory_percent) = tokio::task::spawn_blocking(query_gpu_utilization)
        .await
        .unwrap_or((None, None));

    let value = match serde_json::to_value(SystemResources {
        cpu_percent,
        memory_used_mb,
        memory_total_mb,
        memory_percent,
        uptime_seconds,
        active_jobs: stats.active,
        concurrent_limit: state.agent.concurrent_jobs_limit(),
        cpu_count,
        gpu_utilization,
        gpu_memory_percent,
    }) {
        Ok(value) => value,
        Err(err) => {
            error!("Failed to serialize system resource payload: {}", err);
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                "Failed to serialize system resource payload",
            )
                .into_response();
        }
    };

    // Store the new sample in the cache before responding.
    *cache = Some((value.clone(), Instant::now()));
    axum::Json(value).into_response()
}
|
||||
|
||||
/// Query GPU utilization using nvidia-smi (NVIDIA) or other platform-specific tools
|
||||
fn query_gpu_utilization() -> (Option<f32>, Option<f32>) {
|
||||
// Try nvidia-smi first
|
||||
if let Some(output) = run_command_with_timeout(
|
||||
"nvidia-smi",
|
||||
&[
|
||||
"--query-gpu=utilization.gpu,memory.used,memory.total",
|
||||
"--format=csv,noheader,nounits",
|
||||
],
|
||||
Duration::from_secs(2),
|
||||
) {
|
||||
if output.status.success() {
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
// Format: "45, 2048, 8192" (utilization %, memory used MB, memory total MB)
|
||||
let parts: Vec<&str> = stdout.trim().split(',').map(|s| s.trim()).collect();
|
||||
if parts.len() >= 3 {
|
||||
let util = parts[0].parse::<f32>().ok();
|
||||
let mem_used = parts[1].parse::<f32>().ok();
|
||||
let mem_total = parts[2].parse::<f32>().ok();
|
||||
let mem_percent = match (mem_used, mem_total) {
|
||||
(Some(used), Some(total)) if total > 0.0 => Some((used / total) * 100.0),
|
||||
_ => None,
|
||||
};
|
||||
return (util, mem_percent);
|
||||
}
|
||||
}
|
||||
}
|
||||
(None, None)
|
||||
}
|
||||
|
||||
/// Run an external command with a hard timeout, capturing stdout/stderr.
///
/// Polls `try_wait` every 50 ms; on timeout the child is killed and reaped
/// and `None` is returned. Also returns `None` if the command cannot be
/// spawned or its output cannot be collected.
fn run_command_with_timeout(
    command: &str,
    args: &[&str],
    timeout: Duration,
) -> Option<std::process::Output> {
    let mut child = Command::new(command)
        .args(args)
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .ok()?;
    let deadline = Instant::now() + timeout;

    loop {
        // Finished? Collect and return the captured output.
        if matches!(child.try_wait(), Ok(Some(_))) {
            return child.wait_with_output().ok();
        }

        // Past the deadline: kill and reap so no zombie is left behind.
        if Instant::now() >= deadline {
            let _ = child.kill();
            let _ = child.wait();
            return None;
        }

        std::thread::sleep(Duration::from_millis(50));
    }
}
|
||||
|
||||
/// Payload for the system-info endpoint.
#[derive(Serialize)]
struct SystemInfo {
    // Crate version baked in at compile time (CARGO_PKG_VERSION).
    version: String,
    // "<os> <arch>" of the running host.
    os_version: String,
    // True when /.dockerenv exists (i.e. running inside a Docker container).
    is_docker: bool,
    // Whether telemetry is enabled in the config.
    telemetry_enabled: bool,
    // Result of the ffmpeg version probe, or "Unknown" if it failed.
    ffmpeg_version: String,
}
|
||||
|
||||
pub(crate) async fn get_system_info_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
let version = env!("CARGO_PKG_VERSION").to_string();
|
||||
let os_version = format!("{} {}", std::env::consts::OS, std::env::consts::ARCH);
|
||||
let is_docker = std::path::Path::new("/.dockerenv").exists();
|
||||
|
||||
// Attempt to verify ffmpeg version
|
||||
let ffmpeg_version =
|
||||
crate::media::ffmpeg::verify_ffmpeg().unwrap_or_else(|_| "Unknown".to_string());
|
||||
|
||||
axum::Json(SystemInfo {
|
||||
version,
|
||||
os_version,
|
||||
is_docker,
|
||||
telemetry_enabled: config.system.enable_telemetry,
|
||||
ffmpeg_version,
|
||||
})
|
||||
.into_response()
|
||||
}
|
||||
|
||||
pub(crate) async fn get_hardware_info_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
match state.hardware_state.snapshot().await {
|
||||
Some(info) => axum::Json(info).into_response(),
|
||||
None => (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Hardware state unavailable",
|
||||
)
|
||||
.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// GET handler returning the accumulated hardware probe log as JSON.
pub(crate) async fn get_hardware_probe_log_handler(
    State(state): State<Arc<AppState>>,
) -> impl IntoResponse {
    axum::Json(state.hardware_probe_log.read().await.clone()).into_response()
}
|
||||
|
||||
pub(crate) async fn health_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let uptime = state.start_time.elapsed();
|
||||
let hours = uptime.as_secs() / 3600;
|
||||
let minutes = (uptime.as_secs() % 3600) / 60;
|
||||
let seconds = uptime.as_secs() % 60;
|
||||
|
||||
axum::Json(serde_json::json!({
|
||||
"status": "ok",
|
||||
"version": env!("CARGO_PKG_VERSION"),
|
||||
"uptime": format!("{}h {}m {}s", hours, minutes, seconds),
|
||||
"uptime_seconds": uptime.as_secs()
|
||||
}))
|
||||
}
|
||||
|
||||
pub(crate) async fn ready_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
// Check if database is accessible
|
||||
let db_ok = state.db.get_stats().await.is_ok();
|
||||
|
||||
if db_ok {
|
||||
(
|
||||
StatusCode::OK,
|
||||
axum::Json(serde_json::json!({ "ready": true })),
|
||||
)
|
||||
} else {
|
||||
(
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
axum::Json(serde_json::json!({ "ready": false, "reason": "database unavailable" })),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Filesystem browsing
|
||||
|
||||
/// Query string for the filesystem browse endpoint.
#[derive(serde::Deserialize)]
pub(crate) struct FsBrowseQuery {
    // Directory to list; None is passed through to fs_browser::browse,
    // which chooses its own default root (behavior defined there).
    path: Option<String>,
}
|
||||
|
||||
pub(crate) async fn fs_browse_handler(
|
||||
axum::extract::Query(query): axum::extract::Query<FsBrowseQuery>,
|
||||
) -> impl IntoResponse {
|
||||
match crate::system::fs_browser::browse(query.path.as_deref()).await {
|
||||
Ok(response) => axum::Json(response).into_response(),
|
||||
Err(err) => config_read_error_response("browse server filesystem", &err),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_recommendations_handler(
|
||||
State(state): State<Arc<AppState>>,
|
||||
) -> impl IntoResponse {
|
||||
let config = state.config.read().await.clone();
|
||||
match crate::system::fs_browser::recommendations(&config, state.db.as_ref()).await {
|
||||
Ok(response) => axum::Json(response).into_response(),
|
||||
Err(err) => config_read_error_response("load folder recommendations", &err),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn fs_preview_handler(
|
||||
axum::Json(payload): axum::Json<crate::system::fs_browser::FsPreviewRequest>,
|
||||
) -> impl IntoResponse {
|
||||
match crate::system::fs_browser::preview(payload).await {
|
||||
Ok(response) => axum::Json(response).into_response(),
|
||||
Err(err) => config_read_error_response("preview selected server folders", &err),
|
||||
}
|
||||
}
|
||||
|
||||
// Telemetry
|
||||
|
||||
/// Telemetry snapshot assembled by `telemetry_payload_handler`.
#[derive(Serialize)]
struct TelemetryPayload {
    // Identifier taken from state.telemetry_runtime_id.
    // NOTE(review): assumed to be a non-identifying per-runtime id — confirm
    // where it is generated.
    runtime_id: String,
    // RFC 3339 UTC timestamp of payload creation.
    timestamp: String,
    // Crate version (CARGO_PKG_VERSION).
    version: String,
    // "<os> <arch>" of the running host.
    os_version: String,
    // True when /.dockerenv exists.
    is_docker: bool,
    uptime_seconds: u64,
    cpu_count: usize,
    memory_total_mb: u64,
    active_jobs: i64,
    concurrent_limit: usize,
}
|
||||
|
||||
/// Build the telemetry payload that would be submitted upstream.
///
/// Returns 403 when telemetry is disabled in the config, so the endpoint can
/// also serve as a preview of exactly what would be reported.
pub(crate) async fn telemetry_payload_handler(State(state): State<Arc<AppState>>) -> Response {
    let config = state.config.read().await;
    if !config.system.enable_telemetry {
        return (StatusCode::FORBIDDEN, "Telemetry disabled").into_response();
    }

    // Sample host facts under the sysinfo lock, then release it.
    let (cpu_count, memory_total_mb) = {
        let mut sys = state.sys.lock().await;
        sys.refresh_memory();
        (sys.cpus().len(), (sys.total_memory() / 1024 / 1024) as u64)
    };

    let version = env!("CARGO_PKG_VERSION").to_string();
    let os_version = format!("{} {}", std::env::consts::OS, std::env::consts::ARCH);
    let is_docker = std::path::Path::new("/.dockerenv").exists();
    let uptime_seconds = state.start_time.elapsed().as_secs();
    let stats = match state.db.get_job_stats().await {
        Ok(stats) => stats,
        Err(err) => return config_read_error_response("load telemetry stats", &err),
    };

    axum::Json(TelemetryPayload {
        runtime_id: state.telemetry_runtime_id.clone(),
        timestamp: chrono::Utc::now().to_rfc3339(),
        version,
        os_version,
        is_docker,
        uptime_seconds,
        cpu_count,
        memory_total_mb,
        active_jobs: stats.active,
        // NOTE(review): reports the configured transcode.concurrent_jobs,
        // while other handlers report state.agent.concurrent_jobs_limit() —
        // confirm the difference is intentional.
        concurrent_limit: config.transcode.concurrent_jobs,
    })
    .into_response()
}
|
||||
1273
src/server/tests.rs
Normal file
1273
src/server/tests.rs
Normal file
File diff suppressed because it is too large
Load Diff
307
src/server/wizard.rs
Normal file
307
src/server/wizard.rs
Normal file
@@ -0,0 +1,307 @@
|
||||
//! Setup wizard API handlers.
|
||||
|
||||
use super::auth::build_session_cookie;
|
||||
use super::{
|
||||
AppState, canonicalize_directory_path, config_write_blocked_response, hardware_error_response,
|
||||
refresh_file_watcher, replace_runtime_hardware, save_config_or_response,
|
||||
};
|
||||
use argon2::{
|
||||
Argon2,
|
||||
password_hash::{PasswordHasher, SaltString},
|
||||
};
|
||||
use axum::{
|
||||
extract::State,
|
||||
http::{StatusCode, header},
|
||||
response::IntoResponse,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use rand::Rng;
|
||||
use rand::TryRngCore;
|
||||
use rand::rngs::OsRng;
|
||||
use serde::Deserialize;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::Ordering;
|
||||
use tracing::{error, info};
|
||||
|
||||
// Serde default for SetupConfig::min_bpp_threshold.
fn default_setup_min_bpp() -> f64 {
    0.1
}
|
||||
|
||||
// Serde default for boolean setup fields that default to enabled
// (used by SetupConfig::allow_cpu_encoding).
fn default_setup_true() -> bool {
    true
}
|
||||
|
||||
// Serde default for SetupConfig::enable_telemetry — telemetry is opt-in,
// so it defaults to off.
fn default_setup_telemetry() -> bool {
    false
}
|
||||
|
||||
/// Request body for completing the first-run setup wizard.
///
/// Two shapes are accepted by `setup_complete_handler`: a full `settings`
/// config object (newer frontend), or the flat top-level fields (legacy
/// wizard). When `settings` is present, the flat fields' validation checks
/// are skipped in the handler.
#[derive(Deserialize)]
pub(crate) struct SetupConfig {
    // Admin account credentials (handler enforces min lengths 3 / 8).
    username: String,
    password: String,
    // Full config object; takes precedence over the flat fields below.
    #[serde(default)]
    settings: Option<serde_json::Value>,
    // Validated to 0.0-1.0 in the legacy flow.
    #[serde(default)]
    size_reduction_threshold: f64,
    // Validated to >= 0.0 in the legacy flow; defaults to 0.1.
    #[serde(default = "default_setup_min_bpp")]
    min_bpp_threshold: f64,
    #[serde(default)]
    min_file_size_mb: u64,
    // Must be > 0 in the legacy flow.
    #[serde(default)]
    concurrent_jobs: usize,
    // Library directories; normalized/deduplicated by normalize_setup_directories.
    #[serde(default)]
    directories: Vec<String>,
    // Defaults to true.
    #[serde(default = "default_setup_true")]
    allow_cpu_encoding: bool,
    // Telemetry is opt-in; defaults to false.
    #[serde(default = "default_setup_telemetry")]
    enable_telemetry: bool,
    // Transcode target codec and quality profile (type-level defaults).
    #[serde(default)]
    output_codec: crate::config::OutputCodec,
    #[serde(default)]
    quality_profile: crate::config::QualityProfile,
}
|
||||
|
||||
pub(crate) fn normalize_setup_directories(
|
||||
directories: &[String],
|
||||
) -> std::result::Result<Vec<String>, String> {
|
||||
let mut normalized = Vec::new();
|
||||
let mut seen = std::collections::HashSet::new();
|
||||
|
||||
for value in directories {
|
||||
let trimmed = value.trim();
|
||||
if trimmed.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let canonical = canonicalize_directory_path(trimmed, "directories")?;
|
||||
let canonical = canonical.to_string_lossy().to_string();
|
||||
if seen.insert(canonical.clone()) {
|
||||
normalized.push(canonical);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(normalized)
|
||||
}
|
||||
|
||||
pub(crate) async fn setup_status_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let config = state.config.read().await;
|
||||
axum::Json(serde_json::json!({
|
||||
"setup_required": state.setup_required.load(Ordering::Relaxed),
|
||||
"enable_telemetry": config.system.enable_telemetry,
|
||||
"config_mutable": state.config_mutable
|
||||
}))
|
||||
}
|
||||
|
||||
/// POST handler that finalizes first-run setup.
///
/// Order of operations (deliberate — later steps must not run if earlier ones
/// fail): one-shot guard → credential/field validation → build the candidate
/// `Config` (either from the `settings` JSON blob or the flat legacy fields)
/// → semantic validation → hardware probe → persist config → create admin
/// user + session → flip runtime state and kick off a background scan.
///
/// Returns 4xx plain-text responses for validation failures, 5xx for
/// crypto/DB failures, and a JSON body plus a session cookie on success.
pub(crate) async fn setup_complete_handler(
    State(state): State<Arc<AppState>>,
    axum::Json(payload): axum::Json<SetupConfig>,
) -> impl IntoResponse {
    // One-shot guard: this endpoint is only usable while setup is pending.
    if !state.setup_required.load(Ordering::Relaxed) {
        return (StatusCode::FORBIDDEN, "Setup already completed").into_response();
    }

    // Credential validation.
    // NOTE(review): `len()` is a byte count, so multi-byte UTF-8 passwords can
    // pass with fewer than 8 visible characters — confirm if that is intended.
    let username = payload.username.trim();
    if username.len() < 3 {
        return (
            StatusCode::BAD_REQUEST,
            "username must be at least 3 characters",
        )
        .into_response();
    }
    if payload.password.len() < 8 {
        return (
            StatusCode::BAD_REQUEST,
            "password must be at least 8 characters",
        )
        .into_response();
    }
    // The flat numeric fields are only authoritative when no `settings` blob
    // was sent, so they are only validated in that case.
    if payload.settings.is_none() && payload.concurrent_jobs == 0 {
        return (StatusCode::BAD_REQUEST, "concurrent_jobs must be > 0").into_response();
    }
    if payload.settings.is_none() && !(0.0..=1.0).contains(&payload.size_reduction_threshold) {
        return (
            StatusCode::BAD_REQUEST,
            "size_reduction_threshold must be 0.0-1.0",
        )
        .into_response();
    }
    if payload.settings.is_none() && payload.min_bpp_threshold < 0.0 {
        return (StatusCode::BAD_REQUEST, "min_bpp_threshold must be >= 0.0").into_response();
    }

    // Bail out early if the config file cannot be written (e.g. read-only mount).
    if !state.config_mutable {
        return config_write_blocked_response(state.config_path.as_path());
    }

    // Build the candidate config from one of two payload shapes.
    let mut next_config = match payload.settings {
        Some(raw_settings) => {
            // Deserialize the frontend SetupSettings into Config,
            // tolerating unknown fields and missing optional fields.
            let mut settings: crate::config::Config = match serde_json::from_value(raw_settings) {
                Ok(c) => c,
                Err(err) => {
                    return (
                        StatusCode::BAD_REQUEST,
                        format!(
                            "Setup configuration is invalid: {}. \
                            Please go back and check your settings.",
                            err
                        ),
                    )
                    .into_response();
                }
            };
            // Canonicalize/deduplicate the library directories supplied in the blob.
            settings.scanner.directories =
                match normalize_setup_directories(&settings.scanner.directories) {
                    Ok(paths) => paths,
                    Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
                };
            settings
        }
        None => {
            // Legacy flat payload: start from the currently loaded config and
            // overlay the individual setup fields.
            let setup_directories = match normalize_setup_directories(&payload.directories) {
                Ok(paths) => paths,
                Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
            };
            let mut config = state.config.read().await.clone();
            config.transcode.concurrent_jobs = payload.concurrent_jobs;
            config.transcode.size_reduction_threshold = payload.size_reduction_threshold;
            config.transcode.min_bpp_threshold = payload.min_bpp_threshold;
            config.transcode.min_file_size_mb = payload.min_file_size_mb;
            config.transcode.output_codec = payload.output_codec;
            config.transcode.quality_profile = payload.quality_profile;
            config.hardware.allow_cpu_encoding = payload.allow_cpu_encoding;
            config.scanner.directories = setup_directories;
            config.system.enable_telemetry = payload.enable_telemetry;
            config
        }
    };
    // File watching is always enabled after setup, regardless of payload shape.
    next_config.scanner.watch_enabled = true;

    // Semantic validation of the merged config (both payload shapes pass
    // through these checks).
    if next_config.scanner.directories.is_empty() {
        return (
            StatusCode::BAD_REQUEST,
            "At least one library directory must be configured.",
        )
        .into_response();
    }

    if next_config.transcode.concurrent_jobs == 0 {
        return (
            StatusCode::BAD_REQUEST,
            "Concurrent jobs must be at least 1.",
        )
        .into_response();
    }

    if let Err(e) = next_config.validate() {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Capture runtime values before `next_config` is moved into the lock below.
    let runtime_concurrent_jobs = next_config.transcode.concurrent_jobs;
    let runtime_engine_mode = next_config.system.engine_mode;

    // Probe hardware before persisting so a bad hardware config is rejected
    // without writing anything.
    let (hardware_info, probe_log) =
        match crate::system::hardware::detect_hardware_with_log(&next_config).await {
            Ok(result) => result,
            Err(err) => return hardware_error_response(&err),
        };

    // Persist to disk first, then swap the in-memory config.
    if let Err(response) = save_config_or_response(&state, &next_config).await {
        return *response;
    }
    {
        // Scoped so the write lock is released before the slower DB work below.
        let mut config_lock = state.config.write().await;
        *config_lock = next_config;
    }

    // Create User and Initial Session after config persistence succeeds.
    let mut salt_bytes = [0u8; 16];
    if let Err(e) = OsRng.try_fill_bytes(&mut salt_bytes) {
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to generate salt: {}", e),
        )
        .into_response();
    }
    let salt = match SaltString::encode_b64(&salt_bytes) {
        Ok(salt) => salt,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to encode salt: {}", e),
            )
            .into_response();
        }
    };
    // Argon2 password hashing with default parameters.
    let argon2 = Argon2::default();
    let password_hash = match argon2.hash_password(payload.password.as_bytes(), &salt) {
        Ok(h) => h.to_string(),
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Hashing failed: {}", e),
            )
            .into_response();
        }
    };

    // NOTE(review): if user creation fails here the config has already been
    // saved but setup_required stays true, so setup can be retried — confirm
    // that re-running with an existing config is acceptable.
    let user_id = match state.db.create_user(username, &password_hash).await {
        Ok(id) => id,
        Err(e) => {
            return (
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("Failed to create user: {}", e),
            )
            .into_response();
        }
    };

    // 64-char alphanumeric session token, valid for 30 days.
    let token: String = rand::rng()
        .sample_iter(rand::distr::Alphanumeric)
        .take(64)
        .map(char::from)
        .collect();
    let expires_at = Utc::now() + chrono::Duration::days(30);

    if let Err(e) = state.db.create_session(user_id, &token, expires_at).await {
        return (
            StatusCode::INTERNAL_SERVER_ERROR,
            format!("Failed to create session: {}", e),
        )
        .into_response();
    }

    // Update Setup State (Hot Reload)
    state.setup_required.store(false, Ordering::Relaxed);
    state.agent.set_manual_override(true);
    *state.agent.engine_mode.write().await = runtime_engine_mode;
    state
        .agent
        .set_concurrent_jobs(runtime_concurrent_jobs)
        .await;
    replace_runtime_hardware(state.as_ref(), hardware_info, probe_log).await;
    refresh_file_watcher(&state).await;

    // Start Scan (optional, but good for UX)
    // Use library_scanner so the UI can track progress via /api/scan/status
    let scanner = state.library_scanner.clone();
    tokio::spawn(async move {
        if let Err(e) = scanner.start_scan().await {
            error!("Background initial scan failed: {}", e);
        }
    });

    info!("Configuration saved via web setup. Auth info created.");

    // Success: set the session cookie and return a small JSON summary.
    let cookie = build_session_cookie(&token);
    (
        [(header::SET_COOKIE, cookie)],
        axum::Json(serde_json::json!({
            "status": "saved",
            "message": "Setup completed successfully.",
            "concurrent_jobs": runtime_concurrent_jobs
        })),
    )
    .into_response()
}
|
||||
@@ -106,6 +106,15 @@ pub async fn preview(request: FsPreviewRequest) -> Result<FsPreviewResponse> {
|
||||
|
||||
fn browse_blocking(path: &Path) -> Result<FsBrowseResponse> {
|
||||
let path = canonical_or_original(path)?;
|
||||
|
||||
// Check if the resolved path is now in a sensitive location
|
||||
// (handles symlinks pointing to sensitive directories)
|
||||
if is_sensitive_path(&path) {
|
||||
return Err(AlchemistError::Watch(
|
||||
"Access to this directory is restricted".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let readable = path.is_dir();
|
||||
let mut warnings = directory_warnings(&path, readable);
|
||||
if !readable {
|
||||
@@ -121,17 +130,31 @@ fn browse_blocking(path: &Path) -> Result<FsBrowseResponse> {
|
||||
if !entry_path.is_dir() {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Check for symlinks and warn about them
|
||||
let is_symlink = entry_path
|
||||
.symlink_metadata()
|
||||
.map(|m| m.file_type().is_symlink())
|
||||
.unwrap_or(false);
|
||||
|
||||
let name = entry.file_name().to_string_lossy().to_string();
|
||||
let hidden = is_hidden(&name, &entry_path);
|
||||
let readable = std::fs::read_dir(&entry_path).is_ok();
|
||||
let media_hint = classify_media_hint(&entry_path);
|
||||
|
||||
let warning = if is_symlink {
|
||||
Some("This is a symbolic link".to_string())
|
||||
} else {
|
||||
entry_warning(&entry_path, readable)
|
||||
};
|
||||
|
||||
Some(FsDirEntry {
|
||||
name,
|
||||
path: entry_path.to_string_lossy().to_string(),
|
||||
readable,
|
||||
hidden,
|
||||
media_hint,
|
||||
warning: entry_warning(&entry_path, readable),
|
||||
warning,
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
@@ -282,11 +305,76 @@ fn preview_blocking(request: FsPreviewRequest) -> Result<FsPreviewResponse> {
|
||||
|
||||
fn resolve_browse_path(path: Option<&str>) -> Result<PathBuf> {
|
||||
match path.map(str::trim).filter(|value| !value.is_empty()) {
|
||||
Some(value) => Ok(PathBuf::from(value)),
|
||||
Some(value) => {
|
||||
let path = PathBuf::from(value);
|
||||
|
||||
// Normalize and resolve the path
|
||||
let resolved = if path.exists() {
|
||||
std::fs::canonicalize(&path).map_err(AlchemistError::Io)?
|
||||
} else {
|
||||
path
|
||||
};
|
||||
|
||||
// Block sensitive system directories
|
||||
if is_sensitive_path(&resolved) {
|
||||
return Err(AlchemistError::Watch(
|
||||
"Access to this directory is restricted".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(resolved)
|
||||
}
|
||||
None => default_browse_root(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a path is a sensitive system directory that shouldn't be browsed.
///
/// Matching is done on the lowercased string form of the path. On Unix the
/// path must equal a sensitive prefix exactly or live underneath it (the
/// trailing `/` check prevents `/etcetera` from matching `/etc`); on Windows
/// a substring match against known system-directory fragments is used.
fn is_sensitive_path(path: &Path) -> bool {
    let path_str = path.to_string_lossy().to_lowercase();

    #[cfg(unix)]
    {
        // Sensitive Unix system directories (incl. macOS /private variants).
        const SENSITIVE_PREFIXES: [&str; 10] = [
            "/etc",
            "/var/log",
            "/var/run",
            "/proc",
            "/sys",
            "/dev",
            "/boot",
            "/root",
            "/private/etc", // macOS
            "/private/var/log",
        ];

        let blocked = SENSITIVE_PREFIXES.iter().any(|prefix| {
            path_str == *prefix || path_str.starts_with(&format!("{}/", prefix))
        });
        if blocked {
            return true;
        }
    }

    #[cfg(windows)]
    {
        // Sensitive Windows system directories (matched anywhere in the path).
        const SENSITIVE_PATTERNS: [&str; 4] = [
            "\\windows\\system32",
            "\\windows\\syswow64",
            "\\windows\\winsxs",
            "\\programdata\\microsoft",
        ];

        if SENSITIVE_PATTERNS
            .iter()
            .any(|pattern| path_str.contains(pattern))
        {
            return true;
        }
    }

    false
}
|
||||
|
||||
fn default_browse_root() -> Result<PathBuf> {
|
||||
#[cfg(target_os = "windows")]
|
||||
{
|
||||
|
||||
46
tests/fixtures/README.md
vendored
Normal file
46
tests/fixtures/README.md
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
# Test Fixtures
|
||||
|
||||
This directory contains small video files for testing the FFmpeg integration.
|
||||
|
||||
## Generated Files
|
||||
|
||||
The following test fixtures are generated by `generate_fixtures.sh` (or manually via FFmpeg):
|
||||
|
||||
- `test_h264.mp4` - 1-second H.264 video (320x240, 30fps)
|
||||
- `test_hevc.mp4` - 1-second HEVC video (320x240, 30fps)
|
||||
- `test_h264_with_audio.mp4` - 1-second H.264 video with AAC audio
|
||||
- `test_h264_with_subtitles.mkv` - 1-second H.264 video with SRT subtitles
|
||||
- `test_subtitle.srt` - Simple SRT subtitle file
|
||||
|
||||
## Usage
|
||||
|
||||
These fixtures are used by the integration tests in:
|
||||
- `tests/integration_ffmpeg.rs` - Full pipeline tests (currently disabled due to server deps)
|
||||
- `tests/integration_ffmpeg_minimal.rs` - Minimal analyzer tests
|
||||
|
||||
## Regenerating Fixtures
|
||||
|
||||
If you need to regenerate the test files:
|
||||
|
||||
```bash
|
||||
cd tests/fixtures
|
||||
|
||||
# H.264 video
|
||||
ffmpeg -f lavfi -i testsrc=duration=1:size=320x240:rate=30 -c:v libx264 -pix_fmt yuv420p -y test_h264.mp4
|
||||
|
||||
# HEVC video
|
||||
ffmpeg -f lavfi -i testsrc=duration=1:size=320x240:rate=30 -c:v libx265 -pix_fmt yuv420p -y test_hevc.mp4
|
||||
|
||||
# H.264 with audio
|
||||
ffmpeg -f lavfi -i testsrc=duration=1:size=320x240:rate=30 -f lavfi -i sine=frequency=1000:duration=1 -c:v libx264 -c:a aac -shortest -pix_fmt yuv420p -y test_h264_with_audio.mp4
|
||||
|
||||
# Subtitle file
|
||||
echo -e "1\n00:00:00,000 --> 00:00:01,000\nTest subtitle" > test_subtitle.srt
|
||||
|
||||
# H.264 with subtitles
|
||||
ffmpeg -f lavfi -i testsrc=duration=1:size=320x240:rate=30 -i test_subtitle.srt -c:v libx264 -c:s srt -map 0 -map 1 -shortest -pix_fmt yuv420p -y test_h264_with_subtitles.mkv
|
||||
```
|
||||
|
||||
## File Sizes
|
||||
|
||||
All test files are deliberately small (< 20KB each) to keep the repository lightweight while providing adequate coverage for FFmpeg integration testing.
|
||||
BIN
tests/fixtures/test_h264.mp4
vendored
Normal file
BIN
tests/fixtures/test_h264.mp4
vendored
Normal file
Binary file not shown.
BIN
tests/fixtures/test_h264_with_audio.mp4
vendored
Normal file
BIN
tests/fixtures/test_h264_with_audio.mp4
vendored
Normal file
Binary file not shown.
BIN
tests/fixtures/test_h264_with_subtitles.mkv
vendored
Normal file
BIN
tests/fixtures/test_h264_with_subtitles.mkv
vendored
Normal file
Binary file not shown.
BIN
tests/fixtures/test_hevc.mp4
vendored
Normal file
BIN
tests/fixtures/test_hevc.mp4
vendored
Normal file
Binary file not shown.
3
tests/fixtures/test_subtitle.srt
vendored
Normal file
3
tests/fixtures/test_subtitle.srt
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
1
|
||||
00:00:00,000 --> 00:00:01,000
|
||||
Test subtitle
|
||||
@@ -294,6 +294,16 @@ where
|
||||
config.hardware.allow_cpu_fallback = true;
|
||||
configure(&mut config);
|
||||
|
||||
// Create event channels for the pipeline
|
||||
let (jobs_tx, _) = broadcast::channel(100);
|
||||
let (config_tx, _) = broadcast::channel(10);
|
||||
let (system_tx, _) = broadcast::channel(10);
|
||||
let event_channels = Arc::new(alchemist::db::EventChannels {
|
||||
jobs: jobs_tx,
|
||||
config: config_tx,
|
||||
system: system_tx,
|
||||
});
|
||||
|
||||
let pipeline = Pipeline::new(
|
||||
db.clone(),
|
||||
Arc::new(Transcoder::new()),
|
||||
@@ -306,6 +316,7 @@ where
|
||||
detection_notes: Vec::new(),
|
||||
})),
|
||||
Arc::new(broadcast::channel(16).0),
|
||||
event_channels,
|
||||
false,
|
||||
);
|
||||
|
||||
|
||||
591
tests/integration_ffmpeg.rs
Normal file
591
tests/integration_ffmpeg.rs
Normal file
@@ -0,0 +1,591 @@
|
||||
//! FFmpeg integration tests for Alchemist
|
||||
//!
|
||||
//! These tests verify the FFmpeg pipeline works correctly end-to-end.
|
||||
//! They require FFmpeg and FFprobe to be available on the system.
|
||||
|
||||
use alchemist::config::{Config, OutputCodec, SubtitleMode};
|
||||
use alchemist::db::{Db, JobState};
|
||||
use alchemist::media::analyzer::FfmpegAnalyzer;
|
||||
use alchemist::media::pipeline::{Analyzer, Pipeline};
|
||||
use alchemist::orchestrator::Transcoder;
|
||||
use alchemist::system::hardware::{HardwareInfo, HardwareState, Vendor};
|
||||
use anyhow::{Context, Result};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
use tokio::sync::{RwLock, broadcast};
|
||||
|
||||
/// Return true when the given command-line tool can be spawned and exits
/// successfully for `-version`; stdout/stderr are discarded.
///
/// Fix: `ffmpeg_available` and `ffprobe_available` duplicated this probe
/// verbatim; the shared logic is factored into one helper.
fn tool_available(tool: &str) -> bool {
    Command::new(tool)
        .arg("-version")
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}

/// Check if FFmpeg is available on the system
fn ffmpeg_available() -> bool {
    tool_available("ffmpeg")
}

/// Check if FFprobe is available on the system
fn ffprobe_available() -> bool {
    tool_available("ffprobe")
}

/// Check if both FFmpeg and FFprobe are available
fn ffmpeg_ready() -> bool {
    ffmpeg_available() && ffprobe_available()
}
|
||||
|
||||
/// Get the path to test fixtures
|
||||
fn fixtures_path() -> PathBuf {
|
||||
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
path.push("tests");
|
||||
path.push("fixtures");
|
||||
path
|
||||
}
|
||||
|
||||
/// Create a temporary directory for test outputs
|
||||
fn temp_output_dir(test_name: &str) -> Result<PathBuf> {
|
||||
let mut path = std::env::temp_dir();
|
||||
path.push(format!(
|
||||
"alchemist_test_{}_{}",
|
||||
test_name,
|
||||
rand::random::<u64>()
|
||||
));
|
||||
std::fs::create_dir_all(&path)?;
|
||||
Ok(path)
|
||||
}
|
||||
|
||||
/// Best-effort removal of a test's temporary directory.
///
/// Errors (e.g. the directory is already gone) are deliberately ignored:
/// cleanup must never fail a test.
fn cleanup_temp_dir(path: &Path) {
    if std::fs::remove_dir_all(path).is_err() {
        // Intentionally ignored — see doc comment.
    }
}
|
||||
|
||||
/// Create a test database
///
/// Opens a fresh SQLite database at a randomly named path under the system
/// temp directory and returns both the handle and the path so the caller can
/// delete the file afterwards.
async fn create_test_db() -> Result<(Arc<Db>, PathBuf)> {
    let mut db_path = std::env::temp_dir();
    // Random suffix avoids collisions between concurrently running tests.
    db_path.push(format!("alchemist_test_{}.db", rand::random::<u64>()));

    let db = Arc::new(Db::new(db_path.to_string_lossy().as_ref()).await?);
    Ok((db, db_path))
}
|
||||
|
||||
/// Build a test pipeline with custom configuration
///
/// Returns the shared DB handle, a `Pipeline` wired to a CPU-only fake
/// hardware description, and the DB file path for cleanup. The `configure`
/// closure runs after the test defaults are applied, so it can override any
/// of them.
async fn build_test_pipeline<F>(configure: F) -> Result<(Arc<Db>, Pipeline, PathBuf)>
where
    F: FnOnce(&mut Config),
{
    let (db, db_path) = create_test_db().await?;

    let mut config = Config::default();
    // Set sensible defaults for testing
    config.transcode.output_codec = OutputCodec::H264;
    config.transcode.min_file_size_mb = 0;
    config.transcode.min_bpp_threshold = 0.0;
    // Negative threshold so jobs are never rejected for insufficient savings.
    config.transcode.size_reduction_threshold = -1.0;
    config.quality.enable_vmaf = false;
    config.hardware.allow_cpu_encoding = true;
    config.hardware.allow_cpu_fallback = true;

    // Apply custom configuration
    configure(&mut config);

    // Create event channels for the pipeline
    let (jobs_tx, _) = broadcast::channel(100);
    let (config_tx, _) = broadcast::channel(10);
    let (system_tx, _) = broadcast::channel(10);
    let event_channels = Arc::new(alchemist::db::EventChannels {
        jobs: jobs_tx,
        config: config_tx,
        system: system_tx,
    });

    let pipeline = Pipeline::new(
        db.clone(),
        Arc::new(Transcoder::new()),
        Arc::new(RwLock::new(config)),
        // Fake CPU-only hardware so tests never depend on real GPUs.
        HardwareState::new(Some(HardwareInfo {
            vendor: Vendor::Cpu,
            device_path: None,
            supported_codecs: vec!["av1".to_string(), "hevc".to_string(), "h264".to_string()],
            backends: Vec::new(),
            detection_notes: Vec::new(),
        })),
        Arc::new(broadcast::channel(16).0),
        event_channels,
        false,
    );

    Ok((db, pipeline, db_path))
}
|
||||
|
||||
/// Enqueue and process a transcode job
///
/// Enqueues `input` -> `output`, runs it through the pipeline synchronously,
/// and returns the job's final state. On pipeline failure, recent log lines
/// for this job are gathered into the error message to aid debugging.
async fn enqueue_and_process(
    db: &Db,
    pipeline: &Pipeline,
    input: &Path,
    output: &Path,
) -> Result<JobState> {
    // UNIX_EPOCH as the mtime marker — any real file will be newer.
    db.enqueue_job(input, output, SystemTime::UNIX_EPOCH)
        .await?;

    let job = db
        .get_job_by_input_path(input.to_string_lossy().as_ref())
        .await?
        .context("job missing")?;

    if let Err(failure) = pipeline.process_job(job.clone()).await {
        // Pull the last 50 log entries and keep only this job's messages.
        let logs = db.get_logs(50, 0).await.unwrap_or_default();
        let details = logs
            .into_iter()
            .filter(|entry| entry.job_id == Some(job.id))
            .map(|entry| entry.message)
            .collect::<Vec<_>>()
            .join("\n");
        anyhow::bail!("job failed with {:?}\n{}", failure, details);
    }

    // Re-read the job to observe the status written by the pipeline.
    let updated_job = db
        .get_job_by_id(job.id)
        .await?
        .context("updated job missing")?;

    Ok(updated_job.status)
}
|
||||
|
||||
/// Get stream count by type from FFprobe
|
||||
fn get_stream_count(path: &Path, stream_type: &str) -> Result<usize> {
|
||||
let output = Command::new("ffprobe")
|
||||
.args([
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
stream_type,
|
||||
"-show_entries",
|
||||
"stream=index",
|
||||
"-of",
|
||||
"csv=p=0",
|
||||
])
|
||||
.arg(path)
|
||||
.output()
|
||||
.context("ffprobe failed")?;
|
||||
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"ffprobe failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
let count = String::from_utf8_lossy(&output.stdout)
|
||||
.lines()
|
||||
.filter(|line| !line.trim().is_empty())
|
||||
.count();
|
||||
|
||||
Ok(count)
|
||||
}
|
||||
|
||||
/// Get codec name for a specific stream type
|
||||
fn get_codec_name(path: &Path, stream_type: &str) -> Result<Option<String>> {
|
||||
let output = Command::new("ffprobe")
|
||||
.args([
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
stream_type,
|
||||
"-show_entries",
|
||||
"stream=codec_name",
|
||||
"-of",
|
||||
"csv=p=0",
|
||||
])
|
||||
.arg(path)
|
||||
.output()
|
||||
.context("ffprobe failed")?;
|
||||
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"ffprobe failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
let codec = String::from_utf8_lossy(&output.stdout)
|
||||
.lines()
|
||||
.next()
|
||||
.map(|line| line.trim().to_string())
|
||||
.filter(|codec| !codec.is_empty());
|
||||
|
||||
Ok(codec)
|
||||
}
|
||||
|
||||
/// Verify that a file exists and has the expected codec
|
||||
fn verify_output_codec(path: &Path, expected_codec: &str) -> Result<()> {
|
||||
assert!(
|
||||
path.exists(),
|
||||
"Output file should exist: {}",
|
||||
path.display()
|
||||
);
|
||||
|
||||
let actual_codec = get_codec_name(path, "v:0")?.context("No video codec found in output")?;
|
||||
|
||||
assert_eq!(
|
||||
actual_codec, expected_codec,
|
||||
"Expected codec {}, got {}",
|
||||
expected_codec, actual_codec
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// End-to-end: H.264 fixture -> HEVC via the CPU pipeline; verifies the job
/// completes and the output's video stream is HEVC. Skips (returns Ok) when
/// FFmpeg or the fixture is missing.
#[tokio::test]
async fn test_h264_to_hevc_cpu_transcode() -> Result<()> {
    if !ffmpeg_ready() {
        println!("Skipping test: FFmpeg not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let input = fixtures.join("test_h264.mp4");

    if !input.exists() {
        println!("Skipping test: Fixture file not found: {}", input.display());
        return Ok(());
    }

    let temp_dir = temp_output_dir("h264_to_hevc")?;
    let output = temp_dir.join("output_hevc.mp4");

    // Only the target codec differs from the test defaults.
    let (db, pipeline, db_path) = build_test_pipeline(|config| {
        config.transcode.output_codec = OutputCodec::Hevc;
    })
    .await?;

    let state = enqueue_and_process(db.as_ref(), &pipeline, &input, &output).await?;

    assert_eq!(
        state,
        JobState::Completed,
        "Job should complete successfully"
    );
    verify_output_codec(&output, "hevc")?;

    // Cleanup
    let _ = std::fs::remove_file(db_path);
    cleanup_temp_dir(&temp_dir);

    Ok(())
}
|
||||
|
||||
/// Analyzer smoke test on the video-only H.264 fixture: checks dimensions,
/// codec presence, positive duration, and that no audio/subtitle streams are
/// reported. Skips when FFprobe or the fixture is missing.
#[tokio::test]
async fn test_basic_video_analysis() -> Result<()> {
    if !ffprobe_available() {
        println!("Skipping test: FFprobe not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let input = fixtures.join("test_h264.mp4");

    if !input.exists() {
        println!("Skipping test: Fixture file not found: {}", input.display());
        return Ok(());
    }

    let analyzer = FfmpegAnalyzer;
    let analysis = analyzer.analyze(&input).await?;

    // Verify basic analysis results
    // (320x240 matches how the fixture is generated — see tests/fixtures/README.md)
    assert_eq!(analysis.metadata.width, 320, "Expected width 320");
    assert_eq!(analysis.metadata.height, 240, "Expected height 240");
    assert!(
        !analysis.metadata.codec_name.is_empty(),
        "Video codec should be detected"
    );
    assert!(
        analysis.metadata.duration_secs > 0.0,
        "Duration should be greater than 0"
    );

    // Verify streams - we can check if there are subtitle/audio streams in metadata
    // For video-only files, audio codec should be None and subtitle streams empty
    assert!(
        analysis.metadata.audio_codec.is_none(),
        "Should not have audio codec in video-only file"
    );
    assert!(
        analysis.metadata.subtitle_streams.is_empty(),
        "Should not have subtitle streams in video-only file"
    );

    Ok(())
}
|
||||
|
||||
/// End-to-end: transcodes the H.264+AAC fixture and verifies the output
/// retains exactly one video and one audio stream. Skips when FFmpeg or the
/// fixture is missing.
#[tokio::test]
async fn test_audio_stream_handling() -> Result<()> {
    if !ffmpeg_ready() {
        println!("Skipping test: FFmpeg not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let input = fixtures.join("test_h264_with_audio.mp4");

    if !input.exists() {
        println!("Skipping test: Fixture file not found: {}", input.display());
        return Ok(());
    }

    let temp_dir = temp_output_dir("audio_handling")?;
    let output = temp_dir.join("output_with_audio.mp4");

    let (db, pipeline, db_path) = build_test_pipeline(|config| {
        config.transcode.output_codec = OutputCodec::H264;
    })
    .await?;

    let state = enqueue_and_process(db.as_ref(), &pipeline, &input, &output).await?;

    assert_eq!(
        state,
        JobState::Completed,
        "Job should complete successfully"
    );

    // Verify video and audio streams
    let video_count = get_stream_count(&output, "v")?;
    let audio_count = get_stream_count(&output, "a")?;

    assert_eq!(video_count, 1, "Should have one video stream");
    assert_eq!(audio_count, 1, "Should have one audio stream");

    // Cleanup
    let _ = std::fs::remove_file(db_path);
    cleanup_temp_dir(&temp_dir);

    Ok(())
}
|
||||
|
||||
/// End-to-end: with `SubtitleMode::Extract`, the MKV fixture's subtitles must
/// be removed from the main output and written as at least one `.srt` sidecar
/// next to it. Skips when FFmpeg or the fixture is missing.
#[tokio::test]
async fn test_subtitle_extraction() -> Result<()> {
    if !ffmpeg_ready() {
        println!("Skipping test: FFmpeg not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let input = fixtures.join("test_h264_with_subtitles.mkv");

    if !input.exists() {
        println!("Skipping test: Fixture file not found: {}", input.display());
        return Ok(());
    }

    let temp_dir = temp_output_dir("subtitle_extraction")?;
    let output = temp_dir.join("output_no_subs.mkv");

    let (db, pipeline, db_path) = build_test_pipeline(|config| {
        config.transcode.subtitle_mode = SubtitleMode::Extract;
        config.transcode.output_codec = OutputCodec::H264;
    })
    .await?;

    let state = enqueue_and_process(db.as_ref(), &pipeline, &input, &output).await?;

    assert_eq!(
        state,
        JobState::Completed,
        "Job should complete successfully"
    );

    // Verify main output has no subtitle streams
    let subtitle_count = get_stream_count(&output, "s")?;
    assert_eq!(
        subtitle_count, 0,
        "Main output should have no subtitle streams"
    );

    // Check for sidecar subtitle files (basic check)
    let sidecar_files: Vec<_> = std::fs::read_dir(&temp_dir)?
        .filter_map(|entry| entry.ok())
        .map(|entry| entry.path())
        .filter(|path| path.extension().and_then(|ext| ext.to_str()) == Some("srt"))
        .collect();

    // Should have extracted at least one subtitle file
    assert!(
        !sidecar_files.is_empty(),
        "Should have extracted subtitle files"
    );

    // Cleanup
    let _ = std::fs::remove_file(db_path);
    cleanup_temp_dir(&temp_dir);

    Ok(())
}
|
||||
|
||||
/// End-to-end over several input codecs: for each fixture, first confirms the
/// input's codec via ffprobe, then transcodes to H.264 and verifies the
/// result. Missing fixtures are skipped individually with `continue`.
#[tokio::test]
async fn test_multiple_input_formats() -> Result<()> {
    if !ffmpeg_ready() {
        println!("Skipping test: FFmpeg not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let test_files = vec![("test_h264.mp4", "h264"), ("test_hevc.mp4", "hevc")];

    for (filename, expected_input_codec) in test_files {
        let input = fixtures.join(filename);

        if !input.exists() {
            println!("Skipping {}: Fixture file not found", filename);
            continue;
        }

        // Verify input codec first
        let input_codec =
            get_codec_name(&input, "v:0")?.context("No video codec found in input")?;
        assert_eq!(
            input_codec, expected_input_codec,
            "Expected input codec {}",
            expected_input_codec
        );

        let temp_dir = temp_output_dir(&format!("multi_format_{}", expected_input_codec))?;
        let output = temp_dir.join("output.mp4");

        // A fresh DB + pipeline per input keeps the iterations independent.
        let (db, pipeline, db_path) = build_test_pipeline(|config| {
            config.transcode.output_codec = OutputCodec::H264;
        })
        .await?;

        let state = enqueue_and_process(db.as_ref(), &pipeline, &input, &output).await?;

        assert_eq!(
            state,
            JobState::Completed,
            "Job should complete successfully for {}",
            filename
        );
        verify_output_codec(&output, "h264")?;

        // Cleanup
        let _ = std::fs::remove_file(db_path);
        cleanup_temp_dir(&temp_dir);
    }

    Ok(())
}
|
||||
|
||||
/// Analyzer stream detection across the three fixture shapes (video-only,
/// video+audio, video+subtitles); each section runs only if its fixture
/// exists. Skips entirely when FFprobe is missing.
#[tokio::test]
async fn test_analyzer_stream_detection() -> Result<()> {
    if !ffprobe_available() {
        println!("Skipping test: FFprobe not available");
        return Ok(());
    }

    let fixtures = fixtures_path();
    let analyzer = FfmpegAnalyzer;

    // Test video-only file
    let video_only = fixtures.join("test_h264.mp4");
    if video_only.exists() {
        let analysis = analyzer.analyze(&video_only).await?;
        assert!(
            !analysis.metadata.codec_name.is_empty(),
            "Should detect video codec"
        );
        assert!(
            analysis.metadata.audio_codec.is_none(),
            "Should not detect audio codec in video-only file"
        );
        assert!(
            analysis.metadata.subtitle_streams.is_empty(),
            "Should not detect subtitle streams in video-only file"
        );
    }

    // Test video+audio file
    let video_audio = fixtures.join("test_h264_with_audio.mp4");
    if video_audio.exists() {
        let analysis = analyzer.analyze(&video_audio).await?;
        assert!(
            !analysis.metadata.codec_name.is_empty(),
            "Should detect video codec"
        );
        assert!(
            analysis.metadata.audio_codec.is_some(),
            "Should detect audio codec"
        );
    }

    // Test video+subtitle file
    let video_subs = fixtures.join("test_h264_with_subtitles.mkv");
    if video_subs.exists() {
        let analysis = analyzer.analyze(&video_subs).await?;
        assert!(
            !analysis.metadata.codec_name.is_empty(),
            "Should detect video codec"
        );
        assert!(
            !analysis.metadata.subtitle_streams.is_empty(),
            "Should detect subtitle streams"
        );
    }

    Ok(())
}
|
||||
|
||||
/// Tests exercising the CPU fallback path when hardware encoding is not used.
#[cfg(test)]
mod hardware_fallback_tests {
    use super::*;

    /// With CPU encoding/fallback allowed (and the pipeline's fake hardware
    /// being CPU-only — see `build_test_pipeline`), an HEVC transcode should
    /// still complete. Skips when FFmpeg or the fixture is missing.
    #[tokio::test]
    async fn test_cpu_fallback_when_hardware_unavailable() -> Result<()> {
        if !ffmpeg_ready() {
            println!("Skipping test: FFmpeg not available");
            return Ok(());
        }

        let fixtures = fixtures_path();
        let input = fixtures.join("test_h264.mp4");

        if !input.exists() {
            println!("Skipping test: Fixture file not found: {}", input.display());
            return Ok(());
        }

        let temp_dir = temp_output_dir("cpu_fallback")?;
        let output = temp_dir.join("output_fallback.mp4");

        let (db, pipeline, db_path) = build_test_pipeline(|config| {
            config.transcode.output_codec = OutputCodec::Hevc;
            config.hardware.allow_cpu_encoding = true;
            config.hardware.allow_cpu_fallback = true;
            // Simulate hardware being unavailable by only allowing CPU
        })
        .await?;

        let state = enqueue_and_process(db.as_ref(), &pipeline, &input, &output).await?;

        // Should complete with CPU fallback
        assert_eq!(
            state,
            JobState::Completed,
            "Job should complete with CPU fallback"
        );
        verify_output_codec(&output, "hevc")?;

        // Cleanup
        let _ = std::fs::remove_file(db_path);
        cleanup_temp_dir(&temp_dir);

        Ok(())
    }
}
|
||||
305
tests/integration_ffmpeg_minimal.rs
Normal file
305
tests/integration_ffmpeg_minimal.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
//! Minimal FFmpeg integration tests for Alchemist
|
||||
//!
|
||||
//! These tests verify the FFmpeg components work correctly without
|
||||
//! requiring the full server infrastructure.
|
||||
|
||||
use alchemist::media::analyzer::FfmpegAnalyzer;
|
||||
use alchemist::media::pipeline::Analyzer;
|
||||
use anyhow::Result;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
/// Returns `true` when the `ffmpeg` binary can be spawned and exits successfully.
fn ffmpeg_available() -> bool {
    let probe = Command::new("ffmpeg")
        .arg("-version")
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status();
    // Any spawn failure or non-zero exit counts as "not available".
    matches!(probe, Ok(status) if status.success())
}
|
||||
|
||||
/// Returns `true` when the `ffprobe` binary can be spawned and exits successfully.
fn ffprobe_available() -> bool {
    let probe = Command::new("ffprobe")
        .arg("-version")
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status();
    // Any spawn failure or non-zero exit counts as "not available".
    matches!(probe, Ok(status) if status.success())
}
|
||||
|
||||
/// Check if both FFmpeg and FFprobe are available
|
||||
fn ffmpeg_ready() -> bool {
|
||||
ffmpeg_available() && ffprobe_available()
|
||||
}
|
||||
|
||||
/// Get the path to test fixtures
|
||||
fn fixtures_path() -> PathBuf {
|
||||
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
|
||||
path.push("tests");
|
||||
path.push("fixtures");
|
||||
path
|
||||
}
|
||||
|
||||
/// Get codec name for a specific stream type using ffprobe
|
||||
fn get_codec_name(path: &Path, stream_type: &str) -> Result<Option<String>> {
|
||||
let output = Command::new("ffprobe")
|
||||
.args([
|
||||
"-v",
|
||||
"error",
|
||||
"-select_streams",
|
||||
stream_type,
|
||||
"-show_entries",
|
||||
"stream=codec_name",
|
||||
"-of",
|
||||
"csv=p=0",
|
||||
])
|
||||
.arg(path)
|
||||
.output()?;
|
||||
|
||||
if !output.status.success() {
|
||||
anyhow::bail!(
|
||||
"ffprobe failed: {}",
|
||||
String::from_utf8_lossy(&output.stderr)
|
||||
);
|
||||
}
|
||||
|
||||
let codec = String::from_utf8_lossy(&output.stdout)
|
||||
.lines()
|
||||
.next()
|
||||
.map(|line| line.trim().to_string())
|
||||
.filter(|codec| !codec.is_empty());
|
||||
|
||||
Ok(codec)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ffmpeg_analyzer_h264() -> Result<()> {
|
||||
if !ffprobe_available() {
|
||||
println!("Skipping test: FFprobe not available");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fixtures = fixtures_path();
|
||||
let input = fixtures.join("test_h264.mp4");
|
||||
|
||||
if !input.exists() {
|
||||
println!("Skipping test: Fixture file not found: {}", input.display());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let analyzer = FfmpegAnalyzer;
|
||||
let analysis = analyzer.analyze(&input).await?;
|
||||
|
||||
// Verify basic analysis results
|
||||
assert_eq!(analysis.metadata.width, 320, "Expected width 320");
|
||||
assert_eq!(analysis.metadata.height, 240, "Expected height 240");
|
||||
assert_eq!(analysis.metadata.codec_name, "h264", "Expected H.264 codec");
|
||||
assert!(
|
||||
analysis.metadata.duration_secs > 0.0,
|
||||
"Duration should be greater than 0"
|
||||
);
|
||||
assert!(analysis.metadata.fps > 0.0, "FPS should be greater than 0");
|
||||
|
||||
// Verify streams - video-only file should have no audio codec
|
||||
assert!(
|
||||
analysis.metadata.audio_codec.is_none(),
|
||||
"Should not have audio codec in video-only file"
|
||||
);
|
||||
assert!(
|
||||
analysis.metadata.subtitle_streams.is_empty(),
|
||||
"Should not have subtitle streams in video-only file"
|
||||
);
|
||||
|
||||
println!("✓ H.264 analysis test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ffmpeg_analyzer_hevc() -> Result<()> {
|
||||
if !ffprobe_available() {
|
||||
println!("Skipping test: FFprobe not available");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fixtures = fixtures_path();
|
||||
let input = fixtures.join("test_hevc.mp4");
|
||||
|
||||
if !input.exists() {
|
||||
println!("Skipping test: Fixture file not found: {}", input.display());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let analyzer = FfmpegAnalyzer;
|
||||
let analysis = analyzer.analyze(&input).await?;
|
||||
|
||||
// Verify basic analysis results
|
||||
assert_eq!(analysis.metadata.width, 320, "Expected width 320");
|
||||
assert_eq!(analysis.metadata.height, 240, "Expected height 240");
|
||||
assert_eq!(analysis.metadata.codec_name, "hevc", "Expected HEVC codec");
|
||||
assert!(
|
||||
analysis.metadata.duration_secs > 0.0,
|
||||
"Duration should be greater than 0"
|
||||
);
|
||||
|
||||
println!("✓ HEVC analysis test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ffmpeg_analyzer_audio() -> Result<()> {
|
||||
if !ffprobe_available() {
|
||||
println!("Skipping test: FFprobe not available");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fixtures = fixtures_path();
|
||||
let input = fixtures.join("test_h264_with_audio.mp4");
|
||||
|
||||
if !input.exists() {
|
||||
println!("Skipping test: Fixture file not found: {}", input.display());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let analyzer = FfmpegAnalyzer;
|
||||
let analysis = analyzer.analyze(&input).await?;
|
||||
|
||||
// Verify basic analysis results
|
||||
assert_eq!(
|
||||
analysis.metadata.codec_name, "h264",
|
||||
"Expected H.264 video codec"
|
||||
);
|
||||
assert!(
|
||||
analysis.metadata.audio_codec.is_some(),
|
||||
"Should have audio codec"
|
||||
);
|
||||
|
||||
// Check audio metadata
|
||||
if let Some(audio_codec) = &analysis.metadata.audio_codec {
|
||||
assert_eq!(audio_codec, "aac", "Expected AAC audio codec");
|
||||
}
|
||||
|
||||
println!("✓ Audio stream analysis test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ffmpeg_analyzer_subtitles() -> Result<()> {
|
||||
if !ffprobe_available() {
|
||||
println!("Skipping test: FFprobe not available");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fixtures = fixtures_path();
|
||||
let input = fixtures.join("test_h264_with_subtitles.mkv");
|
||||
|
||||
if !input.exists() {
|
||||
println!("Skipping test: Fixture file not found: {}", input.display());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let analyzer = FfmpegAnalyzer;
|
||||
let analysis = analyzer.analyze(&input).await?;
|
||||
|
||||
// Verify basic analysis results
|
||||
assert_eq!(
|
||||
analysis.metadata.codec_name, "h264",
|
||||
"Expected H.264 video codec"
|
||||
);
|
||||
assert!(
|
||||
!analysis.metadata.subtitle_streams.is_empty(),
|
||||
"Should have subtitle streams"
|
||||
);
|
||||
|
||||
// Check subtitle metadata
|
||||
let subtitle = &analysis.metadata.subtitle_streams[0];
|
||||
assert_eq!(subtitle.codec_name, "subrip", "Expected SRT subtitle codec");
|
||||
|
||||
println!("✓ Subtitle stream analysis test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ffmpeg_availability() -> Result<()> {
|
||||
println!("FFmpeg available: {}", ffmpeg_available());
|
||||
println!("FFprobe available: {}", ffprobe_available());
|
||||
println!("FFmpeg ready: {}", ffmpeg_ready());
|
||||
|
||||
if ffmpeg_ready() {
|
||||
// Test basic ffprobe functionality
|
||||
let fixtures = fixtures_path();
|
||||
let input = fixtures.join("test_h264.mp4");
|
||||
|
||||
if input.exists() {
|
||||
let codec = get_codec_name(&input, "v:0")?;
|
||||
assert_eq!(
|
||||
codec,
|
||||
Some("h264".to_string()),
|
||||
"Expected H.264 codec from ffprobe"
|
||||
);
|
||||
println!("✓ Direct ffprobe test passed");
|
||||
}
|
||||
}
|
||||
|
||||
println!("✓ FFmpeg availability test completed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_multiple_format_analysis() -> Result<()> {
|
||||
if !ffprobe_available() {
|
||||
println!("Skipping test: FFprobe not available");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let fixtures = fixtures_path();
|
||||
let analyzer = FfmpegAnalyzer;
|
||||
|
||||
let test_files = vec![("test_h264.mp4", "h264"), ("test_hevc.mp4", "hevc")];
|
||||
|
||||
for (filename, expected_codec) in test_files {
|
||||
let input = fixtures.join(filename);
|
||||
|
||||
if !input.exists() {
|
||||
println!("Skipping {}: Fixture file not found", filename);
|
||||
continue;
|
||||
}
|
||||
|
||||
let analysis = analyzer.analyze(&input).await?;
|
||||
assert_eq!(
|
||||
analysis.metadata.codec_name, expected_codec,
|
||||
"Expected {} codec for {}",
|
||||
expected_codec, filename
|
||||
);
|
||||
|
||||
println!("✓ {} format analysis passed", filename);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Inventory check: print which expected fixture files are present.
/// Purely informational — missing fixtures are reported, not failures.
#[test]
fn test_fixture_files_exist() {
    let fixtures = fixtures_path();
    println!("Fixtures path: {}", fixtures.display());

    let expected_files = [
        "test_h264.mp4",
        "test_hevc.mp4",
        "test_h264_with_audio.mp4",
        "test_h264_with_subtitles.mkv",
        "test_subtitle.srt",
    ];

    for filename in expected_files {
        match fixtures.join(filename).exists() {
            true => println!("✓ Found fixture: {}", filename),
            false => println!("⚠ Missing fixture: {}", filename),
        }
    }
}
|
||||
@@ -217,10 +217,10 @@ export default function HardwareSettings() {
|
||||
<div className="flex flex-col gap-6" aria-live="polite">
|
||||
<div className="flex items-center justify-between pb-2 border-b border-helios-line/10">
|
||||
<div>
|
||||
<h3 className="text-base font-bold text-helios-ink tracking-tight uppercase tracking-[0.1em]">Transcoding Hardware</h3>
|
||||
<h3 className="text-base font-bold text-helios-ink tracking-tight">Transcoding Hardware</h3>
|
||||
<p className="text-xs text-helios-slate mt-0.5">Detected acceleration engines and codec support.</p>
|
||||
</div>
|
||||
<div className={`p-2 ${details.bg} rounded-xl ${details.color}`}>
|
||||
<div className={`p-2 ${details.bg} rounded-lg ${details.color}`}>
|
||||
{vendor === "cpu" ? <Cpu size={20} /> : <Zap size={20} />}
|
||||
</div>
|
||||
</div>
|
||||
@@ -228,18 +228,18 @@ export default function HardwareSettings() {
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
<div className="bg-helios-surface border border-helios-line/30 rounded-lg p-5 shadow-sm">
|
||||
<div className="flex items-center gap-3 mb-4">
|
||||
<div className={`p-2.5 rounded-xl ${details.bg} ${details.color}`}>
|
||||
<div className={`p-2.5 rounded-lg ${details.bg} ${details.color}`}>
|
||||
<HardDrive size={18} />
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-bold text-helios-ink uppercase tracking-wider">Active Device</h4>
|
||||
<p className="text-[10px] text-helios-slate font-bold">{details.name} {details.tech}</p>
|
||||
<h4 className="text-sm font-bold text-helios-ink">Active Device</h4>
|
||||
<p className="text-xs text-helios-slate font-bold">{details.name} {details.tech}</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="space-y-4">
|
||||
<div>
|
||||
<span className="text-xs font-medium text-helios-slate uppercase tracking-wide block mb-1.5 ml-0.5">Device Path</span>
|
||||
<span className="text-xs font-medium text-helios-slate block mb-1.5 ml-0.5">Device Path</span>
|
||||
<div className="bg-helios-surface-soft border border-helios-line/30 rounded-lg px-3 py-2 font-mono text-xs text-helios-ink shadow-inner">
|
||||
{info.device_path || (vendor === "nvidia" ? "NVIDIA Driver (Direct)" : "Auto-detected Interface")}
|
||||
</div>
|
||||
@@ -249,18 +249,18 @@ export default function HardwareSettings() {
|
||||
|
||||
<div className="bg-helios-surface border border-helios-line/30 rounded-lg p-5 shadow-sm">
|
||||
<div className="flex items-center gap-3 mb-4">
|
||||
<div className="p-2.5 rounded-xl bg-purple-500/10 text-purple-500">
|
||||
<div className="p-2.5 rounded-lg bg-purple-500/10 text-purple-500">
|
||||
<CheckCircle2 size={18} />
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-bold text-helios-ink uppercase tracking-wider">Codec Support</h4>
|
||||
<p className="text-[10px] text-helios-slate font-bold">Hardware verified encoders</p>
|
||||
<h4 className="text-sm font-bold text-helios-ink">Codec Support</h4>
|
||||
<p className="text-xs text-helios-slate font-bold">Hardware verified encoders</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="flex flex-wrap gap-2">
|
||||
{info.supported_codecs.length > 0 ? info.supported_codecs.map(codec => (
|
||||
<div key={codec} className="px-3 py-1.5 rounded-lg bg-emerald-500/10 border border-emerald-500/20 text-emerald-500 text-xs font-bold uppercase tracking-wider flex items-center gap-2">
|
||||
<div key={codec} className="px-3 py-1.5 rounded-lg bg-emerald-500/10 border border-emerald-500/20 text-emerald-500 text-xs font-bold flex items-center gap-2">
|
||||
<div className="w-1.5 h-1.5 rounded-full bg-emerald-500" />
|
||||
{codec}
|
||||
</div>
|
||||
@@ -289,7 +289,7 @@ export default function HardwareSettings() {
|
||||
<div className="flex gap-3">
|
||||
<AlertCircle className="text-helios-solar shrink-0" size={18} />
|
||||
<div className="space-y-1">
|
||||
<h5 className="text-sm font-bold text-helios-ink uppercase tracking-wider">CPU Fallback Active</h5>
|
||||
<h5 className="text-sm font-bold text-helios-ink">CPU Fallback Active</h5>
|
||||
<p className="text-xs text-helios-slate leading-relaxed">
|
||||
GPU acceleration was not detected or is incompatible. Alchemist will use software encoding (SVT-AV1 / x264), which is significantly more resource intensive.
|
||||
</p>
|
||||
@@ -346,7 +346,7 @@ export default function HardwareSettings() {
|
||||
</span>
|
||||
</summary>
|
||||
<div className="mt-2 space-y-2">
|
||||
<p className="text-[11px] text-helios-slate">
|
||||
<p className="text-xs text-helios-slate">
|
||||
{entry.backend}
|
||||
{entry.device_path ? ` • ${entry.device_path}` : ""}
|
||||
</p>
|
||||
@@ -373,12 +373,12 @@ export default function HardwareSettings() {
|
||||
>
|
||||
<div className="flex items-center justify-between">
|
||||
<div className="flex items-center gap-3">
|
||||
<div className="p-2.5 rounded-xl bg-blue-500/10 text-blue-500">
|
||||
<div className="p-2.5 rounded-lg bg-blue-500/10 text-blue-500">
|
||||
<Cpu size={18} />
|
||||
</div>
|
||||
<div>
|
||||
<h4 className="text-sm font-bold text-helios-ink uppercase tracking-wider">CPU Encoding</h4>
|
||||
<p className="text-[10px] text-helios-slate font-bold">
|
||||
<h4 className="text-sm font-bold text-helios-ink">CPU Encoding</h4>
|
||||
<p className="text-xs text-helios-slate font-bold">
|
||||
{settings.allow_cpu_encoding ? "Enabled - CPU can be used for encoding" : "Disabled - GPU only mode"}
|
||||
</p>
|
||||
</div>
|
||||
@@ -398,14 +398,14 @@ export default function HardwareSettings() {
|
||||
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-4 border-t border-helios-line/10 pt-5">
|
||||
<div className="space-y-2">
|
||||
<label htmlFor="hardware-preferred-vendor" className="text-xs font-medium uppercase tracking-wide text-helios-slate">Preferred Vendor</label>
|
||||
<label htmlFor="hardware-preferred-vendor" className="text-xs font-medium text-helios-slate">Preferred Vendor</label>
|
||||
<select
|
||||
id="hardware-preferred-vendor"
|
||||
value={settings.preferred_vendor ?? ""}
|
||||
onChange={(e) => void saveImmediateSettings({
|
||||
preferred_vendor: e.target.value || null,
|
||||
})}
|
||||
className="w-full rounded-xl border border-helios-line/30 bg-helios-surface px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full rounded-lg border border-helios-line/30 bg-helios-surface px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
>
|
||||
<option value="">Auto-detect</option>
|
||||
<option value="nvidia">NVIDIA</option>
|
||||
@@ -417,12 +417,12 @@ export default function HardwareSettings() {
|
||||
</div>
|
||||
|
||||
<div className="space-y-2">
|
||||
<label htmlFor="hardware-cpu-preset" className="text-xs font-medium uppercase tracking-wide text-helios-slate">CPU Preset</label>
|
||||
<label htmlFor="hardware-cpu-preset" className="text-xs font-medium text-helios-slate">CPU Preset</label>
|
||||
<select
|
||||
id="hardware-cpu-preset"
|
||||
value={settings.cpu_preset}
|
||||
onChange={(e) => void saveImmediateSettings({ cpu_preset: e.target.value })}
|
||||
className="w-full rounded-xl border border-helios-line/30 bg-helios-surface px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full rounded-lg border border-helios-line/30 bg-helios-surface px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
>
|
||||
<option value="slow">Slow</option>
|
||||
<option value="medium">Medium</option>
|
||||
@@ -434,8 +434,8 @@ export default function HardwareSettings() {
|
||||
|
||||
<div className="rounded-lg border border-helios-line/20 bg-helios-surface-soft/60 p-4 flex items-center justify-between">
|
||||
<div>
|
||||
<p className="text-xs font-bold uppercase tracking-wider text-helios-slate">Allow CPU Fallback</p>
|
||||
<p className="text-[10px] text-helios-slate mt-1">Permit software encoding when the preferred GPU path is unavailable.</p>
|
||||
<p className="text-xs font-bold text-helios-slate">Allow CPU Fallback</p>
|
||||
<p className="text-xs text-helios-slate mt-1">Permit software encoding when the preferred GPU path is unavailable.</p>
|
||||
</div>
|
||||
<label className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
@@ -452,8 +452,8 @@ export default function HardwareSettings() {
|
||||
|
||||
<div className="border-t border-helios-line/10 pt-5 space-y-3">
|
||||
<div>
|
||||
<h4 className="text-sm font-bold text-helios-ink uppercase tracking-wider">Explicit Device Path</h4>
|
||||
<p className="text-[10px] text-helios-slate font-bold mt-1">
|
||||
<h4 className="text-sm font-bold text-helios-ink">Explicit Device Path</h4>
|
||||
<p className="text-xs text-helios-slate font-bold mt-1">
|
||||
Optional — Linux only. Pin QSV or VAAPI detection to a specific render node, or leave blank to auto-detect.
|
||||
</p>
|
||||
</div>
|
||||
@@ -473,9 +473,9 @@ export default function HardwareSettings() {
|
||||
}
|
||||
}}
|
||||
placeholder="Optional — Linux only (e.g. /dev/dri/renderD128)"
|
||||
className="flex-1 bg-helios-surface-soft border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink font-mono text-sm focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="flex-1 bg-helios-surface-soft border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink font-mono text-sm focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate">
|
||||
<p className="text-xs text-helios-slate">
|
||||
Saves on blur or Enter. Other hardware changes will also carry the current device-path draft if you tab or click away.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
@@ -129,6 +129,21 @@ export default function LogViewer() {
|
||||
}
|
||||
});
|
||||
|
||||
eventSource.addEventListener("lagged", () => {
|
||||
showToast({
|
||||
kind: "warning",
|
||||
title: "Connection interrupted",
|
||||
message: "Refreshing data…",
|
||||
});
|
||||
void fetchHistory();
|
||||
eventSource?.close();
|
||||
eventSource = null;
|
||||
if (reconnectTimeoutRef.current !== null) {
|
||||
window.clearTimeout(reconnectTimeoutRef.current);
|
||||
}
|
||||
reconnectTimeoutRef.current = window.setTimeout(connect, 100);
|
||||
});
|
||||
|
||||
eventSource.onerror = () => {
|
||||
eventSource?.close();
|
||||
eventSource = null;
|
||||
|
||||
@@ -132,22 +132,22 @@ export default function TranscodeSettings() {
|
||||
<div className="flex flex-col gap-6">
|
||||
<div className="flex items-center justify-between pb-2 border-b border-helios-line/10">
|
||||
<div>
|
||||
<h3 className="text-base font-bold text-helios-ink tracking-tight uppercase tracking-[0.1em]">Transcoding Engine</h3>
|
||||
<h3 className="text-base font-bold text-helios-ink tracking-tight">Transcoding Engine</h3>
|
||||
<p className="text-xs text-helios-slate mt-0.5">Configure encoder behavior and performance limits.</p>
|
||||
</div>
|
||||
<div className="p-2 bg-helios-solar/10 rounded-xl text-helios-solar">
|
||||
<div className="p-2 bg-helios-solar/10 rounded-lg text-helios-solar">
|
||||
<Cpu size={20} />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{error && (
|
||||
<div className="p-4 bg-red-500/10 border border-red-500/20 text-red-500 rounded-xl text-sm font-semibold">
|
||||
<div className="p-4 bg-red-500/10 border border-red-500/20 text-red-500 rounded-lg text-sm font-semibold">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{success && (
|
||||
<div className="p-4 bg-green-500/10 border border-green-500/20 text-green-500 rounded-xl text-sm font-semibold">
|
||||
<div className="p-4 bg-green-500/10 border border-green-500/20 text-green-500 rounded-lg text-sm font-semibold">
|
||||
Settings saved successfully.
|
||||
</div>
|
||||
)}
|
||||
@@ -155,7 +155,7 @@ export default function TranscodeSettings() {
|
||||
<div className="grid gap-6 md:grid-cols-2">
|
||||
{/* Codec Selection */}
|
||||
<div className="md:col-span-2 space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Video size={14} /> Preferred Codec
|
||||
</label>
|
||||
<div className="grid grid-cols-1 sm:grid-cols-3 gap-4">
|
||||
@@ -200,7 +200,7 @@ export default function TranscodeSettings() {
|
||||
|
||||
{/* Quality Profile */}
|
||||
<div className="md:col-span-2 space-y-3 pt-4">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Gauge size={14} /> Quality Profile
|
||||
</label>
|
||||
<div className="grid grid-cols-1 sm:grid-cols-3 gap-3">
|
||||
@@ -223,8 +223,8 @@ export default function TranscodeSettings() {
|
||||
|
||||
<div className="md:col-span-2 flex items-center justify-between rounded-lg border border-helios-line/20 bg-helios-surface-soft/60 p-4">
|
||||
<div>
|
||||
<p className="text-xs font-bold uppercase tracking-wider text-helios-slate">Allow Fallback</p>
|
||||
<p className="text-[10px] text-helios-slate mt-1">If preferred codec is unavailable, use the best available fallback.</p>
|
||||
<p className="text-xs font-bold text-helios-slate">Allow Fallback</p>
|
||||
<p className="text-xs text-helios-slate mt-1">If preferred codec is unavailable, use the best available fallback.</p>
|
||||
</div>
|
||||
<div className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
@@ -239,13 +239,13 @@ export default function TranscodeSettings() {
|
||||
</div>
|
||||
|
||||
<div className="md:col-span-2 space-y-3 pt-2">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Film size={14} /> Subtitle Handling
|
||||
</label>
|
||||
<select
|
||||
value={settings.subtitle_mode}
|
||||
onChange={(e) => setSettings({ ...settings, subtitle_mode: e.target.value as TranscodeSettingsPayload["subtitle_mode"] })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
>
|
||||
<option value="copy">Copy subtitles</option>
|
||||
<option value="none">Drop subtitles</option>
|
||||
@@ -261,14 +261,14 @@ export default function TranscodeSettings() {
|
||||
</div>
|
||||
|
||||
<div className="md:col-span-2 space-y-4 pt-2">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Film size={14} /> Stream Rules
|
||||
</label>
|
||||
|
||||
<div className="flex items-center justify-between rounded-lg border border-helios-line/20 bg-helios-surface-soft/60 p-4">
|
||||
<div>
|
||||
<p className="text-xs font-bold uppercase tracking-wider text-helios-slate">Strip commentary tracks</p>
|
||||
<p className="text-[10px] text-helios-slate mt-1">Adds built-in title keywords for common commentary tracks.</p>
|
||||
<p className="text-xs font-bold text-helios-slate">Strip commentary tracks</p>
|
||||
<p className="text-xs text-helios-slate mt-1">Adds built-in title keywords for common commentary tracks.</p>
|
||||
</div>
|
||||
<div className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
@@ -309,7 +309,7 @@ export default function TranscodeSettings() {
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Strip Audio Tracks By Title Keyword
|
||||
</label>
|
||||
<input
|
||||
@@ -321,15 +321,15 @@ export default function TranscodeSettings() {
|
||||
})
|
||||
}
|
||||
placeholder="commentary, director's commentary"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">
|
||||
<p className="text-xs text-helios-slate ml-1">
|
||||
Audio tracks whose title contains any of these words will be removed. Separate multiple keywords with commas.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Keep Only These Audio Languages
|
||||
</label>
|
||||
<input
|
||||
@@ -341,17 +341,17 @@ export default function TranscodeSettings() {
|
||||
})
|
||||
}
|
||||
placeholder="eng, jpn"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">
|
||||
<p className="text-xs text-helios-slate ml-1">
|
||||
Only keep audio tracks matching these language codes. Tracks with no language tag are always kept. Leave blank to keep all languages.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="flex items-center justify-between rounded-lg border border-helios-line/20 bg-helios-surface-soft/60 p-4">
|
||||
<div>
|
||||
<p className="text-xs font-bold uppercase tracking-wider text-helios-slate">Keep only default audio track</p>
|
||||
<p className="text-[10px] text-helios-slate mt-1">Strip all audio tracks except the one marked as default by the source file.</p>
|
||||
<p className="text-xs font-bold text-helios-slate">Keep only default audio track</p>
|
||||
<p className="text-xs text-helios-slate mt-1">Strip all audio tracks except the one marked as default by the source file.</p>
|
||||
</div>
|
||||
<div className="relative inline-flex items-center cursor-pointer">
|
||||
<input
|
||||
@@ -372,7 +372,7 @@ export default function TranscodeSettings() {
|
||||
|
||||
{/* HDR + Tonemapping */}
|
||||
<div className="md:col-span-2 space-y-3 pt-2">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Film size={14} /> HDR Handling
|
||||
</label>
|
||||
<div className="grid grid-cols-1 sm:grid-cols-2 gap-4">
|
||||
@@ -406,24 +406,24 @@ export default function TranscodeSettings() {
|
||||
{settings.hdr_mode === "tonemap" && (
|
||||
<>
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Gauge size={14} /> Tonemap Algorithm
|
||||
</label>
|
||||
<select
|
||||
value={settings.tonemap_algorithm}
|
||||
onChange={(e) => setSettings({ ...settings, tonemap_algorithm: e.target.value as TranscodeSettingsPayload["tonemap_algorithm"] })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
>
|
||||
<option value="hable">Hable</option>
|
||||
<option value="mobius">Mobius</option>
|
||||
<option value="reinhard">Reinhard</option>
|
||||
<option value="clip">Clip</option>
|
||||
</select>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Choose the tone curve for HDR → SDR conversion.</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Choose the tone curve for HDR → SDR conversion.</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Scale size={14} /> Tonemap Peak (nits)
|
||||
</label>
|
||||
<input
|
||||
@@ -432,13 +432,13 @@ export default function TranscodeSettings() {
|
||||
max="1000"
|
||||
value={settings.tonemap_peak}
|
||||
onChange={(e) => setSettings({ ...settings, tonemap_peak: parseFloat(e.target.value) || 100 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Peak brightness used for tone mapping.</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Peak brightness used for tone mapping.</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Zap size={14} /> Tonemap Desaturation
|
||||
</label>
|
||||
<input
|
||||
@@ -448,16 +448,16 @@ export default function TranscodeSettings() {
|
||||
step="0.1"
|
||||
value={settings.tonemap_desat}
|
||||
onChange={(e) => setSettings({ ...settings, tonemap_desat: parseFloat(e.target.value) || 0 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Reduce oversaturated highlights after tonemapping.</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Reduce oversaturated highlights after tonemapping.</p>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Numeric Inputs */}
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Cpu size={14} /> Encoding Threads (libsvtav1/x265)
|
||||
</label>
|
||||
<input
|
||||
@@ -465,13 +465,13 @@ export default function TranscodeSettings() {
|
||||
min="0"
|
||||
value={settings.threads}
|
||||
onChange={(e) => setSettings({ ...settings, threads: parseInt(e.target.value) || 0 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Number of threads to allocate for software encoding (0 = Auto).</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Number of threads to allocate for software encoding (0 = Auto).</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Zap size={14} /> Concurrent Jobs
|
||||
</label>
|
||||
<input
|
||||
@@ -480,13 +480,13 @@ export default function TranscodeSettings() {
|
||||
max="8"
|
||||
value={settings.concurrent_jobs}
|
||||
onChange={(e) => setSettings({ ...settings, concurrent_jobs: parseInt(e.target.value) || 1 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Maximum number of files to process simultaneously.</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Maximum number of files to process simultaneously.</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Scale size={14} /> Min. Reduction (%)
|
||||
</label>
|
||||
<input
|
||||
@@ -496,13 +496,13 @@ export default function TranscodeSettings() {
|
||||
step="5"
|
||||
value={Math.round(settings.size_reduction_threshold * 100)}
|
||||
onChange={(e) => setSettings({ ...settings, size_reduction_threshold: (parseInt(e.target.value) || 0) / 100 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
<p className="text-[10px] text-helios-slate ml-1">Files must shrink by at least this percentage or they are reverted.</p>
|
||||
<p className="text-xs text-helios-slate ml-1">Files must shrink by at least this percentage or they are reverted.</p>
|
||||
</div>
|
||||
|
||||
<div className="space-y-3">
|
||||
<label className="text-xs font-bold uppercase tracking-wider text-helios-slate flex items-center gap-2">
|
||||
<label className="text-xs font-bold text-helios-slate flex items-center gap-2">
|
||||
<Film size={14} /> Min. File Size (MB)
|
||||
</label>
|
||||
<input
|
||||
@@ -510,7 +510,7 @@ export default function TranscodeSettings() {
|
||||
min="0"
|
||||
value={settings.min_file_size_mb}
|
||||
onChange={(e) => setSettings({ ...settings, min_file_size_mb: parseInt(e.target.value) || 0 })}
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-xl px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/30 rounded-lg px-4 py-3 text-helios-ink focus:border-helios-solar focus:ring-1 focus:ring-helios-solar outline-none transition-all"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -324,7 +324,7 @@ export default function WatchFolders() {
|
||||
<button
|
||||
onClick={() => void triggerScan()}
|
||||
disabled={scanning}
|
||||
className="flex items-center gap-2 px-3 py-1.5 bg-helios-solar/10 hover:bg-helios-solar/20 text-helios-solar rounded-lg text-xs font-bold uppercase tracking-wider transition-colors disabled:opacity-50"
|
||||
className="flex items-center gap-2 px-3 py-1.5 bg-helios-solar/10 hover:bg-helios-solar/20 text-helios-solar rounded-lg text-xs font-bold transition-colors disabled:opacity-50"
|
||||
>
|
||||
<Play size={14} className={scanning ? "animate-spin" : ""} />
|
||||
{scanning ? "Scanning..." : "Scan Now"}
|
||||
@@ -340,8 +340,8 @@ export default function WatchFolders() {
|
||||
<form onSubmit={addDir} className="space-y-3">
|
||||
<div className="space-y-3 rounded-lg border border-helios-line/20 bg-helios-surface-soft/50 p-4">
|
||||
<div>
|
||||
<h3 className="text-sm font-bold text-helios-ink uppercase tracking-wider">Library Directories</h3>
|
||||
<p className="text-[10px] text-helios-slate mt-1">
|
||||
<h3 className="text-sm font-bold text-helios-ink">Library Directories</h3>
|
||||
<p className="text-xs text-helios-slate mt-1">
|
||||
Canonical library roots from setup/TOML. These are stored in the main config file and synchronized into runtime watchers.
|
||||
</p>
|
||||
</div>
|
||||
@@ -353,13 +353,13 @@ export default function WatchFolders() {
|
||||
value={libraryPath}
|
||||
onChange={(e) => setLibraryPath(e.target.value)}
|
||||
placeholder="Add library directory..."
|
||||
className="w-full bg-helios-surface border border-helios-line/20 rounded-xl pl-10 pr-4 py-2.5 text-sm text-helios-ink placeholder:text-helios-slate/40 focus:border-helios-solar focus:ring-1 focus:ring-helios-solar/50 outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/20 rounded-lg pl-10 pr-4 py-2.5 text-sm text-helios-ink placeholder:text-helios-slate/40 focus:border-helios-solar focus:ring-1 focus:ring-helios-solar/50 outline-none transition-all"
|
||||
/>
|
||||
</div>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setPickerOpen("library")}
|
||||
className="rounded-xl border border-helios-line/30 bg-helios-surface px-4 py-2.5 text-sm font-medium text-helios-ink"
|
||||
className="rounded-lg border border-helios-line/30 bg-helios-surface px-4 py-2.5 text-sm font-medium text-helios-ink"
|
||||
>
|
||||
Browse
|
||||
</button>
|
||||
@@ -367,14 +367,14 @@ export default function WatchFolders() {
|
||||
type="button"
|
||||
onClick={() => void addLibraryDir()}
|
||||
disabled={!libraryPath.trim() || syncingLibrary}
|
||||
className="bg-helios-solar hover:bg-helios-solar-dark text-helios-surface px-5 py-2.5 rounded-xl font-medium text-sm transition-colors disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2 shadow-sm shadow-helios-solar/20"
|
||||
className="bg-helios-solar hover:bg-helios-solar-dark text-helios-surface px-5 py-2.5 rounded-lg font-medium text-sm transition-colors disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2 shadow-sm shadow-helios-solar/20"
|
||||
>
|
||||
<Plus size={16} /> Add Library
|
||||
</button>
|
||||
</div>
|
||||
<div className="space-y-2">
|
||||
{libraryDirs.map((dir) => (
|
||||
<div key={dir} className="flex items-center justify-between rounded-xl border border-helios-line/10 bg-helios-surface px-3 py-2">
|
||||
<div key={dir} className="flex items-center justify-between rounded-lg border border-helios-line/10 bg-helios-surface px-3 py-2">
|
||||
<span className="truncate font-mono text-sm text-helios-ink" title={dir}>{dir}</span>
|
||||
<button
|
||||
type="button"
|
||||
@@ -400,20 +400,20 @@ export default function WatchFolders() {
|
||||
value={path}
|
||||
onChange={(e) => setPath(e.target.value)}
|
||||
placeholder="Enter full directory path..."
|
||||
className="w-full bg-helios-surface border border-helios-line/20 rounded-xl pl-10 pr-4 py-2.5 text-sm text-helios-ink placeholder:text-helios-slate/40 focus:border-helios-solar focus:ring-1 focus:ring-helios-solar/50 outline-none transition-all"
|
||||
className="w-full bg-helios-surface border border-helios-line/20 rounded-lg pl-10 pr-4 py-2.5 text-sm text-helios-ink placeholder:text-helios-slate/40 focus:border-helios-solar focus:ring-1 focus:ring-helios-solar/50 outline-none transition-all"
|
||||
/>
|
||||
</div>
|
||||
<button
|
||||
type="button"
|
||||
onClick={() => setPickerOpen("watch")}
|
||||
className="rounded-xl border border-helios-line/30 bg-helios-surface px-4 py-2.5 text-sm font-medium text-helios-ink"
|
||||
className="rounded-lg border border-helios-line/30 bg-helios-surface px-4 py-2.5 text-sm font-medium text-helios-ink"
|
||||
>
|
||||
Browse
|
||||
</button>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={!path.trim()}
|
||||
className="bg-helios-solar hover:bg-helios-solar-dark text-helios-surface px-5 py-2.5 rounded-xl font-medium text-sm transition-colors disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2 shadow-sm shadow-helios-solar/20"
|
||||
className="bg-helios-solar hover:bg-helios-solar-dark text-helios-surface px-5 py-2.5 rounded-lg font-medium text-sm transition-colors disabled:opacity-50 disabled:cursor-not-allowed flex items-center gap-2 shadow-sm shadow-helios-solar/20"
|
||||
>
|
||||
<Plus size={16} /> Add
|
||||
</button>
|
||||
@@ -431,7 +431,7 @@ export default function WatchFolders() {
|
||||
|
||||
<div className="space-y-2">
|
||||
{dirs.map((dir) => (
|
||||
<div key={dir.id} className="flex flex-col gap-3 p-3 bg-helios-surface border border-helios-line/10 rounded-xl group hover:border-helios-line/30 hover:shadow-sm transition-all">
|
||||
<div key={dir.id} className="flex flex-col gap-3 p-3 bg-helios-surface border border-helios-line/10 rounded-lg group hover:border-helios-line/30 hover:shadow-sm transition-all">
|
||||
<div className="flex items-center justify-between gap-3">
|
||||
<div className="flex items-center gap-3 overflow-hidden">
|
||||
<div className="p-1.5 bg-helios-slate/5 rounded-lg text-helios-slate">
|
||||
@@ -440,7 +440,7 @@ export default function WatchFolders() {
|
||||
<span className="text-sm font-mono text-helios-ink truncate max-w-[400px]" title={dir.path}>
|
||||
{dir.path}
|
||||
</span>
|
||||
<span className="rounded-full border border-helios-line/20 px-2 py-0.5 text-[10px] font-bold uppercase tracking-wider text-helios-slate">
|
||||
<span className="rounded-full border border-helios-line/20 px-2 py-0.5 text-xs font-bold text-helios-slate">
|
||||
{dir.is_recursive ? "Recursive" : "Top level"}
|
||||
</span>
|
||||
</div>
|
||||
@@ -464,7 +464,7 @@ export default function WatchFolders() {
|
||||
);
|
||||
}}
|
||||
disabled={assigningDirId === dir.id}
|
||||
className="w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-2.5 text-sm text-helios-ink outline-none focus:border-helios-solar disabled:opacity-60"
|
||||
className="w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-2.5 text-sm text-helios-ink outline-none focus:border-helios-solar disabled:opacity-60"
|
||||
>
|
||||
<option value="">No profile (use global settings)</option>
|
||||
{builtinProfiles.map((profile) => (
|
||||
@@ -525,7 +525,7 @@ export default function WatchFolders() {
|
||||
|
||||
{customizeDir && profileDraft ? (
|
||||
<div className="fixed inset-0 z-[100] flex items-center justify-center bg-black/60 px-4 backdrop-blur-sm">
|
||||
<div className="w-full max-w-2xl rounded-xl border border-helios-line/20 bg-helios-surface p-6 shadow-2xl">
|
||||
<div className="w-full max-w-2xl rounded-lg border border-helios-line/20 bg-helios-surface p-6 shadow-2xl">
|
||||
<div className="flex items-start justify-between gap-4">
|
||||
<div>
|
||||
<h3 className="text-lg font-semibold text-helios-ink">Customize Profile</h3>
|
||||
@@ -548,24 +548,24 @@ export default function WatchFolders() {
|
||||
<form onSubmit={saveCustomProfile} className="mt-6 space-y-4">
|
||||
<div className="grid grid-cols-1 gap-4 md:grid-cols-2">
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Name
|
||||
</label>
|
||||
<input
|
||||
type="text"
|
||||
value={profileDraft.name}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, name: event.target.value })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
/>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Starting preset
|
||||
</label>
|
||||
<select
|
||||
value={profileDraft.preset}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, preset: event.target.value as ProfileDraft["preset"] })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
>
|
||||
{presets.map((preset) => (
|
||||
<option key={preset.id} value={preset.preset}>
|
||||
@@ -575,13 +575,13 @@ export default function WatchFolders() {
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Codec
|
||||
</label>
|
||||
<select
|
||||
value={profileDraft.codec}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, codec: event.target.value as ProfileDraft["codec"] })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
>
|
||||
<option value="av1">AV1</option>
|
||||
<option value="hevc">HEVC</option>
|
||||
@@ -589,13 +589,13 @@ export default function WatchFolders() {
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Quality profile
|
||||
</label>
|
||||
<select
|
||||
value={profileDraft.quality_profile}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, quality_profile: event.target.value as ProfileDraft["quality_profile"] })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
>
|
||||
<option value="speed">Speed</option>
|
||||
<option value="balanced">Balanced</option>
|
||||
@@ -603,26 +603,26 @@ export default function WatchFolders() {
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
HDR mode
|
||||
</label>
|
||||
<select
|
||||
value={profileDraft.hdr_mode}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, hdr_mode: event.target.value as ProfileDraft["hdr_mode"] })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
>
|
||||
<option value="preserve">Preserve</option>
|
||||
<option value="tonemap">Tonemap</option>
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Audio mode
|
||||
</label>
|
||||
<select
|
||||
value={profileDraft.audio_mode}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, audio_mode: event.target.value as ProfileDraft["audio_mode"] })}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
>
|
||||
<option value="copy">Copy</option>
|
||||
<option value="aac">AAC</option>
|
||||
@@ -630,7 +630,7 @@ export default function WatchFolders() {
|
||||
</select>
|
||||
</div>
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
CRF override
|
||||
</label>
|
||||
<input
|
||||
@@ -638,20 +638,20 @@ export default function WatchFolders() {
|
||||
value={profileDraft.crf_override}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, crf_override: event.target.value })}
|
||||
placeholder="Leave blank to use the preset default"
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label className="text-[10px] font-bold uppercase tracking-widest text-helios-slate">
|
||||
<label className="text-xs font-bold text-helios-slate">
|
||||
Notes
|
||||
</label>
|
||||
<textarea
|
||||
value={profileDraft.notes}
|
||||
onChange={(event) => setProfileDraft({ ...profileDraft, notes: event.target.value })}
|
||||
rows={3}
|
||||
className="mt-2 w-full rounded-xl border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
className="mt-2 w-full rounded-lg border border-helios-line/20 bg-helios-surface-soft px-4 py-3 text-helios-ink outline-none focus:border-helios-solar"
|
||||
/>
|
||||
</div>
|
||||
|
||||
@@ -659,7 +659,7 @@ export default function WatchFolders() {
|
||||
<button
|
||||
type="submit"
|
||||
disabled={savingProfile}
|
||||
className="rounded-xl bg-helios-solar px-5 py-3 text-sm font-semibold text-helios-main disabled:opacity-60"
|
||||
className="rounded-lg bg-helios-solar px-5 py-3 text-sm font-semibold text-helios-main disabled:opacity-60"
|
||||
>
|
||||
{savingProfile ? "Saving..." : "Save Custom Profile"}
|
||||
</button>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { useEffect, useMemo, useState } from "react";
|
||||
import { AlertCircle, CheckCircle2, Info, X, type LucideIcon } from "lucide-react";
|
||||
import { AlertCircle, AlertTriangle, CheckCircle2, Info, X, type LucideIcon } from "lucide-react";
|
||||
import { subscribeToToasts, type ToastKind, type ToastMessage } from "../../lib/toast";
|
||||
|
||||
const DEFAULT_DURATION_MS = 3500;
|
||||
@@ -18,6 +18,12 @@ function kindStyles(kind: ToastKind): { icon: LucideIcon; className: string } {
|
||||
className: "border-status-error/30 bg-status-error/10 text-status-error",
|
||||
};
|
||||
}
|
||||
if (kind === "warning") {
|
||||
return {
|
||||
icon: AlertTriangle,
|
||||
className: "border-amber-500/30 bg-amber-500/10 text-amber-500",
|
||||
};
|
||||
}
|
||||
return {
|
||||
icon: Info,
|
||||
className: "border-helios-line/40 bg-helios-surface text-helios-ink",
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
export type ToastKind = "success" | "error" | "info";
|
||||
export type ToastKind = "success" | "error" | "info" | "warning";
|
||||
|
||||
export interface ToastInput {
|
||||
kind: ToastKind;
|
||||
|
||||
Reference in New Issue
Block a user