diff --git a/.agents/skills/caveman/SKILL.md b/.agents/skills/caveman/SKILL.md
new file mode 100644
index 0000000..2ab498b
--- /dev/null
+++ b/.agents/skills/caveman/SKILL.md
@@ -0,0 +1,67 @@
+---
+name: caveman
+description: >
+ Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman
+ while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra,
+ wenyan-lite, wenyan-full, wenyan-ultra.
+ Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens",
+ "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested.
+---
+
+Respond terse like smart caveman. All technical substance stay. Only fluff die.
+
+## Persistence
+
+ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode".
+
+Default: **full**. Switch: `/caveman lite|full|ultra|wenyan-lite|wenyan-full|wenyan-ultra`.
+
+## Rules
+
+Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact.
+
+Pattern: `[thing] [action] [reason]. [next step].`
+
+Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..."
+Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:"
+
+## Intensity
+
+| Level | What change |
+|-------|------------|
+| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight |
+| **full** | Drop articles, fragments OK, short synonyms. Classic caveman |
+| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough |
+| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register |
+| **wenyan-full** | Maximum classical terseness. Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) |
+| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse |
+
+Example — "Why React component re-render?"
+- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`."
+- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`."
+- ultra: "Inline obj prop → new ref → re-render. `useMemo`."
+- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。"
+- wenyan-full: "物出新參照,致重繪。useMemo Wrap之。"
+- wenyan-ultra: "新參照→重繪。useMemo Wrap。"
+
+Example — "Explain database connection pooling."
+- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead."
+- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead."
+- ultra: "Pool = reuse DB conn. Skip handshake → fast under load."
+- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。"
+- wenyan-ultra: "池reuse conn。skip handshake → fast。"
+
+## Auto-Clarity
+
+Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done.
+
+Example — destructive op:
+> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone.
+> ```sql
+> DROP TABLE users;
+> ```
+> Caveman resume. Verify backup exist first.
+
+## Boundaries
+
+Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end.
\ No newline at end of file
diff --git a/.claude/settings.local.json b/.claude/settings.local.json
index 6471280..3066837 100644
--- a/.claude/settings.local.json
+++ b/.claude/settings.local.json
@@ -12,7 +12,22 @@
"Bash(bash --version)",
"Bash(git tag:*)",
"Bash(cargo clippy:*)",
- "Bash(bun run:*)"
+ "Bash(bun run:*)",
+ "Bash(ls /Users/brooklyn/data/alchemist/*.md)",
+ "Bash(ls /Users/brooklyn/data/alchemist/docs/*.md)",
+ "Bash(npx skills:*)",
+ "Bash(find /Users/brooklyn/data/alchemist/web -name tailwind.config.* -o -name *.config.ts -o -name *.config.js)",
+ "Bash(just check-web:*)",
+ "Bash(git stash:*)",
+ "Bash(just test-e2e:*)",
+ "Bash(bunx tsc:*)",
+ "Bash(wait)",
+ "Bash(npx playwright:*)",
+ "Bash(just check-rust:*)",
+ "Bash(cargo fmt:*)",
+ "Bash(cargo test:*)",
+ "Bash(just check:*)",
+ "Bash(just test:*)"
]
}
}
diff --git a/.claude/skills/caveman b/.claude/skills/caveman
new file mode 120000
index 0000000..9016aac
--- /dev/null
+++ b/.claude/skills/caveman
@@ -0,0 +1 @@
+../../.agents/skills/caveman
\ No newline at end of file
diff --git a/.idea/alchemist.iml b/.idea/alchemist.iml
index 568915c..5adcd65 100644
--- a/.idea/alchemist.iml
+++ b/.idea/alchemist.iml
@@ -2,7 +2,7 @@
-
+
@@ -13,6 +13,5 @@
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
index 5cb71ef..320340d 100644
--- a/.idea/inspectionProfiles/Project_Default.xml
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -1,6 +1,8 @@
+
+
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6d1af97..551598e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,28 @@
All notable changes to this project will be documented in this file.
+## [0.3.1-rc.3] - 2026-04-12
+
+### New Features
+
+#### Job Management Refactor
+- **Componentized Job Manager** — extracted monolithic `JobManager.tsx` into a modular suite under `web/src/components/jobs/`, including dedicated components for the toolbar, table, and detail modal.
+- **Enhanced Job Detail Modal** — rebuilt the job detail view with better loading states, smoother transitions, and improved information hierarchy for analysis, decisions, and failure reasons.
+- **Job SSE Hook** — unified job-related Server-Sent Events logic into a custom `useJobSSE` hook for better state management and reduced re-renders.
+
+#### Themes & UX
+- **Midnight OLED+** — enhanced the `midnight` theme with true-black surfaces and suppressed decorative gradients to maximize OLED power savings.
+- **Improved Toasts** — toast notifications now feature a high-quality backdrop blur and refined border styling for better visibility against busy backgrounds.
+
+#### Reliability & Observability
+- **Engine Lifecycle Specs** — added a comprehensive Playwright suite for validating engine transitions (Running -> Draining -> Paused -> Stopped).
+- **Planner & Lifecycle Docs** — added detailed technical documentation for the transcoding planner logic and engine state machine.
+- **Encode Attempt Tracking** — added a database migration to track individual encode attempts, laying the groundwork for more granular retry statistics.
+
+#### Hardware & Performance
+- **Concurrency & Speed Optimizations** — internal refinements to the executor and processor to improve hardware utilization and address reported speed issues on certain platforms.
+- **Backlog Grooming** — updated `TODO.md` with a focus on validating AMF and VAAPI AV1 hardware encoders.
+
## [0.3.1-rc.1] - 2026-04-08
### New Features
diff --git a/Cargo.lock b/Cargo.lock
index 7144fb5..2feb7a7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13,7 +13,7 @@ dependencies = [
[[package]]
name = "alchemist"
-version = "0.3.1-rc.1"
+version = "0.3.1-rc.3"
dependencies = [
"anyhow",
"argon2",
diff --git a/Cargo.toml b/Cargo.toml
index b3b3a55..2858197 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "alchemist"
-version = "0.3.1-rc.1"
+version = "0.3.1-rc.3"
edition = "2024"
rust-version = "1.85"
license = "GPL-3.0"
diff --git a/TODO.md b/TODO.md
index 7a37d96..56b70f2 100644
--- a/TODO.md
+++ b/TODO.md
@@ -1,21 +1,8 @@
# Todo List
-Remove `src/wizard.rs` from the project, the web setup handles it.. maybe keep for CLI users?
+## AMD / VAAPI / AMF
-## Frontend
-
-- Rework the Jobs screen sorting/filter island so it uses space more intelligently on narrow screens and overflows in a controlled, intentional-looking way instead of overflowing awkwardly.
-- Make the toast across all pages blur the background instead of reading as transparent.
-- Fix the Jobs modal so active jobs do not show `Waiting for analysis` while encoding/remuxing is already in progress.
-- Reduce the stop/drain redundancy in the header so pressing Stop does not leave both the button and the status pill saying `Stopping`.
-- Make the `midnight` OLED theme truly black, without gray treatment or shared gradients.
-
-## Backend
-
-- Investigate why encoding is very slow on macOS even when hardware acceleration is selected.
-- Investigate why so many jobs are skipped and why only one job appears to run at a time even when concurrent jobs are enabled.
-- Fix the clippy error that is currently blocking CI/CD.
-
-## Jobs / UX
-
-- Improve failed-job explanations on the Jobs screen when the current failure summary is weak or missing.
+- Validate `av1_vaapi` on real Linux VAAPI hardware — confirm encode succeeds with current args.
+- Validate `av1_amf` on real Windows AMF hardware — confirm encode succeeds with current args.
+- If either encoder needs quality/rate-control params, apply the same pattern as the VideoToolbox fix (add `rate_control: Option<&RateControl>` to `vaapi::append_args` and `amf::append_args`).
+- Update support claims in README and UI only after validation passes.
diff --git a/VERSION b/VERSION
index c16a70a..21ad887 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.3.1-rc.1
+0.3.1-rc.3
diff --git a/audit.md b/audit.md
new file mode 100644
index 0000000..62321a4
--- /dev/null
+++ b/audit.md
@@ -0,0 +1,136 @@
+# Audit Findings
+
+Date: 2026-04-11
+
+## Summary
+
+This audit focused on the highest-risk paths in Alchemist:
+
+- queue claiming and cancellation
+- media planning and execution
+- conversion validation
+- setup/auth exposure
+- job detail and failure UX
+
+The current automated checks were green at audit time, but several real
+correctness and behavior issues remain.
+
+## Findings
+
+### [P1] Canceling a job during analysis can be overwritten
+
+Relevant code:
+
+- `src/server/jobs.rs:41`
+- `src/media/pipeline.rs:927`
+- `src/media/pipeline.rs:970`
+- `src/orchestrator.rs:239`
+
+`request_job_cancel()` marks `analyzing` and `resuming` jobs as
+`cancelled` immediately. But the analysis/planning path can still run to
+completion and later overwrite that state to `skipped`,
+`encoding`/`remuxing`, or another follow-on state.
+
+The transcoder-side `pending_cancels` check only applies around FFmpeg
+spawn, so a cancel issued during analysis is not guaranteed to stop the
+pipeline before state transitions are persisted.
+
+Impact:
+
+- a user-visible cancel can be lost
+- the UI can report a cancelled job that later resumes or becomes skipped
+- queue state becomes harder to trust
+
+### [P1] VideoToolbox quality controls are effectively a no-op
+
+Relevant code:
+
+- `src/config.rs:85`
+- `src/media/planner.rs:633`
+- `src/media/ffmpeg/videotoolbox.rs:3`
+- `src/conversion.rs:424`
+
+The config still defines a VideoToolbox quality ladder, and the planner
+still emits `RateControl::Cq` for VideoToolbox encoders. But the actual
+VideoToolbox FFmpeg builder ignores rate-control input entirely.
+
+The Convert workflow does the same thing by still generating `Cq` for
+non-CPU/QSV encoders even though the VideoToolbox path does not consume
+it.
+
+Impact:
+
+- quality profile does not meaningfully affect VideoToolbox jobs
+- Convert quality values for VideoToolbox are misleading
+- macOS throughput/quality tradeoffs are harder to reason about
+
+### [P2] Convert does not reuse subtitle/container compatibility checks
+
+Relevant code:
+
+- `src/media/planner.rs:863`
+- `src/media/planner.rs:904`
+- `src/conversion.rs:272`
+- `src/conversion.rs:366`
+
+The main library planner explicitly rejects unsafe subtitle-copy
+combinations, especially for MP4/MOV targets. The Convert flow has its
+own normalization/build path and does not reuse that validation.
+
+Impact:
+
+- the Convert UI can accept settings that are known to fail later in FFmpeg
+- conversion behavior diverges from library-job behavior
+- users can hit avoidable execution-time errors instead of fast validation
+
+### [P2] Completed job details omit metadata at the API layer
+
+Relevant code:
+
+- `src/server/jobs.rs:344`
+- `web/src/components/JobManager.tsx:1774`
+
+The job detail endpoint explicitly returns `metadata = None` for
+`completed` jobs, even though the Jobs modal is structured to display
+input metadata when available.
+
+Impact:
+
+- completed-job details are structurally incomplete
+- the frontend needs special-case empty-state behavior
+- operator confidence is lower when comparing completed jobs after the fact
+
+### [P2] LAN-only setup is easy to misconfigure behind a local reverse proxy
+
+Relevant code:
+
+- `src/server/middleware.rs:269`
+- `src/server/middleware.rs:300`
+
+The setup gate uses `request_ip()` and trusts forwarded headers only when
+the direct peer is local/private. If Alchemist sits behind a loopback or
+LAN reverse proxy that fails to forward the real client IP, the request
+falls back to the proxy peer IP and is treated as LAN-local.
+
+Impact:
+
+- public reverse-proxy deployments can accidentally expose setup
+- behavior depends on correct proxy header forwarding
+- the security model is sound in principle but fragile in deployment
+
+## What To Fix First
+
+1. Fix the cancel-during-analysis race.
+2. Fix or redesign VideoToolbox quality handling so the UI and planner do
+ not promise controls that the backend ignores.
+3. Reuse planner validation in Convert for subtitle/container safety.
+4. Decide whether completed jobs should persist and return metadata in the
+ detail API.
+
+## What To Investigate Next
+
+1. Use runtime diagnostics to confirm whether macOS slowness is true
+ hardware underperformance, silent fallback, or filter overhead.
+2. Verify whether “only one job at a time” is caused by actual worker
+ serialization or by planner eligibility/skips.
+3. Review dominant skip reasons before relaxing planner heuristics.
diff --git a/docs/docs/changelog.md b/docs/docs/changelog.md
index b72234f..6ffadc8 100644
--- a/docs/docs/changelog.md
+++ b/docs/docs/changelog.md
@@ -3,6 +3,28 @@ title: Changelog
description: Release history for Alchemist.
---
+## [0.3.1-rc.3] - 2026-04-12
+
+### New Features
+
+#### Job Management Refactor
+- **Componentized Job Manager** — extracted monolithic `JobManager.tsx` into a modular suite under `web/src/components/jobs/`, including dedicated components for the toolbar, table, and detail modal.
+- **Enhanced Job Detail Modal** — rebuilt the job detail view with better loading states, smoother transitions, and improved information hierarchy for analysis, decisions, and failure reasons.
+- **Job SSE Hook** — unified job-related Server-Sent Events logic into a custom `useJobSSE` hook for better state management and reduced re-renders.
+
+#### Themes & UX
+- **Midnight OLED+** — enhanced the `midnight` theme with true-black surfaces and suppressed decorative gradients to maximize OLED power savings.
+- **Improved Toasts** — toast notifications now feature a high-quality backdrop blur and refined border styling for better visibility against busy backgrounds.
+
+#### Reliability & Observability
+- **Engine Lifecycle Specs** — added a comprehensive Playwright suite for validating engine transitions (Running -> Draining -> Paused -> Stopped).
+- **Planner & Lifecycle Docs** — added detailed technical documentation for the transcoding planner logic and engine state machine.
+- **Encode Attempt Tracking** — added a database migration to track individual encode attempts, laying the groundwork for more granular retry statistics.
+
+#### Hardware & Performance
+- **Concurrency & Speed Optimizations** — internal refinements to the executor and processor to improve hardware utilization and address reported speed issues on certain platforms.
+- **Backlog Grooming** — updated `TODO.md` with a focus on validating AMF and VAAPI AV1 hardware encoders.
+
## [0.3.1-rc.1] - 2026-04-08
### New Features
diff --git a/docs/docs/engine-lifecycle.md b/docs/docs/engine-lifecycle.md
new file mode 100644
index 0000000..fed125a
--- /dev/null
+++ b/docs/docs/engine-lifecycle.md
@@ -0,0 +1,152 @@
+---
+title: Engine Lifecycle
+description: Engine states, transitions, and job cancellation semantics.
+---
+
+The Alchemist engine is a background loop that claims queued jobs, processes them, and manages concurrent execution. This page documents all states, what triggers each transition, and the exact behavior during cancel, pause, drain, and restart.
+
+---
+
+## Engine states
+
+| State | Jobs start? | Active jobs affected? | How to enter |
+|-------|------------|----------------------|-------------|
+| **Running** | Yes | Not affected | Resume, restart |
+| **Paused** (manual) | No | Not cancelled | Header → Stop, `POST /api/engine/pause` |
+| **Paused** (scheduler) | No | Not cancelled | Schedule window activates |
+| **Draining** | No | Run to completion | Header → Stop (while running), `POST /api/engine/drain` |
+| **Restarting** | No (briefly) | Cancelled | `POST /api/engine/restart` |
+| **Shutdown** | No | Force-cancelled | Process exit / SIGTERM |
+
+Paused-manual and paused-scheduler are independent. Both must be cleared for jobs to start again.
+
+---
+
+## State transitions
+
+```
+ Resume
+ ┌──────────────────────────────┐
+ │ ▼
+Paused ◄─── Pause ─────── Running ──── Drain ───► Draining
+ │ ▲ │ │
+ │ Restart │ └─── Shutdown ──► Shutdown
+ │ ┌──────────┐ │
+ └─────►│ Restart │────────┘
+ └──────────┘
+ (brief pause,
+ cancel in-flight,
+ then resume)
+```
+
+### Pause
+
+- Sets `manual_paused = true`.
+- The claim loop polls every 2 seconds and blocks while paused.
+- Active jobs continue until they finish naturally.
+- Does **not** affect draining state.
+
+### Resume
+
+- Clears `manual_paused`.
+- Does **not** clear `scheduler_paused` (scheduler manages its own flag).
+- The claim loop immediately resumes on the next iteration.
+- Does **not** cancel the drain if draining.
+
+### Drain
+
+- Sets `draining = true` without setting `paused`.
+- No new jobs are claimed.
+- Active jobs run to completion.
+- When `in_flight_jobs` reaches zero: drain completes, `draining` is cleared, engine transitions to **Paused** (manual).
+
+### Restart
+
+1. Pause (set `manual_paused = true`).
+2. Cancel all in-flight jobs (Encoding, Remuxing, Analyzing, Resuming) via FFmpeg kill signal.
+3. Clear `draining` flag.
+4. Clear `idle_notified` flag.
+5. Resume (clear `manual_paused`).
+
+Cancelled in-flight jobs are marked `failed` with `failure_summary = "cancelled"`. They are eligible for automatic retry per the retry backoff schedule.
+
+### Shutdown
+
+Called when the process exits (SIGTERM / graceful shutdown):
+
+1. Cancel all active jobs via FFmpeg kill.
+2. Wait up to a short timeout for kills to complete.
+3. No retry is scheduled — the jobs return to `queued` on next startup.
+
+---
+
+## Job states
+
+| Job state | Meaning | Terminal? |
+|-----------|---------|-----------|
+| `queued` | Waiting to be claimed | No |
+| `analyzing` | FFprobe running on the file | No |
+| `encoding` | FFmpeg encoding in progress | No |
+| `remuxing` | FFmpeg stream-copy in progress | No |
+| `resuming` | Job being re-queued after retry | No |
+| `completed` | Encode finished successfully | Yes |
+| `skipped` | Planner decided not to transcode | Yes |
+| `failed` | Encode or analysis failed | Yes (with retry) |
+| `cancelled` | Cancelled by operator | Yes (with retry) |
+
+---
+
+## Retry backoff
+
+Failed and cancelled jobs are automatically retried. The engine checks elapsed time before claiming.
+
+| Attempt # | Backoff before retry |
+|-----------|---------------------|
+| 1 | 5 minutes |
+| 2 | 15 minutes |
+| 3 | 60 minutes |
+| 4+ | 6 hours |
+
+After 3 consecutive failures with no success, the job still retries on the 6-hour schedule. There is no permanent failure state from retries alone — operator must manually delete or cancel the job to stop retries.
+
+---
+
+## Cancel semantics
+
+### Cancel mid-analysis
+
+FFprobe process is not currently cancellable via signal. The cancel flag is checked before FFprobe starts. If analysis is in progress when cancel arrives, the job will be cancelled after analysis completes (before encoding starts).
+
+### Cancel mid-encode
+
+The FFmpeg process receives a kill signal immediately. The partial output file is cleaned up. The job is marked `failed` with `failure_summary = "cancelled"`.
+
+### Cancel while queued
+
+The job status is set to `cancelled` directly without any process kill.
+
+---
+
+## Pause vs. drain vs. restart
+
+| Operation | In-flight jobs | Partial output | New jobs |
+|-----------|---------------|---------------|----------|
+| Pause | Finish normally | Not affected | Blocked |
+| Drain | Finish normally | Not affected | Blocked until drain completes |
+| Restart | Killed | Cleaned up | Blocked briefly, then resume |
+| Shutdown | Killed | Cleaned up | N/A |
+
+Use **Pause** when you need to inspect the queue or change settings without losing progress.
+
+Use **Drain** when you want to stop gracefully after the current batch finishes (e.g. before maintenance).
+
+Use **Restart** to force a clean slate — e.g. after changing hardware settings that affect in-flight jobs.
+
+---
+
+## Boot sequence
+
+1. Migrations run.
+2. Any jobs left in `encoding`, `remuxing`, `analyzing`, or `resuming` are reset to `queued` (crash recovery).
+3. Boot analysis runs — FFprobe is run on every `queued` job that lacks metadata. This uses a single-slot semaphore and blocks the claim loop.
+4. Engine claim loop starts — jobs are claimed and processed up to the concurrent limit.
diff --git a/docs/docs/planner.md b/docs/docs/planner.md
new file mode 100644
index 0000000..63a3d71
--- /dev/null
+++ b/docs/docs/planner.md
@@ -0,0 +1,176 @@
+---
+title: Planner Heuristics
+description: How Alchemist decides whether to transcode, skip, or remux a file.
+---
+
+The planner runs once per job during the analysis phase and produces one of three decisions:
+
+- **Transcode** — re-encode the video stream.
+- **Remux** — copy streams into a different container (lossless, fast).
+- **Skip** — mark the file as not worth processing.
+
+Decisions are deterministic and based solely on file metadata and settings.
+
+---
+
+## Decision flow
+
+Each condition is evaluated in order. The first match wins.
+
+```
+1. already_target_codec → Skip (or Remux if container mismatch)
+2. no_available_encoders → Skip
+3. preferred_codec_unavailable → Skip (if fallback disabled)
+4. no_suitable_encoder → Skip (no encoder selected)
+5. incomplete_metadata → Skip (missing resolution)
+6. bpp_below_threshold → Skip (already efficient)
+7. below_min_file_size → Skip (too small)
+8. h264 source → Transcode (priority path)
+9. everything else → Transcode (transcode_recommended)
+```
+
+---
+
+## Skip conditions
+
+### already_target_codec
+
+The video stream is already in the target codec at the required bit depth.
+
+- **AV1 / HEVC target:** skip if codec matches AND bit depth is 10-bit.
+- **H.264 target:** skip if codec is h264 AND bit depth is 8-bit or lower.
+
+If the codec matches but the container does not (e.g. AV1 in an MP4, target MKV), the decision is **Remux** instead.
+
+```
+skip if: codec == target AND bit_depth == required_depth
+remux if: above AND container != target_container
+```
+
+---
+
+### bpp_below_threshold
+
+**Bits-per-pixel** measures how efficiently a file is already compressed relative to its resolution and frame rate.
+
+#### Formula
+
+```
+raw_bpp = video_bitrate_bps / (width × height × fps)
+normalized_bpp = raw_bpp × resolution_multiplier
+effective_threshold = min_bpp_threshold × confidence_multiplier × codec_multiplier × target_multiplier
+
+skip if: normalized_bpp < effective_threshold
+```
+
+#### Resolution multipliers
+
+| Resolution | Multiplier | Reason |
+|------------|-----------|--------|
+| ≥ 3840px wide (4K) | 0.60× | 4K compression is naturally denser |
+| ≥ 1920px wide (1080p) | 0.80× | HD has moderate density premium |
+| < 1920px (SD) | 1.00× | No adjustment |
+
+#### Confidence multipliers
+
+Applied to the threshold when Alchemist is uncertain about bitrate accuracy:
+
+| Confidence | Multiplier | When |
+|-----------|-----------|------|
+| High | 1.00× | Video bitrate directly reported by FFprobe |
+| Medium | 0.70× | Bitrate estimated from container/file size |
+| Low | 0.50× | Bitrate estimated with low reliability |
+
+Lower confidence → lower threshold → harder to skip → safer.
+
+#### Codec multipliers
+
+| Source codec | Multiplier | Reason |
+|-------------|-----------|--------|
+| h264 (AVC) | 0.60× | H.264 needs more bits to match HEVC/AV1 quality |
+
+#### Target multipliers
+
+| Target codec | Multiplier | Reason |
+|-------------|-----------|--------|
+| AV1 | 0.70× | AV1 is more efficient; lower threshold means fewer skips, so more files are transcoded |
+| HEVC/H.264 | 1.00× | No adjustment |
+
+#### Worked example
+
+Settings: `min_bpp_threshold = 0.10`, target AV1, source HEVC 10-bit 4K.
+
+```
+raw_bpp = 15_000_000 / (3840 × 2160 × 24) = 0.0756
+normalized_bpp = 0.0756 × 0.60 = 0.0454 (4K multiplier)
+
+threshold = 0.10 × 1.00 × 1.00 × 0.70 = 0.070 (AV1 multiplier, HEVC source)
+
+0.0454 < 0.070 → SKIP (bpp_below_threshold)
+```
+
+---
+
+### below_min_file_size
+
+Files smaller than `min_file_size_mb` (default: 50 MB) are skipped. Small files have minimal savings potential relative to overhead.
+
+**Adjust:** Settings → Transcoding → Minimum file size.
+
+---
+
+### incomplete_metadata
+
+FFprobe could not determine resolution (width or height is zero). Without resolution, BPP cannot be computed and no valid decision can be made.
+
+**Diagnose:** run Library Doctor on the file.
+
+---
+
+### no_available_encoders
+
+No encoder is available for the target codec at all. Either:
+- CPU encoding is disabled (`allow_cpu_encoding = false`)
+- Hardware detection failed and CPU fallback is off
+
+**Fix:** Settings → Hardware → Enable CPU fallback.
+
+---
+
+### preferred_codec_unavailable_fallback_disabled
+
+The requested codec encoder is not available, and `allow_fallback = false` prevents using any substitute.
+
+**Fix:** Enable CPU fallback in Settings → Hardware, or check GPU detection.
+
+---
+
+## Transcode paths
+
+### transcode_h264_source
+
+H.264 files are unconditionally transcoded (if not skipped by BPP or size filters above). H.264 is the largest space-saving opportunity in most libraries.
+
+### transcode_recommended
+
+Everything else that passes the skip filters. Alchemist transcodes it because it is a plausible candidate based on the current codec and measured efficiency.
+
+---
+
+## Remux path
+
+### already_target_codec_wrong_container
+
+The video is already in the correct codec but wrapped in the wrong container (e.g. AV1 in `.mp4`, target is `.mkv`). Alchemist remuxes using stream copy — fast and lossless.
+
+---
+
+## Tuning
+
+| Setting | Effect |
+|---------|--------|
+| `min_bpp_threshold` | Higher = skip more files. Default: 0.10. |
+| `min_file_size_mb` | Higher = skip more small files. Default: 50. |
+| `size_reduction_threshold` | Minimum predicted savings. Default: 30%. |
+| `allow_fallback` | Allow CPU encoding when hardware is unavailable. |
+| `allow_cpu_encoding` | Allow CPU to encode (not just fall back). |
diff --git a/docs/package.json b/docs/package.json
index 66a88af..aea4495 100644
--- a/docs/package.json
+++ b/docs/package.json
@@ -1,6 +1,6 @@
{
"name": "alchemist-docs",
- "version": "0.3.1-rc.1",
+ "version": "0.3.1-rc.3",
"private": true,
"packageManager": "bun@1.3.5",
"scripts": {
diff --git a/migrations/20260411120000_encode_attempts.sql b/migrations/20260411120000_encode_attempts.sql
new file mode 100644
index 0000000..66002df
--- /dev/null
+++ b/migrations/20260411120000_encode_attempts.sql
@@ -0,0 +1,21 @@
+CREATE TABLE IF NOT EXISTS encode_attempts (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ job_id INTEGER NOT NULL REFERENCES jobs(id) ON DELETE CASCADE,
+ attempt_number INTEGER NOT NULL,
+ started_at TEXT,
+ finished_at TEXT NOT NULL DEFAULT (datetime('now')),
+ outcome TEXT NOT NULL CHECK(outcome IN ('completed', 'failed', 'cancelled')),
+ failure_code TEXT,
+ failure_summary TEXT,
+ input_size_bytes INTEGER,
+ output_size_bytes INTEGER,
+ encode_time_seconds REAL,
+ created_at TEXT NOT NULL DEFAULT (datetime('now'))
+);
+
+CREATE INDEX IF NOT EXISTS idx_encode_attempts_job_id ON encode_attempts(job_id);
+
+INSERT OR REPLACE INTO schema_info (key, value) VALUES
+ ('schema_version', '8'),
+ ('min_compatible_version', '0.2.5'),
+ ('last_updated', datetime('now'));
diff --git a/skills-lock.json b/skills-lock.json
new file mode 100644
index 0000000..6d9c722
--- /dev/null
+++ b/skills-lock.json
@@ -0,0 +1,10 @@
+{
+ "version": 1,
+ "skills": {
+ "caveman": {
+ "source": "JuliusBrussee/caveman",
+ "sourceType": "github",
+ "computedHash": "a818cdc41dcfaa50dd891c5cb5e5705968338de02e7e37949ca56e8c30ad4176"
+ }
+ }
+}
diff --git a/src/config.rs b/src/config.rs
index 2d65b2a..de48984 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -357,7 +357,9 @@ pub(crate) fn default_allow_fallback() -> bool {
}
pub(crate) fn default_tonemap_peak() -> f32 {
- 100.0
+ // HDR10 content is typically mastered at 1000 nits. Using 100 (SDR level)
+ // causes severe over-compression of highlights during tone-mapping.
+ 1000.0
}
pub(crate) fn default_tonemap_desat() -> f32 {
diff --git a/src/conversion.rs b/src/conversion.rs
index 90be1c8..e152f61 100644
--- a/src/conversion.rs
+++ b/src/conversion.rs
@@ -195,8 +195,8 @@ pub fn build_plan(
match normalized.video.hdr_mode.as_str() {
"tonemap" => filters.push(FilterStep::Tonemap {
algorithm: TonemapAlgorithm::Hable,
- peak: 100.0,
- desat: 0.2,
+ peak: crate::config::default_tonemap_peak(),
+ desat: crate::config::default_tonemap_desat(),
}),
"strip_metadata" => filters.push(FilterStep::StripHdrMetadata),
_ => {}
@@ -369,7 +369,18 @@ fn build_subtitle_plan(
copy_video: bool,
) -> Result {
match settings.subtitles.mode.as_str() {
- "copy" => Ok(SubtitleStreamPlan::CopyAllCompatible),
+ "copy" => {
+ if !crate::media::planner::subtitle_copy_supported(
+ &settings.output_container,
+ &analysis.metadata.subtitle_streams,
+ ) {
+ return Err(AlchemistError::Config(
+ "Subtitle copy is not supported for the selected output container with these subtitle codecs. \
+ Use 'remove' or 'burn' instead.".to_string(),
+ ));
+ }
+ Ok(SubtitleStreamPlan::CopyAllCompatible)
+ }
"remove" | "drop" | "none" => Ok(SubtitleStreamPlan::Drop),
"burn" => {
if copy_video {
diff --git a/src/db.rs b/src/db.rs
index 5750ba8..fef8911 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -576,6 +576,35 @@ pub struct DetailedEncodeStats {
pub created_at: DateTime,
}
+#[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)]
+pub struct EncodeAttempt {
+ pub id: i64,
+ pub job_id: i64,
+ pub attempt_number: i32,
+ pub started_at: Option<String>,
+ pub finished_at: String,
+ pub outcome: String,
+ pub failure_code: Option<String>,
+ pub failure_summary: Option<String>,
+ pub input_size_bytes: Option<i64>,
+ pub output_size_bytes: Option<i64>,
+ pub encode_time_seconds: Option<f64>,
+ pub created_at: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct EncodeAttemptInput {
+ pub job_id: i64,
+ pub attempt_number: i32,
+ pub started_at: Option<String>,
+ pub outcome: String,
+ pub failure_code: Option<String>,
+ pub failure_summary: Option<String>,
+ pub input_size_bytes: Option<i64>,
+ pub output_size_bytes: Option<i64>,
+ pub encode_time_seconds: Option<f64>,
+}
+
#[derive(Debug, Clone)]
pub struct EncodeStatsInput {
pub job_id: i64,
@@ -1161,6 +1190,45 @@ impl Db {
Ok(())
}
+ /// Record a single encode attempt outcome
+ pub async fn insert_encode_attempt(&self, input: EncodeAttemptInput) -> Result<()> {
+ sqlx::query(
+ "INSERT INTO encode_attempts
+ (job_id, attempt_number, started_at, finished_at, outcome,
+ failure_code, failure_summary, input_size_bytes, output_size_bytes,
+ encode_time_seconds)
+ VALUES (?, ?, ?, datetime('now'), ?, ?, ?, ?, ?, ?)",
+ )
+ .bind(input.job_id)
+ .bind(input.attempt_number)
+ .bind(input.started_at)
+ .bind(input.outcome)
+ .bind(input.failure_code)
+ .bind(input.failure_summary)
+ .bind(input.input_size_bytes)
+ .bind(input.output_size_bytes)
+ .bind(input.encode_time_seconds)
+ .execute(&self.pool)
+ .await?;
+ Ok(())
+ }
+
+ /// Get all encode attempts for a job, ordered by attempt_number
+ pub async fn get_encode_attempts_by_job(&self, job_id: i64) -> Result<Vec<EncodeAttempt>> {
+ let attempts = sqlx::query_as::<_, EncodeAttempt>(
+ "SELECT id, job_id, attempt_number, started_at, finished_at, outcome,
+ failure_code, failure_summary, input_size_bytes, output_size_bytes,
+ encode_time_seconds, created_at
+ FROM encode_attempts
+ WHERE job_id = ?
+ ORDER BY attempt_number ASC",
+ )
+ .bind(job_id)
+ .fetch_all(&self.pool)
+ .await?;
+ Ok(attempts)
+ }
+
/// Get job by ID
pub async fn get_job(&self, id: i64) -> Result