diff --git a/.agents/skills/caveman/SKILL.md b/.agents/skills/caveman/SKILL.md new file mode 100644 index 0000000..2ab498b --- /dev/null +++ b/.agents/skills/caveman/SKILL.md @@ -0,0 +1,67 @@ +--- +name: caveman +description: > + Ultra-compressed communication mode. Cuts token usage ~75% by speaking like caveman + while keeping full technical accuracy. Supports intensity levels: lite, full (default), ultra, + wenyan-lite, wenyan-full, wenyan-ultra. + Use when user says "caveman mode", "talk like caveman", "use caveman", "less tokens", + "be brief", or invokes /caveman. Also auto-triggers when token efficiency is requested. +--- + +Respond terse like smart caveman. All technical substance stay. Only fluff die. + +## Persistence + +ACTIVE EVERY RESPONSE. No revert after many turns. No filler drift. Still active if unsure. Off only: "stop caveman" / "normal mode". + +Default: **full**. Switch: `/caveman lite|full|ultra`. + +## Rules + +Drop: articles (a/an/the), filler (just/really/basically/actually/simply), pleasantries (sure/certainly/of course/happy to), hedging. Fragments OK. Short synonyms (big not extensive, fix not "implement a solution for"). Technical terms exact. Code blocks unchanged. Errors quoted exact. + +Pattern: `[thing] [action] [reason]. [next step].` + +Not: "Sure! I'd be happy to help you with that. The issue you're experiencing is likely caused by..." +Yes: "Bug in auth middleware. Token expiry check use `<` not `<=`. Fix:" + +## Intensity + +| Level | What change | +|-------|------------| +| **lite** | No filler/hedging. Keep articles + full sentences. Professional but tight | +| **full** | Drop articles, fragments OK, short synonyms. Classic caveman | +| **ultra** | Abbreviate (DB/auth/config/req/res/fn/impl), strip conjunctions, arrows for causality (X → Y), one word when one word enough | +| **wenyan-lite** | Semi-classical. Drop filler/hedging but keep grammar structure, classical register | +| **wenyan-full** | Maximum classical terseness. 
Fully 文言文. 80-90% character reduction. Classical sentence patterns, verbs precede objects, subjects often omitted, classical particles (之/乃/為/其) | +| **wenyan-ultra** | Extreme abbreviation while keeping classical Chinese feel. Maximum compression, ultra terse | + +Example — "Why React component re-render?" +- lite: "Your component re-renders because you create a new object reference each render. Wrap it in `useMemo`." +- full: "New object ref each render. Inline object prop = new ref = re-render. Wrap in `useMemo`." +- ultra: "Inline obj prop → new ref → re-render. `useMemo`." +- wenyan-lite: "組件頻重繪,以每繪新生對象參照故。以 useMemo 包之。" +- wenyan-full: "物出新參照,致重繪。useMemo .Wrap之。" +- wenyan-ultra: "新參照→重繪。useMemo Wrap。" + +Example — "Explain database connection pooling." +- lite: "Connection pooling reuses open connections instead of creating new ones per request. Avoids repeated handshake overhead." +- full: "Pool reuse open DB connections. No new connection per request. Skip handshake overhead." +- ultra: "Pool = reuse DB conn. Skip handshake → fast under load." +- wenyan-full: "池reuse open connection。不每req新開。skip handshake overhead。" +- wenyan-ultra: "池reuse conn。skip handshake → fast。" + +## Auto-Clarity + +Drop caveman for: security warnings, irreversible action confirmations, multi-step sequences where fragment order risks misread, user asks to clarify or repeats question. Resume caveman after clear part done. + +Example — destructive op: +> **Warning:** This will permanently delete all rows in the `users` table and cannot be undone. +> ```sql +> DROP TABLE users; +> ``` +> Caveman resume. Verify backup exist first. + +## Boundaries + +Code/commits/PRs: write normal. "stop caveman" or "normal mode": revert. Level persist until changed or session end. 
\ No newline at end of file diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 6471280..3066837 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -12,7 +12,22 @@ "Bash(bash --version)", "Bash(git tag:*)", "Bash(cargo clippy:*)", - "Bash(bun run:*)" + "Bash(bun run:*)", + "Bash(ls /Users/brooklyn/data/alchemist/*.md)", + "Bash(ls /Users/brooklyn/data/alchemist/docs/*.md)", + "Bash(npx skills:*)", + "Bash(find /Users/brooklyn/data/alchemist/web -name tailwind.config.* -o -name *.config.ts -o -name *.config.js)", + "Bash(just check-web:*)", + "Bash(git stash:*)", + "Bash(just test-e2e:*)", + "Bash(bunx tsc:*)", + "Bash(wait)", + "Bash(npx playwright:*)", + "Bash(just check-rust:*)", + "Bash(cargo fmt:*)", + "Bash(cargo test:*)", + "Bash(just check:*)", + "Bash(just test:*)" ] } } diff --git a/.claude/skills/caveman b/.claude/skills/caveman new file mode 120000 index 0000000..9016aac --- /dev/null +++ b/.claude/skills/caveman @@ -0,0 +1 @@ +../../.agents/skills/caveman \ No newline at end of file diff --git a/.idea/alchemist.iml b/.idea/alchemist.iml index 568915c..5adcd65 100644 --- a/.idea/alchemist.iml +++ b/.idea/alchemist.iml @@ -2,7 +2,7 @@ - + @@ -13,6 +13,5 @@ - \ No newline at end of file diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml index 5cb71ef..320340d 100644 --- a/.idea/inspectionProfiles/Project_Default.xml +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -1,6 +1,8 @@ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d1af97..551598e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,28 @@ All notable changes to this project will be documented in this file. 
+## [0.3.1-rc.3] - 2026-04-12 + +### New Features + +#### Job Management Refactor +- **Componentized Job Manager** — extracted monolithic `JobManager.tsx` into a modular suite under `web/src/components/jobs/`, including dedicated components for the toolbar, table, and detail modal. +- **Enhanced Job Detail Modal** — rebuilt the job detail view with better loading states, smoother transitions, and improved information hierarchy for analysis, decisions, and failure reasons. +- **Job SSE Hook** — unified job-related Server-Sent Events logic into a custom `useJobSSE` hook for better state management and reduced re-renders. + +#### Themes & UX +- **Midnight OLED+** — enhanced the `midnight` theme with true-black surfaces and suppressed decorative gradients to maximize OLED power savings. +- **Improved Toasts** — toast notifications now feature a high-quality backdrop blur and refined border styling for better visibility against busy backgrounds. + +#### Reliability & Observability +- **Engine Lifecycle Specs** — added a comprehensive Playwright suite for validating engine transitions (Running -> Draining -> Paused -> Stopped). +- **Planner & Lifecycle Docs** — added detailed technical documentation for the transcoding planner logic and engine state machine. +- **Encode Attempt Tracking** — added a database migration to track individual encode attempts, laying the groundwork for more granular retry statistics. + +#### Hardware & Performance +- **Concurrency & Speed Optimizations** — internal refinements to the executor and processor to improve hardware utilization and address reported speed issues on certain platforms. +- **Backlog Grooming** — updated `TODO.md` with a focus on validating AMF and VAAPI AV1 hardware encoders. 
+ ## [0.3.1-rc.1] - 2026-04-08 ### New Features diff --git a/Cargo.lock b/Cargo.lock index 7144fb5..2feb7a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,7 +13,7 @@ dependencies = [ [[package]] name = "alchemist" -version = "0.3.1-rc.1" +version = "0.3.1-rc.3" dependencies = [ "anyhow", "argon2", diff --git a/Cargo.toml b/Cargo.toml index b3b3a55..2858197 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "alchemist" -version = "0.3.1-rc.1" +version = "0.3.1-rc.3" edition = "2024" rust-version = "1.85" license = "GPL-3.0" diff --git a/TODO.md b/TODO.md index 7a37d96..56b70f2 100644 --- a/TODO.md +++ b/TODO.md @@ -1,21 +1,8 @@ # Todo List -Remove `src/wizard.rs` from the project, the web setup handles it.. maybe keep for CLI users? +## AMD / VAAPI / AMF -## Frontend - -- Rework the Jobs screen sorting/filter island so it uses space more intelligently on narrow screens and overflows in a controlled, intentional-looking way instead of overflowing awkwardly. -- Make the toast across all pages blur the background instead of reading as transparent. -- Fix the Jobs modal so active jobs do not show `Waiting for analysis` while encoding/remuxing is already in progress. -- Reduce the stop/drain redundancy in the header so pressing Stop does not leave both the button and the status pill saying `Stopping`. -- Make the `midnight` OLED theme truly black, without gray treatment or shared gradients. - -## Backend - -- Investigate why encoding is very slow on macOS even when hardware acceleration is selected. -- Investigate why so many jobs are skipped and why only one job appears to run at a time even when concurrent jobs are enabled. -- Fix the clippy error that is currently blocking CI/CD. - -## Jobs / UX - -- Improve failed-job explanations on the Jobs screen when the current failure summary is weak or missing. +- Validate `av1_vaapi` on real Linux VAAPI hardware — confirm encode succeeds with current args. 
+- Validate `av1_amf` on real Windows AMF hardware — confirm encode succeeds with current args. +- If either encoder needs quality/rate-control params, apply the same pattern as the VideoToolbox fix (add `rate_control: Option<&RateControl>` to `vaapi::append_args` and `amf::append_args`). +- Update support claims in README and UI only after validation passes. diff --git a/VERSION b/VERSION index c16a70a..21ad887 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.3.1-rc.1 +0.3.1-rc.3 diff --git a/audit.md b/audit.md new file mode 100644 index 0000000..62321a4 --- /dev/null +++ b/audit.md @@ -0,0 +1,136 @@ +# Audit Findings + +Date: 2026-04-11 + +## Summary + +This audit focused on the highest-risk paths in Alchemist: + +- queue claiming and cancellation +- media planning and execution +- conversion validation +- setup/auth exposure +- job detail and failure UX + +The current automated checks were green at audit time, but several real +correctness and behavior issues remain. + +## Findings + +### [P1] Canceling a job during analysis can be overwritten + +Relevant code: + +- `src/server/jobs.rs:41` +- `src/media/pipeline.rs:927` +- `src/media/pipeline.rs:970` +- `src/orchestrator.rs:239` + +`request_job_cancel()` marks `analyzing` and `resuming` jobs as +`cancelled` immediately. But the analysis/planning path can still run to +completion and later overwrite that state to `skipped`, +`encoding`/`remuxing`, or another follow-on state. + +The transcoder-side `pending_cancels` check only applies around FFmpeg +spawn, so a cancel issued during analysis is not guaranteed to stop the +pipeline before state transitions are persisted. 
+ +Impact: + +- a user-visible cancel can be lost +- the UI can report a cancelled job that later resumes or becomes skipped +- queue state becomes harder to trust + +### [P1] VideoToolbox quality controls are effectively a no-op + +Relevant code: + +- `src/config.rs:85` +- `src/media/planner.rs:633` +- `src/media/ffmpeg/videotoolbox.rs:3` +- `src/conversion.rs:424` + +The config still defines a VideoToolbox quality ladder, and the planner +still emits `RateControl::Cq` for VideoToolbox encoders. But the actual +VideoToolbox FFmpeg builder ignores rate-control input entirely. + +The Convert workflow does the same thing by still generating `Cq` for +non-CPU/QSV encoders even though the VideoToolbox path does not consume +it. + +Impact: + +- quality profile does not meaningfully affect VideoToolbox jobs +- Convert quality values for VideoToolbox are misleading +- macOS throughput/quality tradeoffs are harder to reason about + +### [P2] Convert does not reuse subtitle/container compatibility checks + +Relevant code: + +- `src/media/planner.rs:863` +- `src/media/planner.rs:904` +- `src/conversion.rs:272` +- `src/conversion.rs:366` + +The main library planner explicitly rejects unsafe subtitle-copy +combinations, especially for MP4/MOV targets. The Convert flow has its +own normalization/build path and does not reuse that validation. + +Impact: + +- the Convert UI can accept settings that are known to fail later in FFmpeg +- conversion behavior diverges from library-job behavior +- users can hit avoidable execution-time errors instead of fast validation + +### [P2] Completed job details omit metadata at the API layer + +Relevant code: + +- `src/server/jobs.rs:344` +- `web/src/components/JobManager.tsx:1774` + +The job detail endpoint explicitly returns `metadata = None` for +`completed` jobs, even though the Jobs modal is structured to display +input metadata when available. 
+ +Impact: + +- completed-job details are structurally incomplete +- the frontend needs special-case empty-state behavior +- operator confidence is lower when comparing completed jobs after the fact + +### [P2] LAN-only setup is easy to misconfigure behind a local reverse proxy + +Relevant code: + +- `src/server/middleware.rs:269` +- `src/server/middleware.rs:300` + +The setup gate uses `request_ip()` and trusts forwarded headers only when +the direct peer is local/private. If Alchemist sits behind a loopback or +LAN reverse proxy that fails to forward the real client IP, the request +falls back to the proxy peer IP and is treated as LAN-local. + +Impact: + +- public reverse-proxy deployments can accidentally expose setup +- behavior depends on correct proxy header forwarding +- the security model is sound in principle but fragile in deployment + +## What To Fix First + +1. Fix the cancel-during-analysis race. +2. Fix or redesign VideoToolbox quality handling so the UI and planner do + not promise controls that the backend ignores. +3. Reuse planner validation in Convert for subtitle/container safety. +4. Decide whether completed jobs should persist and return metadata in the + detail API. + +## What To Investigate Next + +1. Use runtime diagnostics to confirm whether macOS slowness is true + hardware underperformance, silent fallback, or filter overhead. +2. Verify whether “only one job at a time” is caused by actual worker + serialization or by planner eligibility/skips. +3. Review dominant skip reasons before relaxing planner heuristics. diff --git a/docs/docs/changelog.md b/docs/docs/changelog.md index b72234f..6ffadc8 100644 --- a/docs/docs/changelog.md +++ b/docs/docs/changelog.md @@ -3,6 +3,28 @@ title: Changelog description: Release history for Alchemist. 
--- +## [0.3.1-rc.3] - 2026-04-12 + +### New Features + +#### Job Management Refactor +- **Componentized Job Manager** — extracted monolithic `JobManager.tsx` into a modular suite under `web/src/components/jobs/`, including dedicated components for the toolbar, table, and detail modal. +- **Enhanced Job Detail Modal** — rebuilt the job detail view with better loading states, smoother transitions, and improved information hierarchy for analysis, decisions, and failure reasons. +- **Job SSE Hook** — unified job-related Server-Sent Events logic into a custom `useJobSSE` hook for better state management and reduced re-renders. + +#### Themes & UX +- **Midnight OLED+** — enhanced the `midnight` theme with true-black surfaces and suppressed decorative gradients to maximize OLED power savings. +- **Improved Toasts** — toast notifications now feature a high-quality backdrop blur and refined border styling for better visibility against busy backgrounds. + +#### Reliability & Observability +- **Engine Lifecycle Specs** — added a comprehensive Playwright suite for validating engine transitions (Running -> Draining -> Paused -> Stopped). +- **Planner & Lifecycle Docs** — added detailed technical documentation for the transcoding planner logic and engine state machine. +- **Encode Attempt Tracking** — added a database migration to track individual encode attempts, laying the groundwork for more granular retry statistics. + +#### Hardware & Performance +- **Concurrency & Speed Optimizations** — internal refinements to the executor and processor to improve hardware utilization and address reported speed issues on certain platforms. +- **Backlog Grooming** — updated `TODO.md` with a focus on validating AMF and VAAPI AV1 hardware encoders. 
+ ## [0.3.1-rc.1] - 2026-04-08 ### New Features diff --git a/docs/docs/engine-lifecycle.md b/docs/docs/engine-lifecycle.md new file mode 100644 index 0000000..fed125a --- /dev/null +++ b/docs/docs/engine-lifecycle.md @@ -0,0 +1,152 @@ +--- +title: Engine Lifecycle +description: Engine states, transitions, and job cancellation semantics. +--- + +The Alchemist engine is a background loop that claims queued jobs, processes them, and manages concurrent execution. This page documents all states, what triggers each transition, and the exact behavior during cancel, pause, drain, and restart. + +--- + +## Engine states + +| State | Jobs start? | Active jobs affected? | How to enter | +|-------|------------|----------------------|-------------| +| **Running** | Yes | Not affected | Resume, restart | +| **Paused** (manual) | No | Not cancelled | Header → Stop, `POST /api/engine/pause` | +| **Paused** (scheduler) | No | Not cancelled | Schedule window activates | +| **Draining** | No | Run to completion | Header → Stop (while running), `POST /api/engine/drain` | +| **Restarting** | No (briefly) | Cancelled | `POST /api/engine/restart` | +| **Shutdown** | No | Force-cancelled | Process exit / SIGTERM | + +Paused-manual and paused-scheduler are independent. Both must be cleared for jobs to start again. + +--- + +## State transitions + +``` + Resume + ┌──────────────────────────────┐ + │ ▼ +Paused ◄─── Pause ─────── Running ──── Drain ───► Draining + │ ▲ │ │ + │ Restart │ └─── Shutdown ──► Shutdown + │ ┌──────────┐ │ + └─────►│ Restart │────────┘ + └──────────┘ + (brief pause, + cancel in-flight, + then resume) +``` + +### Pause + +- Sets `manual_paused = true`. +- The claim loop polls every 2 seconds and blocks while paused. +- Active jobs continue until they finish naturally. +- Does **not** affect draining state. + +### Resume + +- Clears `manual_paused`. +- Does **not** clear `scheduler_paused` (scheduler manages its own flag). 
+- The claim loop immediately resumes on the next iteration. +- Does **not** cancel the drain if draining. + +### Drain + +- Sets `draining = true` without setting `paused`. +- No new jobs are claimed. +- Active jobs run to completion. +- When `in_flight_jobs` reaches zero: drain completes, `draining` is cleared, engine transitions to **Paused** (manual). + +### Restart + +1. Pause (set `manual_paused = true`). +2. Cancel all in-flight jobs (Encoding, Remuxing, Analyzing, Resuming) via FFmpeg kill signal. +3. Clear `draining` flag. +4. Clear `idle_notified` flag. +5. Resume (clear `manual_paused`). + +Cancelled in-flight jobs are marked `failed` with `failure_summary = "cancelled"`. They are eligible for automatic retry per the retry backoff schedule. + +### Shutdown + +Called when the process exits (SIGTERM / graceful shutdown): + +1. Cancel all active jobs via FFmpeg kill. +2. Wait up to a short timeout for kills to complete. +3. No retry is scheduled — the jobs return to `queued` on next startup. + +--- + +## Job states + +| Job state | Meaning | Terminal? | +|-----------|---------|-----------| +| `queued` | Waiting to be claimed | No | +| `analyzing` | FFprobe running on the file | No | +| `encoding` | FFmpeg encoding in progress | No | +| `remuxing` | FFmpeg stream-copy in progress | No | +| `resuming` | Job being re-queued after retry | No | +| `completed` | Encode finished successfully | Yes | +| `skipped` | Planner decided not to transcode | Yes | +| `failed` | Encode or analysis failed | Yes (with retry) | +| `cancelled` | Cancelled by operator | Yes (with retry) | + +--- + +## Retry backoff + +Failed and cancelled jobs are automatically retried. The engine checks elapsed time before claiming. + +| Attempt # | Backoff before retry | +|-----------|---------------------| +| 1 | 5 minutes | +| 2 | 15 minutes | +| 3 | 60 minutes | +| 4+ | 6 hours | + +After 3 consecutive failures with no success, the job still retries on the 6-hour schedule. 
There is no permanent failure state from retries alone — operator must manually delete or cancel the job to stop retries. + +--- + +## Cancel semantics + +### Cancel mid-analysis + +FFprobe process is not currently cancellable via signal. The cancel flag is checked before FFprobe starts. If analysis is in progress when cancel arrives, the job will be cancelled after analysis completes (before encoding starts). + +### Cancel mid-encode + +The FFmpeg process receives a kill signal immediately. The partial output file is cleaned up. The job is marked `failed` with `failure_summary = "cancelled"`. + +### Cancel while queued + +The job status is set to `cancelled` directly without any process kill. + +--- + +## Pause vs. drain vs. restart + +| Operation | In-flight jobs | Partial output | New jobs | +|-----------|---------------|---------------|----------| +| Pause | Finish normally | Not affected | Blocked | +| Drain | Finish normally | Not affected | Blocked until drain completes | +| Restart | Killed | Cleaned up | Blocked briefly, then resume | +| Shutdown | Killed | Cleaned up | N/A | + +Use **Pause** when you need to inspect the queue or change settings without losing progress. + +Use **Drain** when you want to stop gracefully after the current batch finishes (e.g. before maintenance). + +Use **Restart** to force a clean slate — e.g. after changing hardware settings that affect in-flight jobs. + +--- + +## Boot sequence + +1. Migrations run. +2. Any jobs left in `encoding`, `remuxing`, `analyzing`, or `resuming` are reset to `queued` (crash recovery). +3. Boot analysis runs — all `queued` jobs that have no metadata have FFprobe run on them. This uses a single-slot semaphore and blocks the claim loop. +4. Engine claim loop starts — jobs are claimed and processed up to the concurrent limit. 
diff --git a/docs/docs/planner.md b/docs/docs/planner.md new file mode 100644 index 0000000..63a3d71 --- /dev/null +++ b/docs/docs/planner.md @@ -0,0 +1,176 @@ +--- +title: Planner Heuristics +description: How Alchemist decides whether to transcode, skip, or remux a file. +--- + +The planner runs once per job during the analysis phase and produces one of three decisions: + +- **Transcode** — re-encode the video stream. +- **Remux** — copy streams into a different container (lossless, fast). +- **Skip** — mark the file as not worth processing. + +Decisions are deterministic and based solely on file metadata and settings. + +--- + +## Decision flow + +Each condition is evaluated in order. The first match wins. + +``` +1. already_target_codec → Skip (or Remux if container mismatch) +2. no_available_encoders → Skip +3. preferred_codec_unavailable → Skip (if fallback disabled) +4. no_suitable_encoder → Skip (no encoder selected) +5. incomplete_metadata → Skip (missing resolution) +6. bpp_below_threshold → Skip (already efficient) +7. below_min_file_size → Skip (too small) +8. h264 source → Transcode (priority path) +9. everything else → Transcode (transcode_recommended) +``` + +--- + +## Skip conditions + +### already_target_codec + +The video stream is already in the target codec at the required bit depth. + +- **AV1 / HEVC target:** skip if codec matches AND bit depth is 10-bit. +- **H.264 target:** skip if codec is h264 AND bit depth is 8-bit or lower. + +If the codec matches but the container does not (e.g. AV1 in an MP4, target MKV), the decision is **Remux** instead. + +``` +skip if: codec == target AND bit_depth == required_depth +remux if: above AND container != target_container +``` + +--- + +### bpp_below_threshold + +**Bits-per-pixel** measures how efficiently a file is already compressed relative to its resolution and frame rate. 
+ +#### Formula + +``` +raw_bpp = video_bitrate_bps / (width × height × fps) +normalized_bpp = raw_bpp × resolution_multiplier +effective_threshold = min_bpp_threshold × confidence_multiplier × codec_multiplier × target_multiplier + +skip if: normalized_bpp < effective_threshold +``` + +#### Resolution multipliers + +| Resolution | Multiplier | Reason | +|------------|-----------|--------| +| ≥ 3840px wide (4K) | 0.60× | 4K compression is naturally denser | +| ≥ 1920px wide (1080p) | 0.80× | HD has moderate density premium | +| < 1920px (SD) | 1.00× | No adjustment | + +#### Confidence multipliers + +Applied to the threshold when Alchemist is uncertain about bitrate accuracy: + +| Confidence | Multiplier | When | +|-----------|-----------|------| +| High | 1.00× | Video bitrate directly reported by FFprobe | +| Medium | 0.70× | Bitrate estimated from container/file size | +| Low | 0.50× | Bitrate estimated with low reliability | + +Lower confidence → lower threshold → harder to skip → safer. + +#### Codec multipliers + +| Source codec | Multiplier | Reason | +|-------------|-----------|--------| +| h264 (AVC) | 0.60× | H.264 needs more bits to match HEVC/AV1 quality | + +#### Target multipliers + +| Target codec | Multiplier | Reason | +|-------------|-----------|--------| +| AV1 | 0.70× | AV1 is more efficient; skip more aggressively | +| HEVC/H.264 | 1.00× | No adjustment | + +#### Worked example + +Settings: `min_bpp_threshold = 0.10`, target AV1, source HEVC 10-bit 4K. + +``` +raw_bpp = 15_000_000 / (3840 × 2160 × 24) = 0.0756 +normalized_bpp = 0.0756 × 0.60 = 0.0454 (4K multiplier) + +threshold = 0.10 × 1.00 × 1.00 × 0.70 = 0.070 (AV1 multiplier, HEVC source) + +0.0454 < 0.070 → SKIP (bpp_below_threshold) +``` + +--- + +### below_min_file_size + +Files smaller than `min_file_size_mb` (default: 50 MB) are skipped. Small files have minimal savings potential relative to overhead. + +**Adjust:** Settings → Transcoding → Minimum file size. 
+ +--- + +### incomplete_metadata + +FFprobe could not determine resolution (width or height is zero). Without resolution, BPP cannot be computed and no valid decision can be made. + +**Diagnose:** run Library Doctor on the file. + +--- + +### no_available_encoders + +No encoder is available for the target codec at all. Either: +- CPU encoding is disabled (`allow_cpu_encoding = false`) +- Hardware detection failed and CPU fallback is off + +**Fix:** Settings → Hardware → Enable CPU fallback. + +--- + +### preferred_codec_unavailable_fallback_disabled + +The requested codec encoder is not available, and `allow_fallback = false` prevents using any substitute. + +**Fix:** Enable CPU fallback in Settings → Hardware, or check GPU detection. + +--- + +## Transcode paths + +### transcode_h264_source + +H.264 files are unconditionally transcoded (if not skipped by BPP or size filters above). H.264 is the largest space-saving opportunity in most libraries. + +### transcode_recommended + +Everything else that passes the skip filters. Alchemist transcodes it because it is a plausible candidate based on the current codec and measured efficiency. + +--- + +## Remux path + +### already_target_codec_wrong_container + +The video is already in the correct codec but wrapped in the wrong container (e.g. AV1 in `.mp4`, target is `.mkv`). Alchemist remuxes using stream copy — fast and lossless. + +--- + +## Tuning + +| Setting | Effect | +|---------|--------| +| `min_bpp_threshold` | Higher = skip more files. Default: 0.10. | +| `min_file_size_mb` | Higher = skip more small files. Default: 50. | +| `size_reduction_threshold` | Minimum predicted savings. Default: 30%. | +| `allow_fallback` | Allow CPU encoding when hardware is unavailable. | +| `allow_cpu_encoding` | Allow CPU to encode (not just fall back). 
| diff --git a/docs/package.json b/docs/package.json index 66a88af..aea4495 100644 --- a/docs/package.json +++ b/docs/package.json @@ -1,6 +1,6 @@ { "name": "alchemist-docs", - "version": "0.3.1-rc.1", + "version": "0.3.1-rc.3", "private": true, "packageManager": "bun@1.3.5", "scripts": { diff --git a/migrations/20260411120000_encode_attempts.sql b/migrations/20260411120000_encode_attempts.sql new file mode 100644 index 0000000..66002df --- /dev/null +++ b/migrations/20260411120000_encode_attempts.sql @@ -0,0 +1,21 @@ +CREATE TABLE IF NOT EXISTS encode_attempts ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + job_id INTEGER NOT NULL REFERENCES jobs(id) ON DELETE CASCADE, + attempt_number INTEGER NOT NULL, + started_at TEXT, + finished_at TEXT NOT NULL DEFAULT (datetime('now')), + outcome TEXT NOT NULL CHECK(outcome IN ('completed', 'failed', 'cancelled')), + failure_code TEXT, + failure_summary TEXT, + input_size_bytes INTEGER, + output_size_bytes INTEGER, + encode_time_seconds REAL, + created_at TEXT NOT NULL DEFAULT (datetime('now')) +); + +CREATE INDEX IF NOT EXISTS idx_encode_attempts_job_id ON encode_attempts(job_id); + +INSERT OR REPLACE INTO schema_info (key, value) VALUES + ('schema_version', '8'), + ('min_compatible_version', '0.2.5'), + ('last_updated', datetime('now')); diff --git a/skills-lock.json b/skills-lock.json new file mode 100644 index 0000000..6d9c722 --- /dev/null +++ b/skills-lock.json @@ -0,0 +1,10 @@ +{ + "version": 1, + "skills": { + "caveman": { + "source": "JuliusBrussee/caveman", + "sourceType": "github", + "computedHash": "a818cdc41dcfaa50dd891c5cb5e5705968338de02e7e37949ca56e8c30ad4176" + } + } +} diff --git a/src/config.rs b/src/config.rs index 2d65b2a..de48984 100644 --- a/src/config.rs +++ b/src/config.rs @@ -357,7 +357,9 @@ pub(crate) fn default_allow_fallback() -> bool { } pub(crate) fn default_tonemap_peak() -> f32 { - 100.0 + // HDR10 content is typically mastered at 1000 nits. 
Using 100 (SDR level) + // causes severe over-compression of highlights during tone-mapping. + 1000.0 } pub(crate) fn default_tonemap_desat() -> f32 { diff --git a/src/conversion.rs b/src/conversion.rs index 90be1c8..e152f61 100644 --- a/src/conversion.rs +++ b/src/conversion.rs @@ -195,8 +195,8 @@ pub fn build_plan( match normalized.video.hdr_mode.as_str() { "tonemap" => filters.push(FilterStep::Tonemap { algorithm: TonemapAlgorithm::Hable, - peak: 100.0, - desat: 0.2, + peak: crate::config::default_tonemap_peak(), + desat: crate::config::default_tonemap_desat(), }), "strip_metadata" => filters.push(FilterStep::StripHdrMetadata), _ => {} @@ -369,7 +369,18 @@ fn build_subtitle_plan( copy_video: bool, ) -> Result { match settings.subtitles.mode.as_str() { - "copy" => Ok(SubtitleStreamPlan::CopyAllCompatible), + "copy" => { + if !crate::media::planner::subtitle_copy_supported( + &settings.output_container, + &analysis.metadata.subtitle_streams, + ) { + return Err(AlchemistError::Config( + "Subtitle copy is not supported for the selected output container with these subtitle codecs. 
\ + Use 'remove' or 'burn' instead.".to_string(), + )); + } + Ok(SubtitleStreamPlan::CopyAllCompatible) + } "remove" | "drop" | "none" => Ok(SubtitleStreamPlan::Drop), "burn" => { if copy_video { diff --git a/src/db.rs b/src/db.rs index 5750ba8..fef8911 100644 --- a/src/db.rs +++ b/src/db.rs @@ -576,6 +576,35 @@ pub struct DetailedEncodeStats { pub created_at: DateTime, } +#[derive(Debug, Serialize, Deserialize, Clone, sqlx::FromRow)] +pub struct EncodeAttempt { + pub id: i64, + pub job_id: i64, + pub attempt_number: i32, + pub started_at: Option, + pub finished_at: String, + pub outcome: String, + pub failure_code: Option, + pub failure_summary: Option, + pub input_size_bytes: Option, + pub output_size_bytes: Option, + pub encode_time_seconds: Option, + pub created_at: String, +} + +#[derive(Debug, Clone)] +pub struct EncodeAttemptInput { + pub job_id: i64, + pub attempt_number: i32, + pub started_at: Option, + pub outcome: String, + pub failure_code: Option, + pub failure_summary: Option, + pub input_size_bytes: Option, + pub output_size_bytes: Option, + pub encode_time_seconds: Option, +} + #[derive(Debug, Clone)] pub struct EncodeStatsInput { pub job_id: i64, @@ -1161,6 +1190,45 @@ impl Db { Ok(()) } + /// Record a single encode attempt outcome + pub async fn insert_encode_attempt(&self, input: EncodeAttemptInput) -> Result<()> { + sqlx::query( + "INSERT INTO encode_attempts + (job_id, attempt_number, started_at, finished_at, outcome, + failure_code, failure_summary, input_size_bytes, output_size_bytes, + encode_time_seconds) + VALUES (?, ?, ?, datetime('now'), ?, ?, ?, ?, ?, ?)", + ) + .bind(input.job_id) + .bind(input.attempt_number) + .bind(input.started_at) + .bind(input.outcome) + .bind(input.failure_code) + .bind(input.failure_summary) + .bind(input.input_size_bytes) + .bind(input.output_size_bytes) + .bind(input.encode_time_seconds) + .execute(&self.pool) + .await?; + Ok(()) + } + + /// Get all encode attempts for a job, ordered by attempt_number + pub 
async fn get_encode_attempts_by_job(&self, job_id: i64) -> Result> { + let attempts = sqlx::query_as::<_, EncodeAttempt>( + "SELECT id, job_id, attempt_number, started_at, finished_at, outcome, + failure_code, failure_summary, input_size_bytes, output_size_bytes, + encode_time_seconds, created_at + FROM encode_attempts + WHERE job_id = ? + ORDER BY attempt_number ASC", + ) + .bind(job_id) + .fetch_all(&self.pool) + .await?; + Ok(attempts) + } + /// Get job by ID pub async fn get_job(&self, id: i64) -> Result> { let job = sqlx::query_as::<_, Job>( @@ -2531,6 +2599,32 @@ impl Db { .await } + pub async fn get_skip_reason_counts(&self) -> Result> { + let pool = &self.pool; + timed_query("get_skip_reason_counts", || async { + let rows = sqlx::query( + "SELECT COALESCE(reason_code, action) AS code, COUNT(*) AS count + FROM decisions + WHERE action = 'skip' + AND DATE(created_at, 'localtime') = DATE('now', 'localtime') + GROUP BY COALESCE(reason_code, action) + ORDER BY count DESC, code ASC + LIMIT 20", + ) + .fetch_all(pool) + .await?; + Ok(rows + .into_iter() + .map(|row| { + let code: String = row.get("code"); + let count: i64 = row.get("count"); + (code, count) + }) + .collect()) + }) + .await + } + pub async fn add_log(&self, level: &str, job_id: Option, message: &str) -> Result<()> { sqlx::query("INSERT INTO logs (level, job_id, message) VALUES (?, ?, ?)") .bind(level) diff --git a/src/explanations.rs b/src/explanations.rs index 9eead06..2387fd6 100644 --- a/src/explanations.rs +++ b/src/explanations.rs @@ -606,6 +606,21 @@ pub fn failure_from_summary(summary: &str) -> Explanation { ); } + if normalized.contains("vtcompressionsession") + || normalized.contains("kvtvideoencoder") + || normalized.contains("kvtvideoencodenotavailablenowerr") + || normalized.contains("videotoolbox session") + { + return Explanation::new( + ExplanationCategory::Failure, + "videotoolbox_session_failure", + "VideoToolbox session failed", + "The macOS VideoToolbox hardware encoder could not 
initialize or lost its session mid-encode. This can happen when the GPU is under load or when another process holds the hardware encoder.", + Some("Retry the job. If this repeats, reduce concurrent jobs, restart Alchemist, or enable CPU fallback.".to_string()), + summary, + ); + } + if normalized.contains("videotoolbox") || normalized.contains("vt_compression") || normalized.contains("mediaserverd") diff --git a/src/main.rs b/src/main.rs index 23135f7..8b96cfb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -795,11 +795,16 @@ async fn run() -> Result<()> { } } } else { - match args - .command - .clone() - .expect("CLI branch requires a subcommand") - { + let command = match args.command.clone() { + Some(command) => command, + None => { + return Err(alchemist::error::AlchemistError::Config( + "Missing CLI command".into(), + )); + } + }; + + match command { Commands::Scan { directories } => { agent.scan_and_enqueue(directories).await?; info!("Scan complete. Matching files were enqueued."); diff --git a/src/media/analyzer.rs b/src/media/analyzer.rs index 2dc342d..94ac11a 100644 --- a/src/media/analyzer.rs +++ b/src/media/analyzer.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use std::path::Path; use tokio::process::Command; -const FFPROBE_TIMEOUT_SECS: u64 = 120; +const FFPROBE_TIMEOUT_SECS: u64 = 30; async fn run_ffprobe(args: &[&str], path: &Path) -> Result { match tokio::time::timeout( diff --git a/src/media/executor.rs b/src/media/executor.rs index 0fc36f3..4dd4a1b 100644 --- a/src/media/executor.rs +++ b/src/media/executor.rs @@ -159,6 +159,18 @@ impl Executor for FfmpegExecutor { self.event_channels.clone(), )); + tracing::info!( + "Job {} execution path: requested_codec={}, planned_codec={}, encoder={:?}, backend={:?}, fallback={:?}", + job.id, + plan.requested_codec.as_str(), + planned_output_codec.as_str(), + encoder.map(|value| value.ffmpeg_encoder_name()), + used_backend.map(|value| value.as_str()), + plan.fallback + .as_ref() + .map(|fallback| 
fallback.reason.as_str()) + ); + self.transcoder .transcode_media(TranscodeRequest { job_id: Some(job.id), @@ -244,6 +256,14 @@ impl Executor for FfmpegExecutor { ); } + tracing::info!( + "Job {} output probe: actual_codec={:?}, actual_encoder={:?}, fallback_occurred={}", + job.id, + actual_output_codec.map(|value| value.as_str()), + actual_encoder_name.as_deref(), + plan.fallback.is_some() || codec_mismatch || encoder_mismatch + ); + Ok(ExecutionResult { requested_codec: plan.requested_codec, planned_output_codec, diff --git a/src/media/ffmpeg/amf.rs b/src/media/ffmpeg/amf.rs index 3a83874..0630b71 100644 --- a/src/media/ffmpeg/amf.rs +++ b/src/media/ffmpeg/amf.rs @@ -1,6 +1,13 @@ -use crate::media::pipeline::Encoder; +use crate::media::pipeline::{Encoder, RateControl}; + +pub fn append_args(args: &mut Vec, encoder: Encoder, rate_control: Option<&RateControl>) { + // AMF quality: CQP mode uses -rc cqp with -qp_i and -qp_p. + // The config uses CQ-style semantics (lower value = better quality). 
+ let (use_cqp, qp_value) = match rate_control { + Some(RateControl::Cq { value }) => (true, *value), + _ => (false, 25), + }; -pub fn append_args(args: &mut Vec, encoder: Encoder) { match encoder { Encoder::Av1Amf => { args.extend(["-c:v".to_string(), "av1_amf".to_string()]); @@ -13,4 +20,15 @@ pub fn append_args(args: &mut Vec, encoder: Encoder) { } _ => {} } + + if use_cqp { + args.extend([ + "-rc".to_string(), + "cqp".to_string(), + "-qp_i".to_string(), + qp_value.to_string(), + "-qp_p".to_string(), + qp_value.to_string(), + ]); + } } diff --git a/src/media/ffmpeg/mod.rs b/src/media/ffmpeg/mod.rs index 14a8bda..d7c2f31 100644 --- a/src/media/ffmpeg/mod.rs +++ b/src/media/ffmpeg/mod.rs @@ -242,15 +242,20 @@ impl<'a> FFmpegCommandBuilder<'a> { ); } Encoder::Av1Vaapi | Encoder::HevcVaapi | Encoder::H264Vaapi => { - vaapi::append_args(&mut args, encoder, self.hw_info); + vaapi::append_args(&mut args, encoder, self.hw_info, rate_control.as_ref()); } Encoder::Av1Amf | Encoder::HevcAmf | Encoder::H264Amf => { - amf::append_args(&mut args, encoder); + amf::append_args(&mut args, encoder, rate_control.as_ref()); } Encoder::Av1Videotoolbox | Encoder::HevcVideotoolbox | Encoder::H264Videotoolbox => { - videotoolbox::append_args(&mut args, encoder, tag_hevc_as_hvc1); + videotoolbox::append_args( + &mut args, + encoder, + tag_hevc_as_hvc1, + rate_control.as_ref(), + ); } Encoder::Av1Svt | Encoder::Av1Aom | Encoder::HevcX265 | Encoder::H264X264 => { cpu::append_args( @@ -264,6 +269,12 @@ impl<'a> FFmpegCommandBuilder<'a> { } } + // Set maximum keyframe interval (~10s GOP) for all non-copy encodes. + // Improves seeking reliability; hardware encoders respect this upper bound. 
+ if !self.plan.copy_video { + args.extend(["-g".to_string(), "250".to_string()]); + } + if let Some(RateControl::Bitrate { kbps }) = rate_control { args.extend(["-b:v".to_string(), format!("{kbps}k")]); } diff --git a/src/media/ffmpeg/nvenc.rs b/src/media/ffmpeg/nvenc.rs index f01f9e5..377f0e7 100644 --- a/src/media/ffmpeg/nvenc.rs +++ b/src/media/ffmpeg/nvenc.rs @@ -19,6 +19,8 @@ pub fn append_args( "av1_nvenc".to_string(), "-preset".to_string(), preset.clone(), + "-rc".to_string(), + "vbr".to_string(), "-cq".to_string(), cq.to_string(), ]); @@ -29,6 +31,8 @@ pub fn append_args( "hevc_nvenc".to_string(), "-preset".to_string(), preset.clone(), + "-rc".to_string(), + "vbr".to_string(), "-cq".to_string(), cq.to_string(), ]); @@ -39,6 +43,8 @@ pub fn append_args( "h264_nvenc".to_string(), "-preset".to_string(), preset, + "-rc".to_string(), + "vbr".to_string(), "-cq".to_string(), cq.to_string(), ]); diff --git a/src/media/ffmpeg/qsv.rs b/src/media/ffmpeg/qsv.rs index 1afda98..fa24efb 100644 --- a/src/media/ffmpeg/qsv.rs +++ b/src/media/ffmpeg/qsv.rs @@ -32,7 +32,7 @@ pub fn append_args( "-global_quality".to_string(), quality.to_string(), "-look_ahead".to_string(), - "1".to_string(), + "20".to_string(), ]); } Encoder::HevcQsv => { @@ -42,7 +42,7 @@ pub fn append_args( "-global_quality".to_string(), quality.to_string(), "-look_ahead".to_string(), - "1".to_string(), + "20".to_string(), ]); } Encoder::H264Qsv => { @@ -52,7 +52,7 @@ pub fn append_args( "-global_quality".to_string(), quality.to_string(), "-look_ahead".to_string(), - "1".to_string(), + "20".to_string(), ]); } _ => {} diff --git a/src/media/ffmpeg/vaapi.rs b/src/media/ffmpeg/vaapi.rs index 26d6a1e..d494ece 100644 --- a/src/media/ffmpeg/vaapi.rs +++ b/src/media/ffmpeg/vaapi.rs @@ -1,7 +1,12 @@ -use crate::media::pipeline::Encoder; +use crate::media::pipeline::{Encoder, RateControl}; use crate::system::hardware::HardwareInfo; -pub fn append_args(args: &mut Vec, encoder: Encoder, hw_info: Option<&HardwareInfo>) 
{ +pub fn append_args( + args: &mut Vec, + encoder: Encoder, + hw_info: Option<&HardwareInfo>, + rate_control: Option<&RateControl>, +) { if let Some(hw) = hw_info { if let Some(ref device_path) = hw.device_path { args.extend(["-vaapi_device".to_string(), device_path.to_string()]); @@ -20,4 +25,12 @@ pub fn append_args(args: &mut Vec, encoder: Encoder, hw_info: Option<&Ha } _ => {} } + + // VAAPI quality is set via -global_quality (0–100, higher = better). + // The config uses CQ-style semantics where lower value = better quality, + // so we invert: global_quality = 100 - cq_value. + if let Some(RateControl::Cq { value }) = rate_control { + let global_quality = 100u8.saturating_sub(*value); + args.extend(["-global_quality".to_string(), global_quality.to_string()]); + } } diff --git a/src/media/ffmpeg/videotoolbox.rs b/src/media/ffmpeg/videotoolbox.rs index f969152..9aeb23b 100644 --- a/src/media/ffmpeg/videotoolbox.rs +++ b/src/media/ffmpeg/videotoolbox.rs @@ -1,33 +1,32 @@ -use crate::media::pipeline::Encoder; +use crate::media::pipeline::{Encoder, RateControl}; -pub fn append_args(args: &mut Vec, encoder: Encoder, tag_hevc_as_hvc1: bool) { - // Current FFmpeg VideoToolbox encoders on macOS do not expose qscale-style - // quality controls, so bitrate mode is handled by the shared builder and - // CQ-style requests intentionally fall back to the encoder defaults. +pub fn append_args( + args: &mut Vec, + encoder: Encoder, + tag_hevc_as_hvc1: bool, + rate_control: Option<&RateControl>, +) { + // VideoToolbox quality is controlled via -global_quality (0–100, 100=best). + // The config uses CQ-style semantics where lower value = better quality, + // so we invert: global_quality = 100 - cq_value. + // Bitrate mode is handled by the shared builder in mod.rs. 
match encoder { Encoder::Av1Videotoolbox => { - args.extend([ - "-c:v".to_string(), - "av1_videotoolbox".to_string(), - "-allow_sw".to_string(), - "1".to_string(), - ]); + args.extend(["-c:v".to_string(), "av1_videotoolbox".to_string()]); } Encoder::HevcVideotoolbox => { args.extend(["-c:v".to_string(), "hevc_videotoolbox".to_string()]); if tag_hevc_as_hvc1 { args.extend(["-tag:v".to_string(), "hvc1".to_string()]); } - args.extend(["-allow_sw".to_string(), "1".to_string()]); } Encoder::H264Videotoolbox => { - args.extend([ - "-c:v".to_string(), - "h264_videotoolbox".to_string(), - "-allow_sw".to_string(), - "1".to_string(), - ]); + args.extend(["-c:v".to_string(), "h264_videotoolbox".to_string()]); } _ => {} } + if let Some(RateControl::Cq { value }) = rate_control { + let global_quality = 100u8.saturating_sub(*value); + args.extend(["-global_quality".to_string(), global_quality.to_string()]); + } } diff --git a/src/media/pipeline.rs b/src/media/pipeline.rs index 487b039..1092a17 100644 --- a/src/media/pipeline.rs +++ b/src/media/pipeline.rs @@ -443,6 +443,8 @@ struct FinalizeJobContext<'a> { plan: &'a TranscodePlan, bypass_quality_gates: bool, start_time: std::time::Instant, + encode_started_at: chrono::DateTime, + attempt_number: i32, metadata: &'a MediaMetadata, execution_result: &'a ExecutionResult, } @@ -453,6 +455,8 @@ struct FinalizeFailureContext<'a> { execution_result: &'a ExecutionResult, config_snapshot: &'a crate::config::Config, start_time: std::time::Instant, + encode_started_at: chrono::DateTime, + attempt_number: i32, temp_output_path: &'a Path, } @@ -657,6 +661,13 @@ impl Pipeline { // Store the decision and return to queued — do NOT encode match &plan.decision { crate::media::pipeline::TranscodeDecision::Skip { reason } => { + let skip_code = reason.split('|').next().unwrap_or(reason).trim(); + tracing::info!( + job_id = job_id, + skip_code = skip_code, + "Job skipped: {}", + skip_code + ); self.db.add_decision(job_id, "skip", reason).await.ok(); 
self.db .update_job_status(job_id, crate::db::JobState::Skipped) @@ -747,6 +758,7 @@ impl Pipeline { if self.db.increment_attempt_count(job.id).await.is_err() { return Err(JobFailure::Transient); } + let current_attempt_number = job.attempt_count + 1; if self .update_job_state(job.id, crate::db::JobState::Analyzing) .await @@ -905,6 +917,15 @@ impl Pipeline { } } + match self.should_stop_job(job.id).await { + Ok(true) => { + tracing::info!("Job {} was cancelled during encode planning.", job.id); + return Ok(()); + } + Ok(false) => {} + Err(_) => return Err(JobFailure::Transient), + } + let (should_execute, action, reason, next_status) = match &plan.decision { TranscodeDecision::Transcode { reason } => ( true, @@ -925,7 +946,14 @@ impl Pipeline { }; if !should_execute { - tracing::info!("Decision: SKIP Job {} - {}", job.id, &reason); + let explanation = crate::explanations::decision_from_legacy("skip", &reason); + tracing::info!( + "Decision: SKIP Job {} - {} (code={}, summary={})", + job.id, + &reason, + explanation.code, + explanation.summary + ); let _ = self.db.add_decision(job.id, "skip", &reason).await; let _ = self .update_job_state(job.id, crate::db::JobState::Skipped) @@ -999,6 +1027,7 @@ impl Pipeline { self.dry_run, ); + let encode_started_at = chrono::Utc::now(); match executor.execute(&job, &plan, &analysis).await { Ok(result) => { if result.fallback_occurred && !plan.allow_fallback { @@ -1013,6 +1042,20 @@ impl Pipeline { let _ = self .update_job_state(job.id, crate::db::JobState::Failed) .await; + let _ = self + .db + .insert_encode_attempt(crate::db::EncodeAttemptInput { + job_id: job.id, + attempt_number: current_attempt_number, + started_at: Some(encode_started_at.to_rfc3339()), + outcome: "failed".to_string(), + failure_code: Some("fallback_blocked".to_string()), + failure_summary: Some(summary.to_string()), + input_size_bytes: Some(metadata.size_bytes as i64), + output_size_bytes: None, + encode_time_seconds: 
Some(start_time.elapsed().as_secs_f64()), + }) + .await; return Err(JobFailure::EncoderUnavailable); } @@ -1026,6 +1069,8 @@ impl Pipeline { plan: &plan, bypass_quality_gates, start_time, + encode_started_at, + attempt_number: current_attempt_number, metadata, execution_result: &result, }, @@ -1040,6 +1085,8 @@ impl Pipeline { execution_result: &result, config_snapshot: &config_snapshot, start_time, + encode_started_at, + attempt_number: current_attempt_number, temp_output_path: &temp_output_path, }, &err, @@ -1093,6 +1140,20 @@ impl Pipeline { let _ = self .update_job_state(job.id, crate::db::JobState::Cancelled) .await; + let _ = self + .db + .insert_encode_attempt(crate::db::EncodeAttemptInput { + job_id: job.id, + attempt_number: current_attempt_number, + started_at: Some(encode_started_at.to_rfc3339()), + outcome: "cancelled".to_string(), + failure_code: None, + failure_summary: None, + input_size_bytes: Some(metadata.size_bytes as i64), + output_size_bytes: None, + encode_time_seconds: Some(start_time.elapsed().as_secs_f64()), + }) + .await; } else { let msg = format!("Transcode failed: {e}"); tracing::error!("Job {}: {}", job.id, msg); @@ -1105,6 +1166,20 @@ impl Pipeline { let _ = self .update_job_state(job.id, crate::db::JobState::Failed) .await; + let _ = self + .db + .insert_encode_attempt(crate::db::EncodeAttemptInput { + job_id: job.id, + attempt_number: current_attempt_number, + started_at: Some(encode_started_at.to_rfc3339()), + outcome: "failed".to_string(), + failure_code: Some(explanation.code.clone()), + failure_summary: Some(msg), + input_size_bytes: Some(metadata.size_bytes as i64), + output_size_bytes: None, + encode_time_seconds: Some(start_time.elapsed().as_secs_f64()), + }) + .await; } Err(map_failure(&e)) } @@ -1360,6 +1435,20 @@ impl Pipeline { self.update_job_state(job_id, crate::db::JobState::Completed) .await?; self.update_job_progress(job_id, 100.0).await; + let _ = self + .db + .insert_encode_attempt(crate::db::EncodeAttemptInput { + 
job_id, + attempt_number: context.attempt_number, + started_at: Some(context.encode_started_at.to_rfc3339()), + outcome: "completed".to_string(), + failure_code: None, + failure_summary: None, + input_size_bytes: Some(input_size as i64), + output_size_bytes: Some(output_size as i64), + encode_time_seconds: Some(encode_duration), + }) + .await; self.emit_telemetry_event(TelemetryEventParams { telemetry_enabled, @@ -1466,6 +1555,20 @@ impl Pipeline { let _ = self .update_job_state(job_id, crate::db::JobState::Failed) .await; + let _ = self + .db + .insert_encode_attempt(crate::db::EncodeAttemptInput { + job_id, + attempt_number: context.attempt_number, + started_at: Some(context.encode_started_at.to_rfc3339()), + outcome: "failed".to_string(), + failure_code: Some(failure_explanation.code.clone()), + failure_summary: Some(message), + input_size_bytes: Some(context.metadata.size_bytes as i64), + output_size_bytes: None, + encode_time_seconds: Some(context.start_time.elapsed().as_secs_f64()), + }) + .await; } async fn emit_telemetry_event(&self, params: TelemetryEventParams<'_>) { @@ -1779,6 +1882,8 @@ mod tests { execution_result: &result, config_snapshot: &config_snapshot, start_time: std::time::Instant::now(), + encode_started_at: chrono::Utc::now(), + attempt_number: 1, temp_output_path: &temp_output, }, &crate::error::AlchemistError::Unknown("disk full".to_string()), diff --git a/src/media/planner.rs b/src/media/planner.rs index 002e2ed..da3d67a 100644 --- a/src/media/planner.rs +++ b/src/media/planner.rs @@ -339,12 +339,13 @@ fn should_transcode( }; let normalized_bpp = bpp.map(|value| value * res_correction); + // Raise threshold for uncertain analysis: low confidence = fewer speculative encodes. 
let mut threshold = match analysis.confidence { crate::media::pipeline::AnalysisConfidence::High => config.transcode.min_bpp_threshold, crate::media::pipeline::AnalysisConfidence::Medium => { - config.transcode.min_bpp_threshold * 0.7 + config.transcode.min_bpp_threshold * 1.3 } - crate::media::pipeline::AnalysisConfidence::Low => config.transcode.min_bpp_threshold * 0.5, + crate::media::pipeline::AnalysisConfidence::Low => config.transcode.min_bpp_threshold * 1.8, }; if target_codec == OutputCodec::Av1 { threshold *= 0.7; @@ -626,8 +627,16 @@ fn encoder_runtime_settings( }, None, ), - Encoder::Av1Nvenc | Encoder::HevcNvenc | Encoder::H264Nvenc => ( - RateControl::Cq { value: 25 }, + Encoder::Av1Nvenc => ( + RateControl::Cq { value: 28 }, + Some(quality_profile.nvenc_preset().to_string()), + ), + Encoder::HevcNvenc => ( + RateControl::Cq { value: 24 }, + Some(quality_profile.nvenc_preset().to_string()), + ), + Encoder::H264Nvenc => ( + RateControl::Cq { value: 21 }, Some(quality_profile.nvenc_preset().to_string()), ), Encoder::Av1Videotoolbox | Encoder::HevcVideotoolbox | Encoder::H264Videotoolbox => ( @@ -645,7 +654,18 @@ fn encoder_runtime_settings( Some(preset.to_string()), ) } - Encoder::Av1Aom => (RateControl::Crf { value: 32 }, Some("6".to_string())), + Encoder::Av1Aom => { + let (cpu_used, default_crf) = match config.hardware.cpu_preset { + crate::config::CpuPreset::Slow => ("2", 24u8), + crate::config::CpuPreset::Medium => ("4", 28u8), + crate::config::CpuPreset::Fast => ("6", 30u8), + crate::config::CpuPreset::Faster => ("8", 32u8), + }; + ( + RateControl::Crf { value: default_crf }, + Some(cpu_used.to_string()), + ) + } Encoder::HevcX265 => { let preset = config.hardware.cpu_preset.as_str().to_string(); let default_crf = match config.hardware.cpu_preset { @@ -901,7 +921,10 @@ fn plan_subtitles( } } -fn subtitle_copy_supported(container: &str, subtitle_streams: &[SubtitleStreamMetadata]) -> bool { +pub(crate) fn subtitle_copy_supported( + container: &str, 
+ subtitle_streams: &[SubtitleStreamMetadata], +) -> bool { if subtitle_streams.is_empty() { return true; } diff --git a/src/media/processor.rs b/src/media/processor.rs index 0a2184e..5f4fa25 100644 --- a/src/media/processor.rs +++ b/src/media/processor.rs @@ -68,7 +68,7 @@ impl Agent { in_flight_jobs: Arc::new(AtomicUsize::new(0)), idle_notified: Arc::new(AtomicBool::new(false)), analyzing_boot: Arc::new(AtomicBool::new(false)), - analysis_semaphore: Arc::new(tokio::sync::Semaphore::new(1)), + analysis_semaphore: Arc::new(tokio::sync::Semaphore::new(concurrent_jobs.clamp(1, 4))), } } @@ -167,6 +167,38 @@ impl Agent { self.draining.store(false, Ordering::SeqCst); } + /// Restart the engine loop without re-execing the process. + /// Pauses the engine, cancels all in-flight jobs, resets state flags, + /// and resumes. Cancelled jobs remain in the cancelled state. + pub async fn restart(&self) { + info!("Engine restart requested."); + self.pause(); + + let active_states = [ + crate::db::JobState::Encoding, + crate::db::JobState::Remuxing, + crate::db::JobState::Analyzing, + crate::db::JobState::Resuming, + ]; + for state in &active_states { + match self.db.get_jobs_by_status(*state).await { + Ok(jobs) => { + for job in jobs { + self.orchestrator.cancel_job(job.id); + } + } + Err(e) => { + error!("Restart: failed to fetch {:?} jobs: {}", state, e); + } + } + } + + self.draining.store(false, Ordering::SeqCst); + self.idle_notified.store(false, Ordering::SeqCst); + self.resume(); + info!("Engine restart complete."); + } + pub fn set_boot_analyzing(&self, value: bool) { self.analyzing_boot.store(value, Ordering::SeqCst); if value { @@ -311,6 +343,11 @@ impl Agent { return; } + info!( + "Updating concurrent job limit from {} to {}", + current, new_limit + ); + if new_limit > current { let mut held = self.held_permits.lock().await; let mut increase = new_limit - current; @@ -392,6 +429,11 @@ impl Agent { continue; } }; + debug!( + "Worker slot acquired (in_flight={}, 
limit={})", + self.in_flight_jobs.load(Ordering::SeqCst), + self.concurrent_jobs_limit() + ); // Re-check drain after permit acquisition (belt-and-suspenders) if self.is_draining() { @@ -403,7 +445,13 @@ impl Agent { match self.db.claim_next_job().await { Ok(Some(job)) => { self.idle_notified.store(false, Ordering::SeqCst); - self.in_flight_jobs.fetch_add(1, Ordering::SeqCst); + let next_in_flight = self.in_flight_jobs.fetch_add(1, Ordering::SeqCst) + 1; + info!( + "Claimed job {} for processing (in_flight={}, limit={})", + job.id, + next_in_flight, + self.concurrent_jobs_limit() + ); let agent = self.clone(); let counter = self.in_flight_jobs.clone(); tokio::spawn(async move { @@ -423,6 +471,11 @@ impl Agent { }); } Ok(None) => { + debug!( + "No queued job available (in_flight={}, limit={})", + self.in_flight_jobs.load(Ordering::SeqCst), + self.concurrent_jobs_limit() + ); if self.in_flight_jobs.load(Ordering::SeqCst) == 0 && !self.idle_notified.swap(true, Ordering::SeqCst) { diff --git a/src/orchestrator.rs b/src/orchestrator.rs index 4fd2d42..f33278a 100644 --- a/src/orchestrator.rs +++ b/src/orchestrator.rs @@ -13,6 +13,8 @@ use tokio::sync::oneshot; use tracing::{error, info, warn}; pub struct Transcoder { + // std::sync::Mutex is intentional: critical sections never cross .await boundaries, + // so there is no deadlock risk. Contention is negligible (≤ concurrent_jobs entries). 
cancel_channels: Arc>>>, pending_cancels: Arc>>, } @@ -234,6 +236,7 @@ impl Transcoder { total_duration: Option, ) -> Result<()> { info!("Executing FFmpeg command: {:?}", cmd); + let ffmpeg_start = std::time::Instant::now(); cmd.stdout(Stdio::null()).stderr(Stdio::piped()); if let Some(id) = job_id { @@ -286,15 +289,21 @@ impl Transcoder { } } + info!( + "Job {:?}: FFmpeg spawned ({:.3}s since command start)", + job_id, + ffmpeg_start.elapsed().as_secs_f64() + ); let mut reader = BufReader::new(stderr).lines(); let mut kill_rx = kill_rx; let mut killed = false; let mut last_lines = std::collections::VecDeque::with_capacity(20); let mut progress_state = FFmpegProgressState::default(); + let mut first_frame_logged = false; loop { tokio::select! { - line_res_timeout = tokio::time::timeout(tokio::time::Duration::from_secs(600), reader.next_line()) => { + line_res_timeout = tokio::time::timeout(tokio::time::Duration::from_secs(120), reader.next_line()) => { match line_res_timeout { Ok(line_res) => match line_res { Ok(Some(line)) => { @@ -308,11 +317,28 @@ impl Transcoder { last_lines.pop_front(); } + // Detect VideoToolbox software fallback + if line.contains("Using software encoder") || line.contains("using software encoder") { + warn!( + "Job {:?}: VideoToolbox falling back to software encoder ({}s elapsed)", + job_id, + ffmpeg_start.elapsed().as_secs_f64() + ); + } + if let Some(observer) = observer.as_ref() { observer.on_log(line.clone()).await; if let Some(total_duration) = total_duration { if let Some(progress) = progress_state.ingest_line(&line) { + if !first_frame_logged { + first_frame_logged = true; + info!( + "Job {:?}: first progress event ({:.3}s since spawn)", + job_id, + ffmpeg_start.elapsed().as_secs_f64() + ); + } observer.on_progress(progress, total_duration).await; } } @@ -325,7 +351,7 @@ impl Transcoder { } }, Err(_) => { - error!("Job {:?} stalled: No output from FFmpeg for 10 minutes. 
Killing process...", job_id); + error!("Job {:?} stalled: No output from FFmpeg for 2 minutes. Killing process...", job_id); let _ = child.kill().await; killed = true; if let Some(id) = job_id { @@ -379,7 +405,11 @@ impl Transcoder { } if status.success() { - info!("FFmpeg command completed successfully"); + info!( + "Job {:?}: FFmpeg completed successfully ({:.3}s total)", + job_id, + ffmpeg_start.elapsed().as_secs_f64() + ); Ok(()) } else { let error_detail = last_lines.make_contiguous().join("\n"); diff --git a/src/server/jobs.rs b/src/server/jobs.rs index 755fcec..3e912a2 100644 --- a/src/server/jobs.rs +++ b/src/server/jobs.rs @@ -325,6 +325,7 @@ pub(crate) struct JobDetailResponse { job: Job, metadata: Option, encode_stats: Option, + encode_attempts: Vec, job_logs: Vec, job_failure_summary: Option, decision_explanation: Option, @@ -343,11 +344,7 @@ pub(crate) async fn get_job_detail_handler( // Avoid long probes while the job is still active. let metadata = match job.status { - JobState::Queued - | JobState::Analyzing - | JobState::Encoding - | JobState::Remuxing - | JobState::Completed => None, + JobState::Queued | JobState::Analyzing => None, _ => { let analyzer = crate::media::analyzer::FfmpegAnalyzer; use crate::media::pipeline::Analyzer; @@ -403,10 +400,17 @@ pub(crate) async fn get_job_detail_handler( (None, None) }; + let encode_attempts = state + .db + .get_encode_attempts_by_job(id) + .await + .unwrap_or_default(); + axum::Json(JobDetailResponse { job, metadata, encode_stats, + encode_attempts, job_logs, job_failure_summary, decision_explanation, @@ -439,6 +443,13 @@ pub(crate) async fn stop_drain_handler(State(state): State>) -> im axum::Json(serde_json::json!({ "status": "running" })) } +pub(crate) async fn restart_engine_handler( + State(state): State>, +) -> impl IntoResponse { + state.agent.restart().await; + axum::Json(serde_json::json!({ "status": "running" })) +} + pub(crate) async fn engine_status_handler(State(state): State>) -> impl 
IntoResponse { axum::Json(serde_json::json!({ "status": if state.agent.is_draining() { diff --git a/src/server/middleware.rs b/src/server/middleware.rs index fd20d7d..bc04e33 100644 --- a/src/server/middleware.rs +++ b/src/server/middleware.rs @@ -149,7 +149,28 @@ pub(crate) async fn auth_middleware( } fn request_is_lan(req: &Request) -> bool { - request_ip(req).is_some_and(is_lan_ip) + let direct_peer = req + .extensions() + .get::>() + .map(|info| info.0.ip()); + let resolved = request_ip(req); + + // If resolved IP differs from direct peer, forwarded headers were used. + // Warn operators so misconfigured proxies surface in logs. + if let (Some(peer), Some(resolved_ip)) = (direct_peer, resolved) { + if peer != resolved_ip && is_lan_ip(resolved_ip) { + tracing::warn!( + peer_ip = %peer, + resolved_ip = %resolved_ip, + "Setup gate: access permitted via forwarded headers. \ + Verify your reverse proxy is forwarding client IPs correctly \ + (X-Forwarded-For / X-Real-IP). Misconfigured proxies may \ + expose setup to public traffic." + ); + } + } + + resolved.is_some_and(is_lan_ip) } fn read_only_api_token_allows(method: &Method, path: &str) -> bool { diff --git a/src/server/mod.rs b/src/server/mod.rs index 0e749d6..fdca9ba 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -307,6 +307,7 @@ fn app_router(state: Arc) -> Router { .route("/api/stats/daily", get(daily_stats_handler)) .route("/api/stats/detailed", get(detailed_stats_handler)) .route("/api/stats/savings", get(savings_summary_handler)) + .route("/api/stats/skip-reasons", get(skip_reasons_handler)) // Canonical job list endpoint. 
.route("/api/jobs", get(jobs_table_handler)) .route("/api/jobs/table", get(jobs_table_handler)) @@ -339,6 +340,7 @@ fn app_router(state: Arc) -> Router { .route("/api/engine/resume", post(resume_engine_handler)) .route("/api/engine/drain", post(drain_engine_handler)) .route("/api/engine/stop-drain", post(stop_drain_handler)) + .route("/api/engine/restart", post(restart_engine_handler)) .route( "/api/engine/mode", get(get_engine_mode_handler).post(set_engine_mode_handler), diff --git a/src/server/stats.rs b/src/server/stats.rs index 724ab52..ffe4dcf 100644 --- a/src/server/stats.rs +++ b/src/server/stats.rs @@ -101,3 +101,16 @@ pub(crate) async fn savings_summary_handler( Err(err) => config_read_error_response("load storage savings summary", &err), } } + +pub(crate) async fn skip_reasons_handler(State(state): State>) -> impl IntoResponse { + match state.db.get_skip_reason_counts().await { + Ok(counts) => { + let items: Vec = counts + .into_iter() + .map(|(code, count)| serde_json::json!({ "code": code, "count": count })) + .collect(); + axum::Json(serde_json::json!({ "today": items })).into_response() + } + Err(err) => config_read_error_response("load skip reason counts", &err), + } +} diff --git a/stitch_findings.md b/stitch_findings.md deleted file mode 100644 index e3237e7..0000000 --- a/stitch_findings.md +++ /dev/null @@ -1,74 +0,0 @@ -# Alchemist Project Audit & Findings - -This document provides a comprehensive audit of the Alchemist media transcoding project (v0.3.0-rc.3), covering backend architecture, frontend design, database schema, and operational workflows. - ---- - -## 1. Project Architecture & Pipeline - -Alchemist implements a robust, asynchronous media transcoding pipeline managed by a central `Agent`. The pipeline follows a strictly ordered lifecycle: - -1. **Scanner (`src/media/scanner.rs`):** Performs a high-speed traversal of watch folders. 
It uses `mtime_hash` (seconds + nanoseconds) to detect changes without full file analysis, efficiently handling re-scans and minimizing DB writes. -2. **Analyzer (`src/media/analyzer.rs`):** Executes `ffprobe` to extract normalized media metadata (codecs, bit depth, BPP, bitrate). Analysis results are used to populate the `DetailedEncodeStats` and `Decision` tables. -3. **Planner (`src/media/planner.rs`):** A complex decision engine that evaluates whether to **Skip**, **Remux**, or **Transcode** a file based on user profiles. - * *Finding:* The planning logic is heavily hardcoded with "magic thresholds" (e.g., Bits-per-pixel thresholds). While effective, these could be more exposed as "Advanced Settings" in the UI. -4. **Executor (`src/media/executor.rs`):** Orchestrates the `ffmpeg` process. It dynamically selects encoders (NVENC, VAAPI, QSV, ProRes, or CPU fallback) based on the target profile and host hardware capabilities detected in `src/system/hardware.rs`. - ---- - -## 2. Backend & API Design (Rust/Axum) - -* **Concurrency:** Utilizes `tokio` for async orchestration and `rayon` for CPU-intensive tasks (like file hashing or list processing). The scheduler supports multiple concurrency modes: `Background` (1 job), `Balanced` (capped), and `Throughput` (uncapped). -* **State Management:** The backend uses `broadcast` channels to separate high-volume events (Progress, Logs) from low-volume system events (Config updates). This prevents UI "flicker" and unnecessary re-renders in the frontend. -* **API Structure:** - * **RESTful endpoints** for jobs, settings, and stats. - * **SSE (`src/server/sse.rs`)** for real-time progress updates, ensuring a reactive UI without high-frequency polling. - * **Auth (`src/server/auth.rs`):** Implements JWT-based authentication with Argon2 hashing for the initial setup. - ---- - -## 3. 
Database Schema (SQLite/SQLx) - -* **Stability:** The project uses 16+ migrations, showing a mature evolution from a simple schema to a sophisticated job-tracking system. -* **Decision Logging:** The `decisions` and `job_failure_explanations` tables are a standout feature. They store the "why" behind every action as structured JSON, which is then humanized in the UI (e.g., explaining exactly why a file was skipped). -* **Data Integrity:** Foreign keys and WAL (Write-Ahead Logging) mode ensure database stability even during heavy concurrent I/O. - ---- - -## 4. Frontend Design (Astro/React/Helios) - -* **Stack:** Astro 5 provides a fast, static-first framework with React 18 handles the complex stateful dashboards. -* **Design System ("Helios"):** - * *Identity:* A dark-themed, data-dense industrial aesthetic. - * *Findings:* While functional, the system suffers from "component bloat." `JobManager.tsx` (~2,000 lines) is a significant maintainability risk. It contains UI logic, filtering logic, and data transformation logic mixed together. -* **Data Visualization:** Uses `recharts` for historical trends and performance metrics. - * *Improvement:* The charts are currently static snapshots. Adding real-time interactivity (brushing, zooming) would improve the exploration of large datasets. - ---- - -## 5. System & Hardware Integration - -* **Hardware Discovery:** `src/system/hardware.rs` is extensive, detecting NVIDIA, Intel, AMD, and Apple Silicon capabilities. It correctly maps these to `ffmpeg` encoder flags. -* **FS Browser:** A custom filesystem browser (`src/system/fs_browser.rs`) allows for secure directory selection during setup, preventing path injection and ensuring platform-agnostic path handling. - ---- - -## 6. Critical Areas for Improvement - -### **Maintainability (High Priority)** -* **Decouple `JobManager.tsx`:** Refactor into functional hooks (`useJobs`, `useFilters`) and smaller, presentation-only components. 
-* **Standardize Formatters:** Move `formatBytes`, `formatTime`, and `formatReduction` into a centralized `lib/formatters.ts` to reduce code duplication across the Dashboard and Stats pages. - -### **UX & Performance (Medium Priority)** -* **Polling vs. SSE:** Ensure all real-time metrics (like GPU temperature) are delivered via SSE rather than periodic polling to reduce backend load and improve UI responsiveness. -* **Interactive Decision Explanations:** The current skip reasons are helpful but static. Adding links to the relevant settings (e.g., "Change this threshold in Transcoding Settings") would close the loop for users. - -### **Reliability (Low Priority)** -* **E2E Testing:** While Playwright tests exist, they focus on "reliability." Expanding these to cover complex "edge cases" (like network-attached storage disconnects during a scan) would improve long-term stability. - ---- - -## 7. Stitch Recommendation -Use Stitch to generate **atomic component refinements** based on this audit. -* *Prompt Example:* "Refine the JobTable row to use iconic status indicators with tooltips for skip reasons, as outlined in the Alchemist Audit." -* *Prompt Example:* "Create a unified `Formatter` utility library in TypeScript that handles bytes, time, and percentage formatting for the Helios design system." 
diff --git a/web-e2e/package.json b/web-e2e/package.json index 5ee573b..d761763 100644 --- a/web-e2e/package.json +++ b/web-e2e/package.json @@ -1,6 +1,6 @@ { "name": "alchemist-web-e2e", - "version": "0.3.1-rc.1", + "version": "0.3.1-rc.3", "private": true, "packageManager": "bun@1", "type": "module", diff --git a/web-e2e/tests/engine-lifecycle.spec.ts b/web-e2e/tests/engine-lifecycle.spec.ts new file mode 100644 index 0000000..1828e1d --- /dev/null +++ b/web-e2e/tests/engine-lifecycle.spec.ts @@ -0,0 +1,102 @@ +import { expect, test } from "@playwright/test"; +import { + createEngineMode, + createEngineStatus, + fulfillJson, + mockDashboardData, +} from "./helpers"; + +test.use({ storageState: undefined }); + +test.beforeEach(async ({ page }) => { + await mockDashboardData(page); + await page.route("**/api/engine/mode", async (route) => { + await fulfillJson(route, 200, createEngineMode()); + }); +}); + +test("pause then resume transitions engine state correctly", async ({ page }) => { + let engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + let pauseCalls = 0; + let resumeCalls = 0; + + await page.route("**/api/engine/status", async (route) => { + await fulfillJson(route, 200, engineStatus); + }); + await page.route("**/api/engine/pause", async (route) => { + pauseCalls += 1; + engineStatus = createEngineStatus({ status: "paused", manual_paused: true }); + await fulfillJson(route, 200, { status: "paused" }); + }); + await page.route("**/api/engine/resume", async (route) => { + resumeCalls += 1; + engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + await fulfillJson(route, 200, { status: "running" }); + }); + + await page.goto("/settings?tab=system"); + + await page.getByRole("button", { name: "Pause" }).click(); + await expect.poll(() => pauseCalls).toBe(1); + + await page.getByRole("button", { name: "Start" }).click(); + await expect.poll(() => resumeCalls).toBe(1); +}); + +test("drain transitions to 
draining state and cancel-stop reverts it", async ({ page }) => { + let engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + let drainCalls = 0; + let stopDrainCalls = 0; + + await page.route("**/api/engine/status", async (route) => { + await fulfillJson(route, 200, engineStatus); + }); + await page.route("**/api/engine/drain", async (route) => { + drainCalls += 1; + engineStatus = createEngineStatus({ + status: "draining", + manual_paused: false, + draining: true, + }); + await fulfillJson(route, 200, { status: "draining" }); + }); + await page.route("**/api/engine/stop-drain", async (route) => { + stopDrainCalls += 1; + engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + await fulfillJson(route, 200, { status: "running" }); + }); + + await page.goto("/"); + + await page.getByRole("button", { name: "Stop" }).click(); + await expect.poll(() => drainCalls).toBe(1); + await expect(page.getByText("Stopping", { exact: true })).toBeVisible(); + + await expect.poll(() => stopDrainCalls).toBe(0); +}); + +test("engine restart endpoint is called and status returns to running", async ({ page }) => { + let engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + let restartCalls = 0; + + await page.route("**/api/engine/status", async (route) => { + await fulfillJson(route, 200, engineStatus); + }); + await page.route("**/api/engine/restart", async (route) => { + restartCalls += 1; + engineStatus = createEngineStatus({ status: "running", manual_paused: false }); + await fulfillJson(route, 200, { status: "running" }); + }); + + await page.goto("/"); + + const result = await page.evaluate(async () => { + const res = await fetch("/api/engine/restart", { method: "POST" }); + const body = await res.json() as { status: string }; + return { status: res.status, body }; + }); + + expect(restartCalls).toBe(1); + expect(result.status).toBe(200); + expect(result.body.status).toBe("running"); +}); diff 
--git a/web-e2e/tests/helpers.ts b/web-e2e/tests/helpers.ts index d99bc9d..dd8c2f0 100644 --- a/web-e2e/tests/helpers.ts +++ b/web-e2e/tests/helpers.ts @@ -159,6 +159,18 @@ export interface JobDetailFixture { message: string; created_at: string; }>; + encode_attempts?: Array<{ + id: number; + attempt_number: number; + started_at: string | null; + finished_at: string; + outcome: "completed" | "failed" | "cancelled"; + failure_code: string | null; + failure_summary: string | null; + input_size_bytes: number | null; + output_size_bytes: number | null; + encode_time_seconds: number | null; + }>; job_failure_summary?: string; decision_explanation?: ExplanationFixture | null; failure_explanation?: ExplanationFixture | null; diff --git a/web-e2e/tests/jobs-success.spec.ts b/web-e2e/tests/jobs-success.spec.ts index 7097634..e3df46b 100644 --- a/web-e2e/tests/jobs-success.spec.ts +++ b/web-e2e/tests/jobs-success.spec.ts @@ -142,7 +142,7 @@ test("search requests are debounced and failed job details show summary and logs await mockJobDetails(page, { 2: failedDetail }); await page.goto("/jobs"); - await page.getByPlaceholder("Search files...").fill("failed"); + await page.getByPlaceholder("Search files...").first().fill("failed"); await expect .poll(() => requests.some((url) => url.searchParams.get("search") === "failed")) @@ -286,7 +286,7 @@ test("queued job with no metadata shows waiting for analysis placeholder", async await page.getByTitle("/media/queued.mkv").click(); await expect(page.getByRole("dialog")).toBeVisible(); - await expect(page.getByText("Waiting for analysis")).toBeVisible(); + await expect(page.getByText("Waiting in queue")).toBeVisible(); await expect(page.getByText("Unknown bit depth")).not.toBeVisible(); }); diff --git a/web-e2e/tests/settings-success.spec.ts b/web-e2e/tests/settings-success.spec.ts index 13fdf4a..bea4ce3 100644 --- a/web-e2e/tests/settings-success.spec.ts +++ b/web-e2e/tests/settings-success.spec.ts @@ -162,7 +162,7 @@ 
test("notification targets can be added, tested, and removed", async ({ page }) await expect(page.getByText("Test notification sent.").first()).toBeVisible(); expect(testPayload).toMatchObject({ name: "Playwright Target", - target_type: "discord", + target_type: "discord_webhook", }); await page.getByLabel("Delete notification target Playwright Target").click(); diff --git a/web/package.json b/web/package.json index 68b09cc..bb3156f 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "alchemist-web", - "version": "0.3.1-rc.1", + "version": "0.3.1-rc.3", "private": true, "packageManager": "bun@1", "type": "module", diff --git a/web/src/components/AboutDialog.tsx b/web/src/components/AboutDialog.tsx index adbb26b..e3f76ec 100644 --- a/web/src/components/AboutDialog.tsx +++ b/web/src/components/AboutDialog.tsx @@ -5,18 +5,18 @@ import { apiJson, isApiError } from "../lib/api"; import { showToast } from "../lib/toast"; interface SystemInfo { - version: string; - os_version: string; - is_docker: boolean; - telemetry_enabled: boolean; ffmpeg_version: string; + is_docker: boolean; + os_version: string; + telemetry_enabled: boolean; + version: string; } interface UpdateInfo { current_version: string; latest_version: string | null; - update_available: boolean; release_url: string | null; + update_available: boolean; } interface AboutDialogProps { diff --git a/web/src/components/ErrorBoundary.tsx b/web/src/components/ErrorBoundary.tsx index 15641a1..dcb7494 100644 --- a/web/src/components/ErrorBoundary.tsx +++ b/web/src/components/ErrorBoundary.tsx @@ -8,14 +8,14 @@ interface Props { } interface State { - hasError: boolean; - errorMessage: string; + errorMessage: string; + hasError: boolean; } export class ErrorBoundary extends Component { public state: State = { - hasError: false, - errorMessage: "", + errorMessage: "", + hasError: false, }; public static getDerivedStateFromError(error: Error): State { diff --git 
a/web/src/components/HardwareSettings.tsx b/web/src/components/HardwareSettings.tsx index fc9a18c..516c462 100644 --- a/web/src/components/HardwareSettings.tsx +++ b/web/src/components/HardwareSettings.tsx @@ -14,24 +14,24 @@ interface HardwareInfo { failed: number; }; backends?: Array<{ - kind: string; codec: string; - encoder: string; device_path: string | null; + encoder: string; + kind: string; }>; detection_notes?: string[]; } interface HardwareProbeEntry { - vendor: string; - codec: string; - encoder: string; backend: string; + codec: string; device_path: string | null; - success: boolean; + encoder: string; selected: boolean; - summary: string; stderr?: string | null; + success: boolean; + summary: string; + vendor: string; } interface HardwareProbeLog { @@ -39,11 +39,11 @@ interface HardwareProbeLog { } interface HardwareSettings { - allow_cpu_fallback: boolean; allow_cpu_encoding: boolean; + allow_cpu_fallback: boolean; cpu_preset: string; - preferred_vendor: string | null; device_path: string | null; + preferred_vendor: string | null; } export default function HardwareSettings() { diff --git a/web/src/components/HeaderActions.tsx b/web/src/components/HeaderActions.tsx index e7a3e3d..e6c4fe5 100644 --- a/web/src/components/HeaderActions.tsx +++ b/web/src/components/HeaderActions.tsx @@ -39,15 +39,16 @@ export default function HeaderActions() { labelColor: "text-helios-solar", }, draining: { - dot: "bg-helios-slate animate-pulse", + dot: "bg-helios-solar animate-pulse", label: "Stopping", - labelColor: "text-helios-slate", + labelColor: "text-helios-solar", }, } as const; const status = engineStatus?.status ?? "paused"; const isIdle = status === "running" && (stats?.active ?? 0) === 0; - const displayStatus: keyof typeof statusConfig = isIdle ? "idle" : status; + const displayStatus: keyof typeof statusConfig = + status === "draining" ? "draining" : isIdle ? 
"idle" : status; const refreshEngineStatus = async () => { const data = await apiJson("/api/engine/status"); diff --git a/web/src/components/JobManager.tsx b/web/src/components/JobManager.tsx index aca7c8f..fdbcf4f 100644 --- a/web/src/components/JobManager.tsx +++ b/web/src/components/JobManager.tsx @@ -1,17 +1,20 @@ import { useState, useEffect, useCallback, useRef } from "react"; import { createPortal } from "react-dom"; -import { - Search, RefreshCw, Trash2, Ban, - Clock, X, Info, Activity, Database, Zap, Maximize2, MoreHorizontal, ArrowDown, ArrowUp, AlertCircle -} from "lucide-react"; +import { RefreshCw, Trash2, Ban } from "lucide-react"; import { apiAction, apiJson, isApiError } from "../lib/api"; import { useDebouncedValue } from "../lib/useDebouncedValue"; import { showToast } from "../lib/toast"; import ConfirmDialog from "./ui/ConfirmDialog"; import { clsx, type ClassValue } from "clsx"; import { twMerge } from "tailwind-merge"; -import { motion, AnimatePresence } from "framer-motion"; import { withErrorBoundary } from "./ErrorBoundary"; +import type { Job, JobDetail, TabType, SortField, ConfirmConfig, CountMessageResponse } from "./jobs/types"; +import { SORT_OPTIONS, isJobActive, jobDetailEmptyState } from "./jobs/types"; +import { normalizeDecisionExplanation, normalizeFailureExplanation } from "./jobs/JobExplanations"; +import { useJobSSE } from "./jobs/useJobSSE"; +import { JobsToolbar } from "./jobs/JobsToolbar"; +import { JobsTable } from "./jobs/JobsTable"; +import { JobDetailModal } from "./jobs/JobDetailModal"; function cn(...inputs: ClassValue[]) { return twMerge(clsx(inputs)); @@ -32,469 +35,26 @@ function focusableElements(root: HTMLElement): HTMLElement[] { ); } -export interface ExplanationView { - category: "decision" | "failure"; - code: string; - summary: string; - detail: string; - operator_guidance: string | null; - measured: Record; - legacy_reason: string; -} - -interface ExplanationPayload { - category: "decision" | "failure"; - 
code: string; - summary: string; - detail: string; - operator_guidance: string | null; - measured: Record; - legacy_reason: string; -} - -function formatReductionPercent(value?: string): string { - if (!value) { - return "?"; - } - - const parsed = Number.parseFloat(value); - return Number.isFinite(parsed) ? `${(parsed * 100).toFixed(0)}%` : value; -} - -export function humanizeSkipReason(reason: string): ExplanationView { - const pipeIdx = reason.indexOf("|"); - const key = pipeIdx === -1 - ? reason.trim() - : reason.slice(0, pipeIdx).trim(); - const paramStr = pipeIdx === -1 ? "" : reason.slice(pipeIdx + 1); - - const measured: Record = {}; - for (const pair of paramStr.split(",")) { - const [rawKey, ...rawValueParts] = pair.split("="); - if (!rawKey || rawValueParts.length === 0) { - continue; - } - - measured[rawKey.trim()] = rawValueParts.join("=").trim(); - } - - const makeDecision = ( - code: string, - summary: string, - detail: string, - operator_guidance: string | null, - ): ExplanationView => ({ - category: "decision", - code, - summary, - detail, - operator_guidance, - measured, - legacy_reason: reason, - }); - - switch (key) { - case "analysis_failed": - return makeDecision( - "analysis_failed", - "File could not be analyzed", - `FFprobe failed to read this file. It may be corrupt, incomplete, or in an unsupported format. Error: ${measured.error ?? "unknown"}`, - "Try playing the file in VLC or another media player. If it plays fine, re-run the scan. If not, the file may be damaged.", - ); - case "planning_failed": - return makeDecision( - "planning_failed", - "Transcoding plan could not be created", - `An internal error occurred while planning the transcode for this file. This is likely a bug. Error: ${measured.error ?? "unknown"}`, - "Check the logs below for details. 
If this happens repeatedly, please report it as a bug.", - ); - case "already_target_codec": - return makeDecision( - "already_target_codec", - "Already in target format", - `This file is already encoded as ${measured.codec ?? "the target codec"}${measured.bit_depth ? ` at ${measured.bit_depth}-bit` : ""}. Re-encoding would waste time and could reduce quality.`, - null, - ); - case "already_target_codec_wrong_container": - return makeDecision( - "already_target_codec_wrong_container", - "Target codec, wrong container", - `The video is already in the right codec but wrapped in a ${measured.container ?? "MP4"} container. Alchemist will remux it to ${measured.target_extension ?? "MKV"} - fast and lossless, no quality loss.`, - null, - ); - case "bpp_below_threshold": - return makeDecision( - "bpp_below_threshold", - "Already efficiently compressed", - `Bits-per-pixel (${measured.bpp ?? "?"}) is below the minimum threshold (${measured.threshold ?? "?"}). This file is already well-compressed - transcoding it would spend significant time for minimal space savings.`, - "If you want to force transcoding, lower the BPP threshold in Settings -> Transcoding.", - ); - case "below_min_file_size": - return makeDecision( - "below_min_file_size", - "File too small to process", - `File size (${measured.size_mb ?? "?"}MB) is below the minimum threshold (${measured.threshold_mb ?? "?"}MB). Small files aren't worth the transcoding overhead.`, - "Lower the minimum file size threshold in Settings -> Transcoding if you want small files processed.", - ); - case "size_reduction_insufficient": - return makeDecision( - "size_reduction_insufficient", - "Not enough space would be saved", - `The predicted size reduction (${formatReductionPercent(String(measured.reduction ?? measured.predicted ?? ""))}) is below the required threshold (${formatReductionPercent(String(measured.threshold ?? ""))}). 
Transcoding this file wouldn't recover meaningful storage.`, - "Lower the size reduction threshold in Settings -> Transcoding to encode files with smaller savings.", - ); - case "no_suitable_encoder": - case "no_available_encoders": - return makeDecision( - key, - "No encoder available", - `No encoder was found for ${measured.codec ?? measured.requested_codec ?? "the target codec"}. Hardware detection may have failed, or CPU fallback is disabled.`, - "Check Settings -> Hardware. Enable CPU fallback, or verify your GPU is detected correctly.", - ); - case "preferred_codec_unavailable_fallback_disabled": - return makeDecision( - "preferred_codec_unavailable_fallback_disabled", - "Preferred encoder unavailable", - `The preferred codec (${measured.codec ?? "target codec"}) is not available and CPU fallback is disabled in settings.`, - "Go to Settings -> Hardware and enable CPU fallback, or check that your GPU encoder is working correctly.", - ); - case "Output path matches input path": - case "output_path_matches_input": - return makeDecision( - "output_path_matches_input", - "Output would overwrite source", - "The configured output path is the same as the source file. Alchemist refused to proceed to avoid overwriting your original file.", - "Go to Settings -> Files and configure a different output suffix or output folder.", - ); - case "Output already exists": - case "output_already_exists": - return makeDecision( - "output_already_exists", - "Output file already exists", - "A transcoded version of this file already exists at the output path. Alchemist skipped it to avoid duplicating work.", - "If you want to re-transcode it, delete the existing output file first, then retry the job.", - ); - case "incomplete_metadata": - return makeDecision( - "incomplete_metadata", - "Missing file metadata", - `FFprobe could not determine the ${measured.missing ?? "required metadata"} for this file. 
Without reliable metadata Alchemist cannot make a valid transcoding decision.`, - "Run a Library Doctor scan to check if this file is corrupt. Try playing it in a media player to confirm it is readable.", - ); - case "already_10bit": - return makeDecision( - "already_10bit", - "Already 10-bit", - "This file is already encoded in high-quality 10-bit depth. Re-encoding it could reduce quality.", - null, - ); - case "remux: mp4_to_mkv_stream_copy": - case "remux_mp4_to_mkv_stream_copy": - return makeDecision( - "remux_mp4_to_mkv_stream_copy", - "Remuxed (no re-encode)", - "This file was remuxed from MP4 to MKV using stream copy - fast and lossless. No quality was lost.", - null, - ); - case "Low quality (VMAF)": - case "quality_below_threshold": - return makeDecision( - "quality_below_threshold", - "Quality check failed", - "The encoded file scored below the minimum VMAF quality threshold. Alchemist rejected the output to protect quality.", - "The original file has been preserved. You can lower the VMAF threshold in Settings -> Quality, or disable VMAF checking entirely.", - ); - case "transcode_h264_source": - return makeDecision( - "transcode_h264_source", - "H.264 source prioritized", - "This file is H.264, which is typically a strong candidate for reclaiming space, so Alchemist prioritized it for transcoding.", - null, - ); - case "transcode_recommended": - return makeDecision( - "transcode_recommended", - "Transcode recommended", - "Alchemist determined this file is a strong candidate for transcoding based on the current codec and measured efficiency.", - null, - ); - default: - return makeDecision("legacy_decision", "Decision recorded", reason, null); - } -} - -function explainFailureSummary(summary: string): ExplanationView { - const normalized = summary.toLowerCase(); - - const makeFailure = ( - code: string, - title: string, - detail: string, - operator_guidance: string | null, - ): ExplanationView => ({ - category: "failure", - code, - summary: title, - 
detail, - operator_guidance, - measured: {}, - legacy_reason: summary, - }); - - if (normalized.includes("cancelled")) { - return makeFailure( - "cancelled", - "Job was cancelled", - "This job was cancelled before encoding completed. The original file is untouched.", - null, - ); - } - if (normalized.includes("no such file or directory")) { - return makeFailure( - "source_missing", - "Source file missing", - "The source file could not be found. It may have been moved or deleted.", - "Check that the source file still exists and is readable by Alchemist.", - ); - } - if (normalized.includes("invalid data found") || normalized.includes("moov atom not found")) { - return makeFailure( - "corrupt_or_unreadable_media", - "Media could not be read", - "This file appears to be corrupt or incomplete. Try running a Library Doctor scan.", - "Verify the source file manually or run Library Doctor to confirm whether it is readable.", - ); - } - if (normalized.includes("permission denied")) { - return makeFailure( - "permission_denied", - "Permission denied", - "Alchemist doesn't have permission to read this file. Check the file permissions.", - "Check the file and output path permissions for the Alchemist process user.", - ); - } - if (normalized.includes("encoder not found") || normalized.includes("unknown encoder")) { - return makeFailure( - "encoder_unavailable", - "Required encoder unavailable", - "The required encoder is not available in your FFmpeg installation.", - "Check FFmpeg encoder availability and hardware settings.", - ); - } - if (normalized.includes("out of memory") || normalized.includes("cannot allocate memory")) { - return makeFailure( - "resource_exhausted", - "System ran out of memory", - "The system ran out of memory during encoding. 
Try reducing concurrent jobs.", - "Reduce concurrent jobs or rerun under lower system load.", - ); - } - if (normalized.includes("transcode_failed") || normalized.includes("ffmpeg exited")) { - return makeFailure( - "unknown_ffmpeg_failure", - "FFmpeg failed", - "FFmpeg failed during encoding. This is often caused by a corrupt source file or an encoder configuration issue. Check the logs below for the specific FFmpeg error.", - "Inspect the FFmpeg output in the job logs for the exact failure.", - ); - } - if (normalized.includes("probing failed")) { - return makeFailure( - "analysis_failed", - "Analysis failed", - "FFprobe could not read this file. It may be corrupt or in an unsupported format.", - "Inspect the source file manually or run Library Doctor to confirm whether it is readable.", - ); - } - if (normalized.includes("planning_failed") || normalized.includes("planner")) { - return makeFailure( - "planning_failed", - "Planner failed", - "An error occurred while planning the transcode. Check the logs below for details.", - "Treat repeated planner failures as a bug and inspect the logs for the triggering input.", - ); - } - if (normalized.includes("output_size=0") || normalized.includes("output was empty")) { - return makeFailure( - "unknown_ffmpeg_failure", - "Empty output produced", - "Encoding produced an empty output file. This usually means FFmpeg crashed silently. Check the logs below for FFmpeg output.", - "Inspect the FFmpeg logs before retrying the job.", - ); - } - if ( - normalized.includes("videotoolbox") || - normalized.includes("vt_compression") || - normalized.includes("err=-12902") || - normalized.includes("mediaserverd") || - normalized.includes("no capable devices") - ) { - return makeFailure( - "hardware_backend_failure", - "Hardware backend failed", - "The VideoToolbox hardware encoder failed. This can happen when the GPU is busy, the file uses an unsupported pixel format, or macOS Media Services are unavailable.", - "Retry the job. 
If it keeps failing, check the hardware probe log or enable CPU fallback in Settings -> Hardware.", - ); - } - if (normalized.includes("encoder fallback") || normalized.includes("fallback detected")) { - return makeFailure( - "fallback_blocked", - "Fallback blocked by policy", - "The hardware encoder was unavailable and fell back to software encoding, which was not allowed by your settings.", - "Enable CPU fallback in Settings -> Hardware, or retry when the GPU is less busy.", - ); - } - if (normalized.includes("ffmpeg failed")) { - return makeFailure( - "unknown_ffmpeg_failure", - "FFmpeg failed", - "FFmpeg failed during encoding. Check the logs below for the specific error. Common causes: unsupported pixel format, codec not available, or corrupt source file.", - "Inspect the FFmpeg output in the job logs for the exact failure.", - ); - } - - return makeFailure( - "legacy_failure", - "Failure recorded", - summary, - "Inspect the job logs for additional context.", +function getStatusBadge(status: string) { + const styles: Record = { + queued: "bg-helios-slate/10 text-helios-slate border-helios-slate/20", + analyzing: "bg-blue-500/10 text-blue-500 border-blue-500/20", + encoding: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", + remuxing: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", + completed: "bg-green-500/10 text-green-500 border-green-500/20", + failed: "bg-red-500/10 text-red-500 border-red-500/20", + cancelled: "bg-red-500/10 text-red-500 border-red-500/20", + skipped: "bg-gray-500/10 text-gray-500 border-gray-500/20", + archived: "bg-zinc-500/10 text-zinc-400 border-zinc-500/20", + resuming: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", + }; + return ( + + {status} + ); } -function normalizeDecisionExplanation( - explanation: ExplanationPayload | null | undefined, - legacyReason?: string | null, -): ExplanationView | null { - if (explanation) { - return explanation; - 
} - if (legacyReason) { - return humanizeSkipReason(legacyReason); - } - return null; -} - -function normalizeFailureExplanation( - explanation: ExplanationPayload | null | undefined, - legacySummary?: string | null, -): ExplanationView | null { - if (explanation) { - return explanation; - } - if (legacySummary) { - return explainFailureSummary(legacySummary); - } - return null; -} - -function logLevelClass(level: string): string { - switch (level.toLowerCase()) { - case "error": - return "text-status-error"; - case "warn": - case "warning": - return "text-helios-solar"; - default: - return "text-helios-slate"; - } -} - -interface Job { - id: number; - input_path: string; - output_path: string; - status: string; - priority: number; - progress: number; - created_at: string; - updated_at: string; - attempt_count: number; - vmaf_score?: number; - decision_reason?: string; - decision_explanation?: ExplanationPayload | null; - encoder?: string; -} - -function retryCountdown(job: Job): string | null { - if (job.status !== "failed") return null; - if (!job.attempt_count || job.attempt_count === 0) return null; - - const backoffMins = - job.attempt_count === 1 ? 5 - : job.attempt_count === 2 ? 15 - : job.attempt_count === 3 ? 60 - : 360; - - const updatedMs = new Date(job.updated_at).getTime(); - const retryAtMs = updatedMs + backoffMins * 60 * 1000; - const remainingMs = retryAtMs - Date.now(); - - if (remainingMs <= 0) return "Retrying soon"; - - const remainingMins = Math.ceil(remainingMs / 60_000); - if (remainingMins < 60) return `Retrying in ${remainingMins}m`; - const hrs = Math.floor(remainingMins / 60); - const mins = remainingMins % 60; - return mins > 0 ? 
`Retrying in ${hrs}h ${mins}m` : `Retrying in ${hrs}h`; -} - -interface JobMetadata { - duration_secs: number; - codec_name: string; - width: number; - height: number; - bit_depth?: number; - size_bytes: number; - video_bitrate_bps?: number; - container_bitrate_bps?: number; - fps: number; - container: string; - audio_codec?: string; - audio_channels?: number; - dynamic_range?: string; -} - -interface EncodeStats { - input_size_bytes: number; - output_size_bytes: number; - compression_ratio: number; - encode_time_seconds: number; - encode_speed: number; - avg_bitrate_kbps: number; - vmaf_score?: number; -} - -interface LogEntry { - id: number; - level: string; - message: string; - created_at: string; -} - -interface JobDetail { - job: Job; - metadata: JobMetadata | null; - encode_stats: EncodeStats | null; - job_logs: LogEntry[]; - job_failure_summary: string | null; - decision_explanation: ExplanationPayload | null; - failure_explanation: ExplanationPayload | null; -} - -interface CountMessageResponse { - count: number; - message: string; -} - -type TabType = "all" | "active" | "queued" | "completed" | "failed" | "skipped" | "archived"; -type SortField = "updated_at" | "created_at" | "input_path" | "size"; - -const SORT_OPTIONS: Array<{ value: SortField; label: string }> = [ - { value: "updated_at", label: "Last Updated" }, - { value: "created_at", label: "Date Added" }, - { value: "input_path", label: "File Name" }, - { value: "size", label: "File Size" }, -]; - function JobManager() { const [jobs, setJobs] = useState([]); const [loading, setLoading] = useState(true); @@ -518,13 +78,7 @@ function JobManager() { const compactSearchInputRef = useRef(null); const confirmOpenRef = useRef(false); const encodeStartTimes = useRef>(new Map()); - const [confirmState, setConfirmState] = useState<{ - title: string; - body: string; - confirmLabel: string; - confirmTone?: "danger" | "primary"; - onConfirm: () => Promise | void; - } | null>(null); + const [confirmState, 
setConfirmState] = useState(null); const [tick, setTick] = useState(0); useEffect(() => { @@ -569,8 +123,6 @@ function JobManager() { }; }, [compactSearchOpen, searchInput]); - const isJobActive = (job: Job) => ["analyzing", "encoding", "remuxing", "resuming"].includes(job.status); - const formatJobActionError = (error: unknown, fallback: string) => { if (!isApiError(error)) { return fallback; @@ -589,7 +141,6 @@ function JobManager() { return `${error.message}: ${summary}`; }; - // Filter mapping const getStatusFilter = (tab: TabType) => { switch (tab) { case "active": return ["analyzing", "encoding", "remuxing", "resuming"]; @@ -634,12 +185,8 @@ function JobManager() { terminal.includes(local.status) && serverIsTerminal ) { - // Both agree this is terminal — keep - // local status to prevent SSE→poll flicker. return { ...serverJob, status: local.status }; } - // Server says it changed (e.g. retry queued it) - // — trust the server. return serverJob; }) ); @@ -685,94 +232,7 @@ function JobManager() { }; }, []); - useEffect(() => { - let eventSource: EventSource | null = null; - let cancelled = false; - let reconnectTimeout: number | null = null; - let reconnectAttempts = 0; - - const getReconnectDelay = () => { - // Exponential backoff: 1s, 2s, 4s, 8s, 16s, max 30s - const baseDelay = 1000; - const maxDelay = 30000; - const delay = Math.min(baseDelay * Math.pow(2, reconnectAttempts), maxDelay); - // Add jitter (±25%) to prevent thundering herd - const jitter = delay * 0.25 * (Math.random() * 2 - 1); - return Math.round(delay + jitter); - }; - - const connect = () => { - if (cancelled) return; - eventSource?.close(); - eventSource = new EventSource("/api/events"); - - eventSource.onopen = () => { - // Reset reconnect attempts on successful connection - reconnectAttempts = 0; - }; - - eventSource.addEventListener("status", (e) => { - try { - const { job_id, status } = JSON.parse(e.data) as { - job_id: number; - status: string; - }; - if (status === "encoding") { - 
encodeStartTimes.current.set(job_id, Date.now()); - } else { - encodeStartTimes.current.delete(job_id); - } - setJobs((prev) => - prev.map((job) => - job.id === job_id ? { ...job, status } : job - ) - ); - } catch { - /* ignore malformed */ - } - }); - - eventSource.addEventListener("progress", (e) => { - try { - const { job_id, percentage } = JSON.parse(e.data) as { - job_id: number; - percentage: number; - }; - setJobs((prev) => - prev.map((job) => - job.id === job_id ? { ...job, progress: percentage } : job - ) - ); - } catch { - /* ignore malformed */ - } - }); - - eventSource.addEventListener("decision", () => { - // Re-fetch full job list when decisions are made - void fetchJobsRef.current(); - }); - - eventSource.onerror = () => { - eventSource?.close(); - if (!cancelled) { - reconnectAttempts++; - const delay = getReconnectDelay(); - reconnectTimeout = window.setTimeout(connect, delay); - } - }; - }; - - connect(); - - return () => { - cancelled = true; - eventSource?.close(); - if (reconnectTimeout !== null) { - window.clearTimeout(reconnectTimeout); - } - }; - }, []); + useJobSSE({ setJobs, fetchJobsRef, encodeStartTimes }); useEffect(() => { const encodingJobIds = new Set(); @@ -1008,74 +468,7 @@ function JobManager() { } }; - const formatBytes = (bytes: number) => { - if (bytes === 0) return "0 B"; - const k = 1024; - const sizes = ["B", "KB", "MB", "GB", "TB"]; - const i = Math.floor(Math.log(bytes) / Math.log(k)); - return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i]; - }; - - const formatDuration = (seconds: number) => { - const h = Math.floor(seconds / 3600); - const m = Math.floor((seconds % 3600) / 60); - const s = Math.floor(seconds % 60); - return [h, m, s].map(v => v.toString().padStart(2, "0")).join(":"); - }; - - const calcEta = (jobId: number, progress: number): string | null => { - if (progress <= 0 || progress >= 100) { - return null; - } - - const startMs = encodeStartTimes.current.get(jobId); - if (!startMs) { - 
return null; - } - - const elapsedMs = Date.now() - startMs; - const totalMs = elapsedMs / (progress / 100); - const remainingMs = totalMs - elapsedMs; - const remainingSecs = Math.round(remainingMs / 1000); - - if (remainingSecs < 0) { - return null; - } - if (remainingSecs < 60) { - return `~${remainingSecs}s remaining`; - } - - const mins = Math.ceil(remainingSecs / 60); - return `~${mins} min remaining`; - }; - - const getStatusBadge = (status: string) => { - const styles: Record = { - queued: "bg-helios-slate/10 text-helios-slate border-helios-slate/20", - analyzing: "bg-blue-500/10 text-blue-500 border-blue-500/20", - encoding: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", - remuxing: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", - completed: "bg-green-500/10 text-green-500 border-green-500/20", - failed: "bg-red-500/10 text-red-500 border-red-500/20", - cancelled: "bg-red-500/10 text-red-500 border-red-500/20", - skipped: "bg-gray-500/10 text-gray-500 border-gray-500/20", - archived: "bg-zinc-500/10 text-zinc-400 border-zinc-500/20", - resuming: "bg-helios-solar/10 text-helios-solar border-helios-solar/20 animate-pulse", - }; - return ( - - {status} - - ); - }; - - const openConfirm = (config: { - title: string; - body: string; - confirmLabel: string; - confirmTone?: "danger" | "primary"; - onConfirm: () => Promise | void; - }) => { + const openConfirm = (config: ConfirmConfig) => { setConfirmState(config); }; @@ -1089,6 +482,7 @@ function JobManager() { ? normalizeFailureExplanation( focusedJob.failure_explanation, focusedJob.job_failure_summary, + focusedJob.job_logs, ) : null; const focusedJobLogs = focusedJob?.job_logs ?? []; @@ -1098,130 +492,44 @@ function JobManager() { const completedEncodeStats = focusedJob?.job.status === "completed" ? focusedJob.encode_stats : null; + const focusedEmptyState = focusedJob + ? jobDetailEmptyState(focusedJob.job.status) + : null; return (
- - {activeCount} - + {activeCount} {" "}active - - {failedCount} - + {failedCount} {" "}failed - - {completedCount} - + {completedCount} {" "}completed
- {/* Toolbar */} -
-
- {(["all", "active", "queued", "completed", "failed", "skipped", "archived"] as TabType[]).map((tab) => ( - - ))} -
- -
-
- - setSearchInput(e.target.value)} - className="w-full bg-helios-surface border border-helios-line/20 rounded-lg pl-9 pr-4 py-2 text-sm text-helios-ink focus:border-helios-solar outline-none" - /> -
-
- - -
- -
-
- - setSearchInput(e.target.value)} - className={cn( - "min-w-0 bg-transparent text-sm text-helios-ink outline-none placeholder:text-helios-slate transition-all duration-200", - compactSearchOpen - ? "ml-1 w-full opacity-100" - : "w-0 opacity-0 pointer-events-none" - )} - /> -
-
-
-
+ {actionError && (
@@ -1293,238 +601,24 @@ function JobManager() {
)} - {/* Table */} -
- - - - - - - - - - - - - {loading && jobs.length === 0 ? ( - Array.from({ length: 5 }).map((_, index) => ( - - - - )) - ) : jobs.length === 0 ? ( - - - - ) : ( - jobs.map((job) => ( - void fetchJobDetails(job.id)} - className={cn( - "group hover:bg-helios-surface/80 transition-all cursor-pointer", - selected.has(job.id) && "bg-helios-surface-soft", - focusedJob?.job.id === job.id && "bg-helios-solar/5" - )} - > - - - - - - - - )) - )} - -
- 0 && jobs.every(j => selected.has(j.id))} - onChange={toggleSelectAll} - className="rounded border-helios-line/30 bg-helios-surface-soft accent-helios-solar" - /> - FileStatusProgressUpdated
-
-
- No jobs found -
e.stopPropagation()}> - toggleSelect(job.id)} - className="rounded border-helios-line/30 bg-helios-surface-soft accent-helios-solar" - /> - - - - {job.input_path.split(/[/\\]/).pop()} - -
- - {job.input_path} - - - P{job.priority} - -
-
-
- - {getStatusBadge(job.status)} - - {job.status === "failed" && (() => { - // Reference tick so React re-renders countdowns on interval - void tick; - const countdown = retryCountdown(job); - return countdown ? ( -

- {countdown} -

- ) : null; - })()} -
- {["encoding", "analyzing", "remuxing"].includes(job.status) ? ( -
-
-
-
-
- {job.progress.toFixed(1)}% -
- {job.status === "encoding" && (() => { - const eta = calcEta(job.id, job.progress); - return eta ? ( -

- {eta} -

- ) : null; - })()} - {job.status === "encoding" && job.encoder && ( - - {job.encoder} - - )} -
- ) : ( - job.vmaf_score ? ( - - VMAF: {job.vmaf_score.toFixed(1)} - - ) : ( - - - ) - )} -
- {new Date(job.updated_at).toLocaleString()} - e.stopPropagation()}> -
- - - {menuJobId === job.id && ( - - - - - - {(job.status === "failed" || job.status === "cancelled") && ( - - )} - {["encoding", "analyzing", "remuxing"].includes(job.status) && ( - - )} - {!isJobActive(job) && ( - - )} - - )} - -
-
-
+ {/* Footer Actions */}
@@ -1545,431 +639,24 @@ function JobManager() {
- {/* Detail Overlay - rendered via portal to escape layout constraints */} + {/* Detail Overlay */} {typeof document !== "undefined" && createPortal( - - {focusedJob && ( - <> - setFocusedJob(null)} - className="fixed inset-0 bg-black/60 backdrop-blur-sm z-[100]" - /> -
- - {/* Header */} -
-
-
- {getStatusBadge(focusedJob.job.status)} - Job ID #{focusedJob.job.id} - Priority {focusedJob.job.priority} -
-

- {focusedJob.job.input_path.split(/[/\\]/).pop()} -

-

{focusedJob.job.input_path}

-
- -
- -
- {detailLoading && ( -

Loading job details...

- )} - {focusedJob.metadata || completedEncodeStats ? ( - <> - {focusedJob.metadata && ( - <> - {/* Stats Grid */} -
-
-
- - Video Codec -
-

- {focusedJob.metadata.codec_name || "Unknown"} -

-

- {(focusedJob.metadata.bit_depth ? `${focusedJob.metadata.bit_depth}-bit` : "Unknown bit depth")} • {focusedJob.metadata.container.toUpperCase()} -

-
- -
-
- - Resolution -
-

- {`${focusedJob.metadata.width}x${focusedJob.metadata.height}`} -

-

- {focusedJob.metadata.fps.toFixed(2)} FPS -

-
- -
-
- - Duration -
-

- {formatDuration(focusedJob.metadata.duration_secs)} -

-
-
- - {/* Media Details */} -
-
-

- Input Details -

-
-
- File Size - {formatBytes(focusedJob.metadata.size_bytes)} -
-
- Video Bitrate - - {(focusedJob.metadata.video_bitrate_bps ?? focusedJob.metadata.container_bitrate_bps) - ? `${(((focusedJob.metadata.video_bitrate_bps ?? focusedJob.metadata.container_bitrate_bps) as number) / 1000).toFixed(0)} kbps` - : "-"} - -
-
- Audio - - {focusedJob.metadata.audio_codec || "N/A"} ({focusedJob.metadata.audio_channels || 0}ch) - -
-
-
- -
-

- Output Details -

- {focusedJob.encode_stats ? ( -
-
- Result Size - {formatBytes(focusedJob.encode_stats.output_size_bytes)} -
-
- Reduction - - {((1 - focusedJob.encode_stats.compression_ratio) * 100).toFixed(1)}% Saved - -
-
- VMAF Score -
-
-
-
- - {focusedJob.encode_stats.vmaf_score?.toFixed(1) || "-"} - -
-
-
- ) : ( -
- {focusedJob.job.status === "encoding" - ? "Encoding in progress..." - : focusedJob.job.status === "remuxing" - ? "Remuxing in progress..." - : "No encode data available"} -
- )} -
-
- - )} - - {completedEncodeStats && ( -
-

- Encode Results -

-
-
- Input size - {formatBytes(completedEncodeStats.input_size_bytes)} -
-
- Output size - {formatBytes(completedEncodeStats.output_size_bytes)} -
-
- Reduction - - {completedEncodeStats.input_size_bytes > 0 - ? `${((1 - completedEncodeStats.output_size_bytes / completedEncodeStats.input_size_bytes) * 100).toFixed(1)}% saved` - : "—"} - -
-
- Encode time - {formatDuration(completedEncodeStats.encode_time_seconds)} -
-
- Speed - {`${completedEncodeStats.encode_speed.toFixed(2)}\u00d7 realtime`} -
-
- Avg bitrate - {`${completedEncodeStats.avg_bitrate_kbps} kbps`} -
-
- VMAF - {completedEncodeStats.vmaf_score?.toFixed(1) ?? "—"} -
-
-
- )} - - ) : ( -
-
- -
-
-

- Waiting for analysis -

-

- Metadata will appear once this job is picked up by the engine. -

-
-
- )} - - {/* Decision Info */} - {focusedDecision && focusedJob.job.status !== "failed" && focusedJob.job.status !== "skipped" && ( -
-
- - Decision Context -
-
-

- {focusedJob.job.status === "completed" - ? "Transcoded" - : focusedDecision.summary} -

-

- {focusedDecision.detail} -

- {Object.keys(focusedDecision.measured).length > 0 && ( -
- {Object.entries(focusedDecision.measured).map(([k, v]) => ( -
- {k} - {String(v)} -
- ))} -
- )} - {focusedDecision.operator_guidance && ( -
- - {focusedDecision.operator_guidance} - -
- )} -
-
- )} - - {focusedJob.job.status === "skipped" && focusedDecision && ( -
-

- Alchemist analysed this file and decided not to transcode it. Here's why: -

-
-

- {focusedDecision.summary} -

-

- {focusedDecision.detail} -

- {Object.keys(focusedDecision.measured).length > 0 && ( -
- {Object.entries(focusedDecision.measured).map(([k, v]) => ( -
- {k} - {String(v)} -
- ))} -
- )} - {focusedDecision.operator_guidance && ( -
- - {focusedDecision.operator_guidance} - -
- )} -
-
- )} - - {focusedJob.job.status === "failed" && ( -
-
- - - Failure Reason - -
- {focusedFailure ? ( - <> -

- {focusedFailure.summary} -

-

- {focusedFailure.detail} -

- {focusedFailure.operator_guidance && ( -

- {focusedFailure.operator_guidance} -

- )} - {focusedFailure.legacy_reason !== focusedFailure.detail && ( -

- {focusedFailure.legacy_reason} -

- )} - - ) : ( -

- No error details captured. Check the logs below. -

- )} -
- )} - - {shouldShowFfmpegOutput && ( -
- - Show FFmpeg output ({focusedJobLogs.length} lines) - -
- {focusedJobLogs.map((entry) => ( -
- {entry.message} -
- ))} -
-
- )} - - {/* Action Toolbar */} -
-
- - - - {(focusedJob.job.status === 'failed' || focusedJob.job.status === 'cancelled') && ( - - )} - {["encoding", "analyzing", "remuxing"].includes(focusedJob.job.status) && ( - - )} -
- {!isJobActive(focusedJob.job) && ( - - )} -
-
- -
- - )} - , + setFocusedJob(null)} + focusedDecision={focusedDecision} + focusedFailure={focusedFailure} + focusedJobLogs={focusedJobLogs} + shouldShowFfmpegOutput={shouldShowFfmpegOutput} + completedEncodeStats={completedEncodeStats} + focusedEmptyState={focusedEmptyState} + openConfirm={openConfirm} + handleAction={handleAction} + handlePriority={handlePriority} + getStatusBadge={getStatusBadge} + />, document.body )} diff --git a/web/src/components/SavingsOverview.tsx b/web/src/components/SavingsOverview.tsx index 49b4c1b..e084391 100644 --- a/web/src/components/SavingsOverview.tsx +++ b/web/src/components/SavingsOverview.tsx @@ -14,24 +14,24 @@ import { apiJson, isApiError } from "../lib/api"; import { showToast } from "../lib/toast"; interface CodecSavings { - codec: string; bytes_saved: number; + codec: string; job_count: number; } interface DailySavings { - date: string; bytes_saved: number; + date: string; } interface SavingsSummary { - total_input_bytes: number; - total_output_bytes: number; - total_bytes_saved: number; - savings_percent: number; job_count: number; savings_by_codec: CodecSavings[]; savings_over_time: DailySavings[]; + savings_percent: number; + total_bytes_saved: number; + total_input_bytes: number; + total_output_bytes: number; } const GIB = 1_073_741_824; diff --git a/web/src/components/ScheduleSettings.tsx b/web/src/components/ScheduleSettings.tsx index 55b5b24..0d44cfd 100644 --- a/web/src/components/ScheduleSettings.tsx +++ b/web/src/components/ScheduleSettings.tsx @@ -5,11 +5,11 @@ import { showToast } from "../lib/toast"; import ConfirmDialog from "./ui/ConfirmDialog"; interface ScheduleWindow { - id: number; - start_time: string; - end_time: string; days_of_week: string; enabled: boolean; + end_time: string; + id: number; + start_time: string; } const DAYS = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]; diff --git a/web/src/components/jobs/JobDetailModal.tsx b/web/src/components/jobs/JobDetailModal.tsx new file mode 100644 
index 0000000..8b88ddc --- /dev/null +++ b/web/src/components/jobs/JobDetailModal.tsx @@ -0,0 +1,511 @@ +import { X, Clock, Info, Activity, Database, Zap, Maximize2, AlertCircle, RefreshCw, Ban, Trash2 } from "lucide-react"; +import { motion, AnimatePresence } from "framer-motion"; +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; +import type { RefObject } from "react"; +import type React from "react"; +import type { JobDetail, EncodeStats, ExplanationView, LogEntry, ConfirmConfig, Job } from "./types"; +import { formatBytes, formatDuration, logLevelClass, isJobActive } from "./types"; + +function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} + +interface JobDetailModalProps { + focusedJob: JobDetail | null; + detailDialogRef: RefObject; + detailLoading: boolean; + onClose: () => void; + focusedDecision: ExplanationView | null; + focusedFailure: ExplanationView | null; + focusedJobLogs: LogEntry[]; + shouldShowFfmpegOutput: boolean; + completedEncodeStats: EncodeStats | null; + focusedEmptyState: { title: string; detail: string } | null; + openConfirm: (config: ConfirmConfig) => void; + handleAction: (id: number, action: "cancel" | "restart" | "delete") => Promise; + handlePriority: (job: Job, priority: number, label: string) => Promise; + getStatusBadge: (status: string) => React.ReactElement; +} + +export function JobDetailModal({ + focusedJob, detailDialogRef, detailLoading, onClose, + focusedDecision, focusedFailure, focusedJobLogs, shouldShowFfmpegOutput, + completedEncodeStats, focusedEmptyState, + openConfirm, handleAction, handlePriority, getStatusBadge, +}: JobDetailModalProps) { + return ( + + {focusedJob && ( + <> + +
+ } + role="dialog" + aria-modal="true" + aria-labelledby="job-details-title" + aria-describedby="job-details-path" + tabIndex={-1} + className="w-full max-w-2xl bg-helios-surface border border-helios-line/20 rounded-lg shadow-2xl pointer-events-auto overflow-hidden mx-4" + > + {/* Header */} +
+
+
+ {getStatusBadge(focusedJob.job.status)} + Job ID #{focusedJob.job.id} + Priority {focusedJob.job.priority} +
+

+ {focusedJob.job.input_path.split(/[/\\]/).pop()} +

+

{focusedJob.job.input_path}

+
+ +
+ +
+ {detailLoading && ( +

Loading job details...

+ )} + {/* Active-encode status banner */} + {focusedEmptyState && (focusedJob.job.status === "encoding" || focusedJob.job.status === "remuxing") && ( +
+
+ +
+

{focusedEmptyState.title}

+
+ )} + + {focusedJob.metadata || completedEncodeStats ? ( + <> + {focusedJob.metadata && ( + <> + {/* Stats Grid */} +
+
+
+ + Video Codec +
+

+ {focusedJob.metadata.codec_name || "Unknown"} +

+

+ {(focusedJob.metadata.bit_depth ? `${focusedJob.metadata.bit_depth}-bit` : "Unknown bit depth")} • {focusedJob.metadata.container.toUpperCase()} +

+
+ +
+
+ + Resolution +
+

+ {`${focusedJob.metadata.width}x${focusedJob.metadata.height}`} +

+

+ {focusedJob.metadata.fps.toFixed(2)} FPS +

+
+ +
+
+ + Duration +
+

+ {formatDuration(focusedJob.metadata.duration_secs)} +

+
+
+ + {/* Media Details */} +
+
+

+ Input Details +

+
+
+ File Size + {formatBytes(focusedJob.metadata.size_bytes)} +
+
+ Video Bitrate + + {(focusedJob.metadata.video_bitrate_bps ?? focusedJob.metadata.container_bitrate_bps) + ? `${(((focusedJob.metadata.video_bitrate_bps ?? focusedJob.metadata.container_bitrate_bps) as number) / 1000).toFixed(0)} kbps` + : "-"} + +
+
+ Audio + + {focusedJob.metadata.audio_codec || "N/A"} ({focusedJob.metadata.audio_channels || 0}ch) + +
+
+
+ +
+

+ Output Details +

+ {focusedJob.encode_stats ? ( +
+
+ Result Size + {formatBytes(focusedJob.encode_stats.output_size_bytes)} +
+
+ Reduction + + {((1 - focusedJob.encode_stats.compression_ratio) * 100).toFixed(1)}% Saved + +
+
+ VMAF Score +
+
+
+
+ + {focusedJob.encode_stats.vmaf_score?.toFixed(1) || "-"} + +
+
+
+ ) : ( +
+ {focusedJob.job.status === "encoding" + ? "Encoding in progress..." + : focusedJob.job.status === "remuxing" + ? "Remuxing in progress..." + : "No encode data available"} +
+ )} +
+
+ + )} + + {completedEncodeStats && ( +
+

+ Encode Results +

+
+
+ Input size + {formatBytes(completedEncodeStats.input_size_bytes)} +
+
+ Output size + {formatBytes(completedEncodeStats.output_size_bytes)} +
+
+ Reduction + + {completedEncodeStats.input_size_bytes > 0 + ? `${((1 - completedEncodeStats.output_size_bytes / completedEncodeStats.input_size_bytes) * 100).toFixed(1)}% saved` + : "—"} + +
+
+ Encode time + {formatDuration(completedEncodeStats.encode_time_seconds)} +
+
+ Speed + {`${completedEncodeStats.encode_speed.toFixed(2)}\u00d7 realtime`} +
+
+ Avg bitrate + {`${completedEncodeStats.avg_bitrate_kbps} kbps`} +
+
+ VMAF + {completedEncodeStats.vmaf_score?.toFixed(1) ?? "—"} +
+
+
+ )} + + ) : focusedEmptyState ? ( +
+
+ +
+
+

+ {focusedEmptyState.title} +

+

+ {focusedEmptyState.detail} +

+
+
+ ) : null} + + {/* Decision Info */} + {focusedDecision && focusedJob.job.status !== "failed" && focusedJob.job.status !== "skipped" && ( +
+
+ + Decision Context +
+
+

+ {focusedJob.job.status === "completed" + ? "Transcoded" + : focusedDecision.summary} +

+

+ {focusedDecision.detail} +

+ {Object.keys(focusedDecision.measured).length > 0 && ( +
+ {Object.entries(focusedDecision.measured).map(([k, v]) => ( +
+ {k} + {String(v)} +
+ ))} +
+ )} + {focusedDecision.operator_guidance && ( +
+ + {focusedDecision.operator_guidance} + +
+ )} +
+
+ )} + + {focusedJob.job.status === "skipped" && focusedDecision && ( +
+

+ Alchemist analysed this file and decided not to transcode it. Here's why: +

+
+

+ {focusedDecision.summary} +

+

+ {focusedDecision.detail} +

+ {Object.keys(focusedDecision.measured).length > 0 && ( +
+ {Object.entries(focusedDecision.measured).map(([k, v]) => ( +
+ {k} + {String(v)} +
+ ))} +
+ )} + {focusedDecision.operator_guidance && ( +
+ + {focusedDecision.operator_guidance} + +
+ )} +
+
+ )} + + {focusedJob.job.status === "failed" && ( +
+
+ + + Failure Reason + +
+ {focusedFailure ? ( + <> +

+ {focusedFailure.summary} +

+

+ {focusedFailure.detail} +

+ {focusedFailure.operator_guidance && ( +

+ {focusedFailure.operator_guidance} +

+ )} + {focusedFailure.legacy_reason !== focusedFailure.detail && ( +

+ {focusedFailure.legacy_reason} +

+ )} + + ) : ( +

+ No error details captured. Check the logs below. +

+ )} +
+ )} + + {(focusedJob.encode_attempts ?? []).length > 0 && ( +
+ + Attempt History ({(focusedJob.encode_attempts ?? []).length}) + +
+ {(focusedJob.encode_attempts ?? []).map((attempt) => ( +
+ #{attempt.attempt_number} +
+
+ {attempt.outcome} + {attempt.encode_time_seconds != null && ( + {attempt.encode_time_seconds < 60 + ? `${attempt.encode_time_seconds.toFixed(1)}s` + : `${(attempt.encode_time_seconds / 60).toFixed(1)}m`} + )} + {attempt.input_size_bytes != null && attempt.output_size_bytes != null && ( + + {formatBytes(attempt.input_size_bytes)} → {formatBytes(attempt.output_size_bytes)} + + )} +
+ {attempt.failure_summary && ( +

{attempt.failure_summary}

+ )} +

{new Date(attempt.finished_at).toLocaleString()}

+
+
+ ))} +
+
+ )} + + {shouldShowFfmpegOutput && ( +
+ + Show FFmpeg output ({focusedJobLogs.length} lines) + +
+ {focusedJobLogs.map((entry) => ( +
+ {entry.message} +
+ ))} +
+
+ )} + + {/* Action Toolbar */} +
+
+ + + + {(focusedJob.job.status === "failed" || focusedJob.job.status === "cancelled") && ( + + )} + {["encoding", "analyzing", "remuxing"].includes(focusedJob.job.status) && ( + + )} +
+ {!isJobActive(focusedJob.job) && ( + + )} +
+
+ +
+ + )} + + ); +} diff --git a/web/src/components/jobs/JobExplanations.ts b/web/src/components/jobs/JobExplanations.ts new file mode 100644 index 0000000..7e2040a --- /dev/null +++ b/web/src/components/jobs/JobExplanations.ts @@ -0,0 +1,303 @@ +import type { ExplanationView, LogEntry } from "./types"; + +function formatReductionPercent(value?: string): string { + if (!value) return "?"; + const parsed = Number.parseFloat(value); + return Number.isFinite(parsed) ? `${(parsed * 100).toFixed(0)}%` : value; +} + +export function humanizeSkipReason(reason: string): ExplanationView { + const pipeIdx = reason.indexOf("|"); + const key = pipeIdx === -1 + ? reason.trim() + : reason.slice(0, pipeIdx).trim(); + const paramStr = pipeIdx === -1 ? "" : reason.slice(pipeIdx + 1); + + const measured: Record = {}; + for (const pair of paramStr.split(",")) { + const [rawKey, ...rawValueParts] = pair.split("="); + if (!rawKey || rawValueParts.length === 0) continue; + measured[rawKey.trim()] = rawValueParts.join("=").trim(); + } + + const makeDecision = ( + code: string, + summary: string, + detail: string, + operator_guidance: string | null, + ): ExplanationView => ({ + category: "decision", + code, + summary, + detail, + operator_guidance, + measured, + legacy_reason: reason, + }); + + switch (key) { + case "analysis_failed": + return makeDecision( + "analysis_failed", + "File could not be analyzed", + `FFprobe failed to read this file. It may be corrupt, incomplete, or in an unsupported format. Error: ${measured.error ?? "unknown"}`, + "Try playing the file in VLC or another media player. If it plays fine, re-run the scan. If not, the file may be damaged.", + ); + case "planning_failed": + return makeDecision( + "planning_failed", + "Transcoding plan could not be created", + `An internal error occurred while planning the transcode for this file. This is likely a bug. Error: ${measured.error ?? "unknown"}`, + "Check the logs below for details. 
If this happens repeatedly, please report it as a bug.", + ); + case "already_target_codec": + return makeDecision( + "already_target_codec", + "Already in target format", + `This file is already encoded as ${measured.codec ?? "the target codec"}${measured.bit_depth ? ` at ${measured.bit_depth}-bit` : ""}. Re-encoding would waste time and could reduce quality.`, + null, + ); + case "already_target_codec_wrong_container": + return makeDecision( + "already_target_codec_wrong_container", + "Target codec, wrong container", + `The video is already in the right codec but wrapped in a ${measured.container ?? "MP4"} container. Alchemist will remux it to ${measured.target_extension ?? "MKV"} - fast and lossless, no quality loss.`, + null, + ); + case "bpp_below_threshold": + return makeDecision( + "bpp_below_threshold", + "Already efficiently compressed", + `Bits-per-pixel (${measured.bpp ?? "?"}) is below the minimum threshold (${measured.threshold ?? "?"}). This file is already well-compressed - transcoding it would spend significant time for minimal space savings.`, + "If you want to force transcoding, lower the BPP threshold in Settings -> Transcoding.", + ); + case "below_min_file_size": + return makeDecision( + "below_min_file_size", + "File too small to process", + `File size (${measured.size_mb ?? "?"}MB) is below the minimum threshold (${measured.threshold_mb ?? "?"}MB). Small files aren't worth the transcoding overhead.`, + "Lower the minimum file size threshold in Settings -> Transcoding if you want small files processed.", + ); + case "size_reduction_insufficient": + return makeDecision( + "size_reduction_insufficient", + "Not enough space would be saved", + `The predicted size reduction (${formatReductionPercent(String(measured.reduction ?? measured.predicted ?? ""))}) is below the required threshold (${formatReductionPercent(String(measured.threshold ?? ""))}). 
Transcoding this file wouldn't recover meaningful storage.`, + "Lower the size reduction threshold in Settings -> Transcoding to encode files with smaller savings.", + ); + case "no_suitable_encoder": + case "no_available_encoders": + return makeDecision( + key, + "No encoder available", + `No encoder was found for ${measured.codec ?? measured.requested_codec ?? "the target codec"}. Hardware detection may have failed, or CPU fallback is disabled.`, + "Check Settings -> Hardware. Enable CPU fallback, or verify your GPU is detected correctly.", + ); + case "preferred_codec_unavailable_fallback_disabled": + return makeDecision( + "preferred_codec_unavailable_fallback_disabled", + "Preferred encoder unavailable", + `The preferred codec (${measured.codec ?? "target codec"}) is not available and CPU fallback is disabled in settings.`, + "Go to Settings -> Hardware and enable CPU fallback, or check that your GPU encoder is working correctly.", + ); + case "Output path matches input path": + case "output_path_matches_input": + return makeDecision( + "output_path_matches_input", + "Output would overwrite source", + "The configured output path is the same as the source file. Alchemist refused to proceed to avoid overwriting your original file.", + "Go to Settings -> Files and configure a different output suffix or output folder.", + ); + case "Output already exists": + case "output_already_exists": + return makeDecision( + "output_already_exists", + "Output file already exists", + "A transcoded version of this file already exists at the output path. Alchemist skipped it to avoid duplicating work.", + "If you want to re-transcode it, delete the existing output file first, then retry the job.", + ); + case "incomplete_metadata": + return makeDecision( + "incomplete_metadata", + "Missing file metadata", + `FFprobe could not determine the ${measured.missing ?? "required metadata"} for this file. 
Without reliable metadata Alchemist cannot make a valid transcoding decision.`, + "Run a Library Doctor scan to check if this file is corrupt. Try playing it in a media player to confirm it is readable.", + ); + case "already_10bit": + return makeDecision( + "already_10bit", + "Already 10-bit", + "This file is already encoded in high-quality 10-bit depth. Re-encoding it could reduce quality.", + null, + ); + case "remux: mp4_to_mkv_stream_copy": + case "remux_mp4_to_mkv_stream_copy": + return makeDecision( + "remux_mp4_to_mkv_stream_copy", + "Remuxed (no re-encode)", + "This file was remuxed from MP4 to MKV using stream copy - fast and lossless. No quality was lost.", + null, + ); + case "Low quality (VMAF)": + case "quality_below_threshold": + return makeDecision( + "quality_below_threshold", + "Quality check failed", + "The encoded file scored below the minimum VMAF quality threshold. Alchemist rejected the output to protect quality.", + "The original file has been preserved. You can lower the VMAF threshold in Settings -> Quality, or disable VMAF checking entirely.", + ); + case "transcode_h264_source": + return makeDecision( + "transcode_h264_source", + "H.264 source prioritized", + "This file is H.264, which is typically a strong candidate for reclaiming space, so Alchemist prioritized it for transcoding.", + null, + ); + case "transcode_recommended": + return makeDecision( + "transcode_recommended", + "Transcode recommended", + "Alchemist determined this file is a strong candidate for transcoding based on the current codec and measured efficiency.", + null, + ); + default: + return makeDecision("legacy_decision", "Decision recorded", reason, null); + } +} + +export function explainFailureSummary(summary: string): ExplanationView { + const normalized = summary.toLowerCase(); + + const makeFailure = ( + code: string, + title: string, + detail: string, + operator_guidance: string | null, + ): ExplanationView => ({ + category: "failure", + code, + summary: title, 
+ detail, + operator_guidance, + measured: {}, + legacy_reason: summary, + }); + + if (normalized.includes("cancelled")) { + return makeFailure("cancelled", "Job was cancelled", "This job was cancelled before encoding completed. The original file is untouched.", null); + } + if (normalized.includes("no such file or directory")) { + return makeFailure("source_missing", "Source file missing", "The source file could not be found. It may have been moved or deleted.", "Check that the source file still exists and is readable by Alchemist."); + } + if (normalized.includes("invalid data found") || normalized.includes("moov atom not found")) { + return makeFailure("corrupt_or_unreadable_media", "Media could not be read", "This file appears to be corrupt or incomplete. Try running a Library Doctor scan.", "Verify the source file manually or run Library Doctor to confirm whether it is readable."); + } + if (normalized.includes("permission denied")) { + return makeFailure("permission_denied", "Permission denied", "Alchemist doesn't have permission to read this file. Check the file permissions.", "Check the file and output path permissions for the Alchemist process user."); + } + if (normalized.includes("encoder not found") || normalized.includes("unknown encoder")) { + return makeFailure("encoder_unavailable", "Required encoder unavailable", "The required encoder is not available in your FFmpeg installation.", "Check FFmpeg encoder availability and hardware settings."); + } + if (normalized.includes("out of memory") || normalized.includes("cannot allocate memory")) { + return makeFailure("resource_exhausted", "System ran out of memory", "The system ran out of memory during encoding. Try reducing concurrent jobs.", "Reduce concurrent jobs or rerun under lower system load."); + } + if (normalized.includes("transcode_failed") || normalized.includes("ffmpeg exited")) { + return makeFailure("unknown_ffmpeg_failure", "FFmpeg failed", "FFmpeg failed during encoding. 
This is often caused by a corrupt source file or an encoder configuration issue. Check the logs below for the specific FFmpeg error.", "Inspect the FFmpeg output in the job logs for the exact failure."); + } + if (normalized.includes("probing failed")) { + return makeFailure("analysis_failed", "Analysis failed", "FFprobe could not read this file. It may be corrupt or in an unsupported format.", "Inspect the source file manually or run Library Doctor to confirm whether it is readable."); + } + if (normalized.includes("planning_failed") || normalized.includes("planner")) { + return makeFailure("planning_failed", "Planner failed", "An error occurred while planning the transcode. Check the logs below for details.", "Treat repeated planner failures as a bug and inspect the logs for the triggering input."); + } + if (normalized.includes("output_size=0") || normalized.includes("output was empty")) { + return makeFailure("unknown_ffmpeg_failure", "Empty output produced", "Encoding produced an empty output file. This usually means FFmpeg crashed silently. Check the logs below for FFmpeg output.", "Inspect the FFmpeg logs before retrying the job."); + } + if (normalized.includes("videotoolbox") || normalized.includes("vt_compression") || normalized.includes("err=-12902") || normalized.includes("mediaserverd") || normalized.includes("no capable devices")) { + return makeFailure("hardware_backend_failure", "Hardware backend failed", "The VideoToolbox hardware encoder failed. This can happen when the GPU is busy, the file uses an unsupported pixel format, or macOS Media Services are unavailable.", "Retry the job. 
If it keeps failing, check the hardware probe log or enable CPU fallback in Settings -> Hardware."); + } + if (normalized.includes("encoder fallback") || normalized.includes("fallback detected")) { + return makeFailure("fallback_blocked", "Fallback blocked by policy", "The hardware encoder was unavailable and fell back to software encoding, which was not allowed by your settings.", "Enable CPU fallback in Settings -> Hardware, or retry when the GPU is less busy."); + } + if (normalized.includes("ffmpeg failed")) { + return makeFailure("unknown_ffmpeg_failure", "FFmpeg failed", "FFmpeg failed during encoding. Check the logs below for the specific error. Common causes: unsupported pixel format, codec not available, or corrupt source file.", "Inspect the FFmpeg output in the job logs for the exact failure."); + } + + return makeFailure("legacy_failure", "Failure recorded", summary, "Inspect the job logs for additional context."); +} + +export function explainFailureLogs(logs: LogEntry[]): ExplanationView | null { + const sourceEntries = logs.filter((entry) => entry.message.trim().length > 0); + if (sourceEntries.length === 0) return null; + + const recentEntries = sourceEntries.slice(-25); + const prioritizedEntry = [...recentEntries] + .reverse() + .find((entry) => ["error", "warn", "warning"].includes(entry.level.toLowerCase())) + ?? 
recentEntries[recentEntries.length - 1]; + const combined = recentEntries.map((entry) => entry.message).join("\n"); + const normalized = combined.toLowerCase(); + const primaryMessage = prioritizedEntry.message; + + const makeFailure = ( + code: string, + summary: string, + detail: string, + operator_guidance: string | null, + ): ExplanationView => ({ + category: "failure", + code, + summary, + detail, + operator_guidance, + measured: {}, + legacy_reason: primaryMessage, + }); + + if (normalized.includes("qscale not available for encoder")) { + return makeFailure("encoder_parameter_mismatch", "Encoder settings rejected", "FFmpeg rejected the selected encoder parameters for this hardware backend. The command was accepted by Alchemist, but the encoder refused to start with the generated rate-control options.", "Check the FFmpeg output below for the rejected flag and compare it with your current codec and hardware settings."); + } + if (normalized.includes("videotoolbox") || normalized.includes("vt_compression") || normalized.includes("mediaserverd") || normalized.includes("no capable devices") || normalized.includes("could not open encoder before eof")) { + return makeFailure("hardware_backend_failure", "Hardware backend failed", "The hardware encoder failed to initialize or produce output. This usually points to an unsupported source format, a backend-specific FFmpeg parameter issue, or temporary media-services instability on the host.", "Retry the job first. If it fails again, inspect the backend-specific FFmpeg lines below and verify hardware fallback settings."); + } + if (normalized.includes("nothing was written into output file") || normalized.includes("received no packets") || normalized.includes("output_size=0") || normalized.includes("conversion failed")) { + return makeFailure("empty_output", "Encoder produced no output", "FFmpeg ran, but no media packets were successfully written to the output file. 
This usually means the encoder crashed or rejected the stream before real output started.", "Check the lines around the first FFmpeg error below to find the encoder/backend-specific cause."); + } + if (normalized.includes("unknown encoder") || normalized.includes("encoder not found")) { + return makeFailure("encoder_unavailable", "Required encoder unavailable", "The selected encoder is not available in this FFmpeg build.", "Verify FFmpeg encoder support and your hardware settings, then retry the job."); + } + if (normalized.includes("invalid data found") || normalized.includes("moov atom not found") || normalized.includes("error while decoding") || normalized.includes("corrupt")) { + return makeFailure("corrupt_or_unreadable_media", "Media could not be decoded", "FFmpeg hit a decode/read error while processing the source. The file is likely corrupt, incomplete, or not fully readable.", "Try playing the file manually or run Library Doctor to confirm whether the source is intact."); + } + if (normalized.includes("permission denied") || normalized.includes("operation not permitted") || normalized.includes("read-only file system") || normalized.includes("no such file or directory")) { + return makeFailure("path_or_permission_failure", "Path or permission failure", "Alchemist could not read the source or write the output at the required path.", "Check that the source still exists and that the Alchemist process user can read and write the configured paths."); + } + if (normalized.includes("ffmpeg failed") || normalized.includes("transcode failed")) { + return makeFailure("unknown_ffmpeg_failure", "FFmpeg failed", "FFmpeg reported a fatal encoding error, but no more specific structured explanation was stored for this job.", "Inspect the raw FFmpeg output below for the first concrete encoder or media error."); + } + + return null; +} + +export function normalizeDecisionExplanation( + explanation: ExplanationView | null | undefined, + legacyReason?: string | null, +): 
ExplanationView | null { + if (explanation) return explanation; + if (legacyReason) return humanizeSkipReason(legacyReason); + return null; +} + +export function normalizeFailureExplanation( + explanation: ExplanationView | null | undefined, + legacySummary?: string | null, + logs?: LogEntry[] | null, +): ExplanationView | null { + if (explanation) return explanation; + if (logs && logs.length > 0) { + const parsedFromLogs = explainFailureLogs(logs); + if (parsedFromLogs) return parsedFromLogs; + } + if (legacySummary) return explainFailureSummary(legacySummary); + return null; +} diff --git a/web/src/components/jobs/JobsTable.tsx b/web/src/components/jobs/JobsTable.tsx new file mode 100644 index 0000000..b1bfadd --- /dev/null +++ b/web/src/components/jobs/JobsTable.tsx @@ -0,0 +1,225 @@ +import { RefreshCw, Ban, Trash2, MoreHorizontal } from "lucide-react"; +import { motion, AnimatePresence } from "framer-motion"; +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; +import type { RefObject, MutableRefObject } from "react"; +import type React from "react"; +import type { Job, ConfirmConfig } from "./types"; +import { isJobActive, retryCountdown } from "./types"; + +function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} + +interface JobsTableProps { + jobs: Job[]; + loading: boolean; + selected: Set; + focusedJobId: number | null; + tick: number; + encodeStartTimes: MutableRefObject>; + menuJobId: number | null; + menuRef: RefObject; + toggleSelect: (id: number) => void; + toggleSelectAll: () => void; + fetchJobDetails: (id: number) => Promise; + setMenuJobId: (id: number | null) => void; + openConfirm: (config: ConfirmConfig) => void; + handleAction: (id: number, action: "cancel" | "restart" | "delete") => Promise; + handlePriority: (job: Job, priority: number, label: string) => Promise; + getStatusBadge: (status: string) => React.ReactElement; +} + +function calcEta(encodeStartTimes: MutableRefObject>, 
jobId: number, progress: number): string | null { + if (progress <= 0 || progress >= 100) return null; + const startMs = encodeStartTimes.current.get(jobId); + if (!startMs) return null; + const elapsedMs = Date.now() - startMs; + const totalMs = elapsedMs / (progress / 100); + const remainingMs = totalMs - elapsedMs; + const remainingSecs = Math.round(remainingMs / 1000); + if (remainingSecs < 0) return null; + if (remainingSecs < 60) return `~${remainingSecs}s remaining`; + const mins = Math.ceil(remainingSecs / 60); + return `~${mins} min remaining`; +} + +export function JobsTable({ + jobs, loading, selected, focusedJobId, tick, encodeStartTimes, + menuJobId, menuRef, toggleSelect, toggleSelectAll, + fetchJobDetails, setMenuJobId, openConfirm, handleAction, handlePriority, + getStatusBadge, +}: JobsTableProps) { + return ( +
+ + + + + + + + + + + + + {loading && jobs.length === 0 ? ( + Array.from({ length: 5 }).map((_, index) => ( + + + + )) + ) : jobs.length === 0 ? ( + + + + ) : ( + jobs.map((job) => ( + void fetchJobDetails(job.id)} + className={cn( + "group hover:bg-helios-surface/80 transition-all cursor-pointer", + selected.has(job.id) && "bg-helios-surface-soft", + focusedJobId === job.id && "bg-helios-solar/5" + )} + > + + + + + + + + )) + )} + +
+ 0 && jobs.every(j => selected.has(j.id))} + onChange={toggleSelectAll} + className="rounded border-helios-line/30 bg-helios-surface-soft accent-helios-solar" + /> + FileStatusProgressUpdated
+
+
+ No jobs found +
e.stopPropagation()}> + toggleSelect(job.id)} + className="rounded border-helios-line/30 bg-helios-surface-soft accent-helios-solar" + /> + + + + {job.input_path.split(/[/\\]/).pop()} + +
+ + {job.input_path} + + + P{job.priority} + +
+
+
+ + {getStatusBadge(job.status)} + + {job.status === "failed" && (() => { + void tick; + const countdown = retryCountdown(job); + return countdown ? ( +

+ {countdown} +

+ ) : null; + })()} +
+ {["encoding", "analyzing", "remuxing"].includes(job.status) ? ( +
+
+
+
+
+ {job.progress.toFixed(1)}% +
+ {job.status === "encoding" && (() => { + const eta = calcEta(encodeStartTimes, job.id, job.progress); + return eta ? ( +

{eta}

+ ) : null; + })()} + {job.status === "encoding" && job.encoder && ( + + {job.encoder} + + )} +
+ ) : ( + job.vmaf_score ? ( + + VMAF: {job.vmaf_score.toFixed(1)} + + ) : ( + - + ) + )} +
+ {new Date(job.updated_at).toLocaleString()} + e.stopPropagation()}> +
) : null}> + + + {menuJobId === job.id && ( + + + + + + {(job.status === "failed" || job.status === "cancelled") && ( + + )} + {["encoding", "analyzing", "remuxing"].includes(job.status) && ( + + )} + {!isJobActive(job) && ( + + )} + + )} + +
+
+
+ ); +} diff --git a/web/src/components/jobs/JobsToolbar.tsx b/web/src/components/jobs/JobsToolbar.tsx new file mode 100644 index 0000000..8f16274 --- /dev/null +++ b/web/src/components/jobs/JobsToolbar.tsx @@ -0,0 +1,140 @@ +import { Search, RefreshCw, ArrowDown, ArrowUp } from "lucide-react"; +import { clsx, type ClassValue } from "clsx"; +import { twMerge } from "tailwind-merge"; +import type { RefObject } from "react"; +import type React from "react"; +import type { TabType, SortField } from "./types"; +import { SORT_OPTIONS } from "./types"; + +function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)); +} + +interface JobsToolbarProps { + activeTab: TabType; + setActiveTab: (tab: TabType) => void; + setPage: (page: number) => void; + searchInput: string; + setSearchInput: (s: string) => void; + compactSearchOpen: boolean; + setCompactSearchOpen: (fn: boolean | ((prev: boolean) => boolean)) => void; + compactSearchRef: RefObject; + compactSearchInputRef: RefObject; + sortBy: SortField; + setSortBy: (s: SortField) => void; + sortDesc: boolean; + setSortDesc: (fn: boolean | ((prev: boolean) => boolean)) => void; + refreshing: boolean; + fetchJobs: () => Promise; +} + +export function JobsToolbar({ + activeTab, setActiveTab, setPage, + searchInput, setSearchInput, + compactSearchOpen, setCompactSearchOpen, compactSearchRef, compactSearchInputRef, + sortBy, setSortBy, sortDesc, setSortDesc, + refreshing, fetchJobs, +}: JobsToolbarProps) { + return ( +
+
+ {(["all", "active", "queued", "completed", "failed", "skipped", "archived"] as TabType[]).map((tab) => ( + + ))} +
+ +
+
+
+ + setSearchInput(e.target.value)} + className="w-full bg-helios-surface border border-helios-line/20 rounded-lg pl-9 pr-4 py-2 text-sm text-helios-ink focus:border-helios-solar outline-none" + /> +
+ + +
+ +
+ +
} className="relative xl:hidden"> + +
+
+ + } + type="text" + placeholder="Search files..." + value={searchInput} + onChange={(e) => setSearchInput(e.target.value)} + className="ml-2 min-w-0 flex-1 bg-transparent text-sm text-helios-ink outline-none placeholder:text-helios-slate" + /> +
+
+
+
+
+
+ ); +} diff --git a/web/src/components/jobs/types.ts b/web/src/components/jobs/types.ts new file mode 100644 index 0000000..5956311 --- /dev/null +++ b/web/src/components/jobs/types.ts @@ -0,0 +1,218 @@ +// Shared types for job management components + +export interface ExplanationView { + category: "decision" | "failure"; + code: string; + summary: string; + detail: string; + operator_guidance: string | null; + measured: Record; + legacy_reason: string; +} + +export interface ExplanationPayload { + category: "decision" | "failure"; + code: string; + summary: string; + detail: string; + operator_guidance: string | null; + measured: Record; + legacy_reason: string; +} + +export interface Job { + id: number; + input_path: string; + output_path: string; + status: string; + priority: number; + progress: number; + created_at: string; + updated_at: string; + attempt_count: number; + vmaf_score?: number; + decision_reason?: string; + decision_explanation?: ExplanationPayload | null; + encoder?: string; +} + +export interface JobMetadata { + duration_secs: number; + codec_name: string; + width: number; + height: number; + bit_depth?: number; + size_bytes: number; + video_bitrate_bps?: number; + container_bitrate_bps?: number; + fps: number; + container: string; + audio_codec?: string; + audio_channels?: number; + dynamic_range?: string; +} + +export interface EncodeStats { + input_size_bytes: number; + output_size_bytes: number; + compression_ratio: number; + encode_time_seconds: number; + encode_speed: number; + avg_bitrate_kbps: number; + vmaf_score?: number; +} + +export interface EncodeAttempt { + id: number; + attempt_number: number; + started_at: string | null; + finished_at: string; + outcome: "completed" | "failed" | "cancelled"; + failure_code: string | null; + failure_summary: string | null; + input_size_bytes: number | null; + output_size_bytes: number | null; + encode_time_seconds: number | null; +} + +export interface LogEntry { + id: number; + level: string; 
+ message: string; + created_at: string; +} + +export interface JobDetail { + job: Job; + metadata: JobMetadata | null; + encode_stats: EncodeStats | null; + encode_attempts: EncodeAttempt[] | null; + job_logs: LogEntry[]; + job_failure_summary: string | null; + decision_explanation: ExplanationPayload | null; + failure_explanation: ExplanationPayload | null; +} + +export interface CountMessageResponse { + count: number; + message: string; +} + +export interface ConfirmConfig { + title: string; + body: string; + confirmLabel: string; + confirmTone?: "danger" | "primary"; + onConfirm: () => Promise | void; +} + +export type TabType = "all" | "active" | "queued" | "completed" | "failed" | "skipped" | "archived"; +export type SortField = "updated_at" | "created_at" | "input_path" | "size"; + +export const SORT_OPTIONS: Array<{ value: SortField; label: string }> = [ + { value: "updated_at", label: "Last Updated" }, + { value: "created_at", label: "Date Added" }, + { value: "input_path", label: "File Name" }, + { value: "size", label: "File Size" }, +]; + +// Pure data utilities + +export function isJobActive(job: Job): boolean { + return ["analyzing", "encoding", "remuxing", "resuming"].includes(job.status); +} + +export function retryCountdown(job: Job): string | null { + if (job.status !== "failed") return null; + if (!job.attempt_count || job.attempt_count === 0) return null; + + const backoffMins = + job.attempt_count === 1 ? 5 + : job.attempt_count === 2 ? 15 + : job.attempt_count === 3 ? 60 + : 360; + + const updatedMs = new Date(job.updated_at).getTime(); + const retryAtMs = updatedMs + backoffMins * 60 * 1000; + const remainingMs = retryAtMs - Date.now(); + + if (remainingMs <= 0) return "Retrying soon"; + + const remainingMins = Math.ceil(remainingMs / 60_000); + if (remainingMins < 60) return `Retrying in ${remainingMins}m`; + const hrs = Math.floor(remainingMins / 60); + const mins = remainingMins % 60; + return mins > 0 ? 
`Retrying in ${hrs}h ${mins}m` : `Retrying in ${hrs}h`; +} + +export function formatBytes(bytes: number): string { + if (bytes === 0) return "0 B"; + const k = 1024; + const sizes = ["B", "KB", "MB", "GB", "TB"]; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i]; +} + +export function formatDuration(seconds: number): string { + const h = Math.floor(seconds / 3600); + const m = Math.floor((seconds % 3600) / 60); + const s = Math.floor(seconds % 60); + return [h, m, s].map(v => v.toString().padStart(2, "0")).join(":"); +} + +export function logLevelClass(level: string): string { + switch (level.toLowerCase()) { + case "error": + return "text-status-error"; + case "warn": + case "warning": + return "text-helios-solar"; + default: + return "text-helios-slate"; + } +} + +export function jobDetailEmptyState(status: string): { title: string; detail: string } { + switch (status) { + case "queued": + return { + title: "Waiting in queue", + detail: "This job is queued and waiting for an available worker slot.", + }; + case "analyzing": + return { + title: "Analyzing media", + detail: "Alchemist is reading the file metadata and planning the next action.", + }; + case "encoding": + return { + title: "Encoding in progress", + detail: "The transcode is running now. 
Detailed input metadata may appear once analysis data is fully persisted.",
+      };
+    case "remuxing":
+      return {
+        title: "Remuxing in progress",
+        detail: "The job is copying compatible streams into the target container without re-encoding video.",
+      };
+    case "resuming":
+      return {
+        title: "Resuming job",
+        detail: "The job is being re-queued and prepared to continue processing.",
+      };
+    case "failed":
+      return {
+        title: "No metadata captured",
+        detail: "This job failed before Alchemist could persist complete media metadata.",
+      };
+    case "skipped":
+      return {
+        title: "No metadata captured",
+        detail: "This file was skipped before full media metadata was stored in the job detail view.",
+      };
+    default:
+      return {
+        title: "No encode data available",
+        detail: "Detailed metadata is not available for this job yet.",
+      };
+  }
+}
diff --git a/web/src/components/jobs/useJobSSE.ts b/web/src/components/jobs/useJobSSE.ts
new file mode 100644
index 0000000..983927f
--- /dev/null
+++ b/web/src/components/jobs/useJobSSE.ts
@@ -0,0 +1,92 @@
+import { useEffect } from "react";
+import type { MutableRefObject, Dispatch, SetStateAction } from "react";
+import type { Job } from "./types";
+
+interface UseJobSSEOptions {
+  setJobs: Dispatch<SetStateAction<Job[]>>;
+  fetchJobsRef: MutableRefObject<() => Promise<void>>;
+  encodeStartTimes: MutableRefObject<Map<number, number>>;
+}
+
+export function useJobSSE({ setJobs, fetchJobsRef, encodeStartTimes }: UseJobSSEOptions): void {
+  useEffect(() => {
+    let eventSource: EventSource | null = null;
+    let cancelled = false;
+    let reconnectTimeout: number | null = null;
+    let reconnectAttempts = 0;
+
+    const getReconnectDelay = () => {
+      const baseDelay = 1000;
+      const maxDelay = 30000;
+      const delay = Math.min(baseDelay * Math.pow(2, reconnectAttempts), maxDelay);
+      const jitter = delay * 0.25 * (Math.random() * 2 - 1);
+      return Math.round(delay + jitter);
+    };
+
+    const connect = () => {
+      if (cancelled) return;
+      eventSource?.close();
+      eventSource = new
EventSource("/api/events"); + + eventSource.onopen = () => { + reconnectAttempts = 0; + }; + + eventSource.addEventListener("status", (e) => { + try { + const { job_id, status } = JSON.parse(e.data) as { + job_id: number; + status: string; + }; + if (status === "encoding") { + encodeStartTimes.current.set(job_id, Date.now()); + } else { + encodeStartTimes.current.delete(job_id); + } + setJobs((prev) => + prev.map((job) => job.id === job_id ? { ...job, status } : job) + ); + } catch { + /* ignore malformed */ + } + }); + + eventSource.addEventListener("progress", (e) => { + try { + const { job_id, percentage } = JSON.parse(e.data) as { + job_id: number; + percentage: number; + }; + setJobs((prev) => + prev.map((job) => job.id === job_id ? { ...job, progress: percentage } : job) + ); + } catch { + /* ignore malformed */ + } + }); + + eventSource.addEventListener("decision", () => { + void fetchJobsRef.current(); + }); + + eventSource.onerror = () => { + eventSource?.close(); + if (!cancelled) { + reconnectAttempts++; + const delay = getReconnectDelay(); + reconnectTimeout = window.setTimeout(connect, delay); + } + }; + }; + + connect(); + + return () => { + cancelled = true; + eventSource?.close(); + if (reconnectTimeout !== null) { + window.clearTimeout(reconnectTimeout); + } + }; + }, []); +} diff --git a/web/src/components/ui/ToastRegion.tsx b/web/src/components/ui/ToastRegion.tsx index ed5d2a0..1246461 100644 --- a/web/src/components/ui/ToastRegion.tsx +++ b/web/src/components/ui/ToastRegion.tsx @@ -9,24 +9,24 @@ function kindStyles(kind: ToastKind): { icon: LucideIcon; className: string } { if (kind === "success") { return { icon: CheckCircle2, - className: "border-status-success/30 bg-status-success/10 text-status-success", + className: "border-status-success/35 bg-helios-surface/95 text-status-success supports-[backdrop-filter]:bg-helios-surface/80 backdrop-blur-xl", }; } if (kind === "error") { return { icon: AlertCircle, - className: "border-status-error/30 
bg-status-error/10 text-status-error", + className: "border-status-error/35 bg-helios-surface/95 text-status-error supports-[backdrop-filter]:bg-helios-surface/80 backdrop-blur-xl", }; } if (kind === "warning") { return { icon: AlertTriangle, - className: "border-amber-500/30 bg-amber-500/10 text-amber-500", + className: "border-amber-500/35 bg-helios-surface/95 text-amber-500 supports-[backdrop-filter]:bg-helios-surface/80 backdrop-blur-xl", }; } return { icon: Info, - className: "border-helios-line/40 bg-helios-surface text-helios-ink", + className: "border-helios-line/40 bg-helios-surface/95 text-helios-ink supports-[backdrop-filter]:bg-helios-surface/80 backdrop-blur-xl", }; } @@ -84,7 +84,7 @@ export default function ToastRegion() {
diff --git a/web/src/styles/global.css b/web/src/styles/global.css index c4af716..a3cb159 100644 --- a/web/src/styles/global.css +++ b/web/src/styles/global.css @@ -364,13 +364,13 @@ [data-color-profile="midnight"] { --bg-main: 0 0 0; - --bg-panel: 5 5 5; - --bg-elevated: 10 10 10; + --bg-panel: 0 0 0; + --bg-elevated: 0 0 0; --accent-primary: 255 255 255; --accent-secondary: 200 200 200; --text-primary: 255 255 255; --text-muted: 150 150 150; - --border-subtle: 50 50 50; + --border-subtle: 34 34 34; } [data-color-profile="monochrome"] { @@ -475,6 +475,33 @@ z-index: -1; pointer-events: none; } + + html[data-color-profile="midnight"] { + background: rgb(0 0 0); + } + + html[data-color-profile="midnight"] body, + html[data-color-profile="midnight"] body::before, + html[data-color-profile="midnight"] .app-main { + background: rgb(0 0 0); + background-color: rgb(0 0 0); + } + + /* Midnight OLED: suppress decorative accent-tinted gradient overlays so + OLED pixels stay fully off. These divs use from-helios-solar/10 which + maps to rgba(255,255,255,0.10) — a visible gray on pure-black OLED. */ + html[data-color-profile="midnight"] .from-helios-solar\/10 { + --tw-gradient-from: rgb(0 0 0 / 0) var(--tw-gradient-from-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); + } + + /* Midnight OLED: darken scrollbar thumb so it doesn't glow white. */ + html[data-color-profile="midnight"] *::-webkit-scrollbar-thumb { + background-color: rgba(255 255 255 / 0.08); + } + html[data-color-profile="midnight"] *::-webkit-scrollbar-thumb:hover { + background-color: rgba(255 255 255 / 0.15); + } } @layer components {