Improve runtime defaults and preserve job history

This commit is contained in:
2026-03-21 19:03:09 -04:00
parent a2a3d0e9af
commit 5842297fdc
26 changed files with 914 additions and 48403 deletions

View File

@@ -38,6 +38,27 @@ jobs:
id: image
run: echo "name=$(echo '${{ github.repository }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Compute release lane
id: release_lane
shell: bash
run: |
VERSION="${{ steps.version.outputs.version }}"
IMAGE="${{ env.REGISTRY }}/${{ steps.image.outputs.name }}"
if [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
{
echo "tags<<EOF"
echo "${IMAGE}:${VERSION}"
echo "${IMAGE}:latest"
echo "EOF"
} >> "$GITHUB_OUTPUT"
else
{
echo "tags<<EOF"
echo "${IMAGE}:${VERSION}"
echo "EOF"
} >> "$GITHUB_OUTPUT"
fi
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -57,9 +78,7 @@ jobs:
context: .
platforms: linux/amd64
push: true
tags: |
${{ env.REGISTRY }}/${{ steps.image.outputs.name }}:${{ steps.version.outputs.version }}
${{ env.REGISTRY }}/${{ steps.image.outputs.name }}:latest
tags: ${{ steps.release_lane.outputs.tags }}
labels: |
org.opencontainers.image.version=${{ steps.version.outputs.version }}
org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}

View File

@@ -26,6 +26,18 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Determine release channel
id: release_channel
shell: bash
run: |
if [[ "${GITHUB_REF_NAME}" == *-rc.* ]]; then
echo "prerelease=true" >> "$GITHUB_OUTPUT"
echo "make_latest=false" >> "$GITHUB_OUTPUT"
else
echo "prerelease=false" >> "$GITHUB_OUTPUT"
echo "make_latest=true" >> "$GITHUB_OUTPUT"
fi
# Use bundled Bun (installed via npm or setup-bun) to build frontend
- name: Install Bun
@@ -59,11 +71,13 @@ jobs:
cp target/${{ matrix.target }}/release/alchemist.exe alchemist-${{ matrix.target_name }}.exe
- name: Upload Release Assets
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
alchemist-${{ matrix.target_name }}.exe
prerelease: ${{ steps.release_channel.outputs.prerelease }}
make_latest: ${{ steps.release_channel.outputs.make_latest }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -86,6 +100,24 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Determine release channel
id: release_channel
shell: bash
run: |
if [[ "${GITHUB_REF_NAME}" == *-rc.* ]]; then
echo "prerelease=true" >> "$GITHUB_OUTPUT"
echo "make_latest=false" >> "$GITHUB_OUTPUT"
else
echo "prerelease=false" >> "$GITHUB_OUTPUT"
echo "make_latest=true" >> "$GITHUB_OUTPUT"
fi
- name: Read app version
id: app_version
shell: bash
run: |
echo "value=$(grep -m1 '^version = ' Cargo.toml | cut -d'"' -f2)" >> "$GITHUB_OUTPUT"
- name: Install Bun
uses: oven-sh/setup-bun@v1
with:
@@ -128,9 +160,9 @@ jobs:
<key>CFBundleName</key>
<string>$APP_NAME</string>
<key>CFBundleShortVersionString</key>
<string>${{ github.ref_name }}</string>
<string>${{ steps.app_version.outputs.value }}</string>
<key>CFBundleVersion</key>
<string>${{ github.ref_name }}</string>
<string>${{ steps.app_version.outputs.value }}</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>LSApplicationCategoryType</key>
@@ -149,10 +181,12 @@ jobs:
ditto -c -k --keepParent "$APP_DIR" "alchemist-macos-${{ matrix.target_name }}.zip"
- name: Upload Release Assets
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: alchemist-macos-${{ matrix.target_name }}.zip
prerelease: ${{ steps.release_channel.outputs.prerelease }}
make_latest: ${{ steps.release_channel.outputs.make_latest }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -166,6 +200,18 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Determine release channel
id: release_channel
shell: bash
run: |
if [[ "${GITHUB_REF_NAME}" == *-rc.* ]]; then
echo "prerelease=true" >> "$GITHUB_OUTPUT"
echo "make_latest=false" >> "$GITHUB_OUTPUT"
else
echo "prerelease=false" >> "$GITHUB_OUTPUT"
echo "make_latest=true" >> "$GITHUB_OUTPUT"
fi
- name: Install Bun
uses: oven-sh/setup-bun@v1
with:
@@ -223,9 +269,11 @@ jobs:
mv Alchemist*.AppImage alchemist-linux-x86_64.AppImage
- name: Upload Release Assets
uses: softprops/action-gh-release@v1
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: alchemist-linux-x86_64.AppImage
prerelease: ${{ steps.release_channel.outputs.prerelease }}
make_latest: ${{ steps.release_channel.outputs.make_latest }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

10
.idea/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# Default ignored files
/shelf/
/workspace.xml
# Ignored default folder with query files
/queries/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Editor-based HTTP Client requests
/httpRequests/

12
.idea/alchemist.iml generated Normal file
View File

@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="EMPTY_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$">
<sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
<sourceFolder url="file://$MODULE_DIR$/tests" isTestSource="true" />
<excludeFolder url="file://$MODULE_DIR$/target" />
</content>
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

8
.idea/modules.xml generated Normal file
View File

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/alchemist.iml" filepath="$PROJECT_DIR$/.idea/alchemist.iml" />
</modules>
</component>
</project>

6
.idea/vcs.xml generated Normal file
View File

@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="VcsDirectoryMappings">
<mapping directory="" vcs="Git" />
</component>
</project>

View File

@@ -2,6 +2,13 @@
All notable changes to this project will be documented in this file.
## [v0.2.10-rc.1] - 2026-03-07
- Job lifecycle safety hardening: queued vs active cancel handling, active-job delete/restart blocking, batch-action conflict reporting, and stricter status/stat persistence.
- Output handling now supports mirrored `output_root` destinations plus temp-file promotion so replace mode preserves the last good artifact until encode, size, and quality gates pass.
- Scheduler, setup, and watch-folder parity updates shipped together: immediate schedule reevaluation, Intel Arc H.264 detection fix, H.264 setup option, canonicalized watch folders, and recursive watch configuration in the UI.
- Jobs and settings UX now expose per-job priority controls, output-root file settings, active-job-safe actions, and the Astro router deprecation cleanup.
- RC release hardening for `0.2.10-rc.1`: version metadata synced across app, Docker, and web artifacts; prerelease workflow behavior added; release instructions updated around main-driven RC Docker tags and tag-driven prerelease assets.
## [v0.2.9] - 2026-03-06
- Runtime reliability pass: watcher/scanner hardening, resilient event consumers, config reload improvements, and live hardware refresh.
- Admin UX refresh across dashboard, settings, setup, logs, jobs, charts, and system status with stronger error handling and feedback.

2
Cargo.lock generated
View File

@@ -26,7 +26,7 @@ dependencies = [
[[package]]
name = "alchemist"
version = "0.2.9"
version = "0.2.10-rc.1"
dependencies = [
"anyhow",
"argon2",

View File

@@ -1,6 +1,6 @@
[package]
name = "alchemist"
version = "0.2.9"
version = "0.2.10-rc.1"
edition = "2021"
license = "GPL-3.0"
build = "build.rs"

View File

@@ -2,18 +2,23 @@
> Intelligent video transcoding automation with hardware acceleration and CPU fallback
Alchemist is a Rust-based video transcoding system that automatically converts your media library to efficient AV1 format using hardware acceleration (GPU) or software encoding (CPU fallback).
Alchemist is a Rust-based video transcoding system that automatically converts your media library to efficient modern codecs using hardware acceleration (GPU) or software encoding (CPU fallback).
## Features
- **Hardware Acceleration**: Supports NVIDIA (NVENC), Intel (QSV), AMD (VAAPI), Apple (VideoToolbox)
- **CPU Fallback**: Automatic software encoding with libsvtav1 when GPU is unavailable
- **Intelligent Analysis**: Only transcodes files that will benefit from AV1 encoding
- **Web Dashboard**: Real-time monitoring and control via Axum/Askama/HTMX-based UI
- **Codec Targets**: AV1, HEVC, and H.264 output profiles
- **CPU Fallback**: Automatic software encoding when GPU acceleration is unavailable
- **Intelligent Analysis**: Only transcodes files that will benefit from compression
- **Web Dashboard**: Axum backend with an Astro + React frontend for real-time monitoring and control
- **Single Binary**: All assets and templates are embedded into the binary for easy deployment
- **Background Processing**: Queue-based system with concurrent job support
- **Performance Metrics**: Detailed logging and statistics for each transcode job
- **HDR Handling**: Preserve HDR metadata or tonemap to SDR for compatibility
- **Priority Queueing**: Promote, lower, or reset per-job priority from the dashboard
- **Watch Folders**: Mix recursive and top-level watch directories
- **Safe Replace Flow**: Replacement encodes write to a temporary file and promote only after all gates pass
- **Mirrored Output Roots**: Optionally write outputs to a separate root while preserving source-relative folders
## Quick Start
@@ -57,7 +62,7 @@ docker run -d \
alchemist
```
Access the web interface at `http://localhost:3000`
Access the web interface at `http://localhost:3000`.
## Configuration
@@ -84,10 +89,19 @@ directories = [ # Auto-scan directories
Runtime environment variables:
- `ALCHEMIST_CONFIG_PATH` config file path (default: `./config.toml`)
- `ALCHEMIST_DB_PATH` SQLite database path (default: `./alchemist.db`)
- `ALCHEMIST_CONFIG_PATH` config file path (default: `~/.openbitdo/config.toml` on Linux/macOS, `./config.toml` elsewhere)
- `ALCHEMIST_DB_PATH` SQLite database path (default: `~/.openbitdo/alchemist.db` on Linux/macOS, `./alchemist.db` elsewhere)
- `ALCHEMIST_CONFIG_MUTABLE` allow runtime config writes (`true`/`false`, default: `true`)
Most operational settings are managed from the web UI after first boot:
- `Transcode`: codec target, concurrency, thresholds, HDR handling
- `Files`: output suffix/extension, optional `output_root`, replace strategy, source deletion
- `Watch Folders`: add/remove canonicalized paths and choose recursive or top-level watching
- `Jobs`: cancel active work, restart terminal jobs, delete terminal history, and adjust job priority
When `output_root` is set, Alchemist mirrors the source-relative directory structure under that root. If it cannot determine a matching source root, it falls back to sibling output behavior.
## Supported Platforms
- **Linux**: x86_64 (Docker & Binary)

View File

@@ -1 +1 @@
0.2.9
0.2.10-rc.1

107
audit.md Normal file
View File

@@ -0,0 +1,107 @@
# Alchemist Codebase Audit
Alchemist is an intelligent video transcoding automation system written in Rust. It utilizes a background queue with hardware acceleration and CPU fallback to efficiently convert user media libraries to modern codecs (AV1, HEVC, H.264) based on bitrate, resolution, and BPP (bits per pixel) heuristics.
This audit documentation provides a detailed breakdown of the codebase's architecture, core modules, data structures, and the flow of media processing.
## 1. System Architecture & Entry Point
Alchemist can run in two modes:
1. **Server Mode (Default):** Runs an Axum web server providing a REST API and a Web UI. It operates a background loop, monitors watch directories, and processes files according to a schedule.
2. **CLI Mode (`--cli`):** Runs a one-off scan and transcode job over provided directories and exits upon completion.
### `src/main.rs` & `src/lib.rs`
- **Boot Sequence:** Reads command line arguments using `clap`. Initializes the application environment, logging (`tracing`), and database. It dynamically detects hardware (GPU/CPU encoders) and sets up the orchestrator and `Agent` (processor).
- **Configuration Reloading:** Monitors `config.toml` changes using `notify`. On change, it dynamically re-applies configuration, re-detects hardware capabilities, updates concurrent limits, and resets file watchers.
### `src/server.rs`
- The REST API built with `axum`. Serves the frontend (via `rust-embed` or local files) and exposes endpoints to control jobs, retrieve stats, manage configuration, browse the server filesystem (`fs_browser`), and handle authentication (sessions and argon2 password hashing).
- Exposes Server-Sent Events (SSE) at `/api/events` to push realtime logs, progress updates, and job state changes to the web dashboard.
- Utilizes `RateLimitEntry` to manage global and login rate limits.
---
## 2. Core State & Data Management
### `src/db.rs`
- Built on `sqlx` and SQLite (`alchemist.db`), using Write-Ahead Logging (WAL) for concurrency.
- **Job Tracking:** Stores the state (`queued`, `analyzing`, `encoding`, `completed`, `skipped`, `failed`, `cancelled`), retry attempts, priority, progress, and logs for each media file.
- **State Projection:** To ensure robust interaction between the web UI and transcode engine, certain configurations (watch directories, schedule windows, UI preferences, notifications) are "projected" from the central `.toml` config to the database.
- Handles atomic enqueueing of jobs, deduping by file modification time (`mtime_hash`). Includes robust stats aggregation functions (`get_aggregated_stats`, `get_daily_stats`).
### `src/config.rs` & `src/settings.rs`
- Defines the hierarchical structure of `config.toml` (`TranscodeConfig`, `HardwareConfig`, `ScannerConfig`, `ScheduleConfig`, etc.).
- Enums handle configuration mappings like `QualityProfile` (Quality/Balanced/Speed) and map them to their respective FFmpeg CRF/preset flags.
- `settings.rs` manages the logic of hydrating the database with the active state of `config.toml` via `save_config_and_project()`.
---
## 3. Media Pipeline
The processing of media files involves a multi-stage pipeline, orchestrating scanning, probing, decision making, execution, and verification.
### `src/media/pipeline.rs`
This module defines the architectural interfaces and data structures of the entire transcode process.
- **Interfaces:** `Analyzer` (probes the file), `Planner` (decides what to do), `Executor` (runs the transcode).
- **Structures:** `MediaMetadata`, `MediaAnalysis`, `TranscodePlan`, `ExecutionResult`.
- **Pipeline Loop (`Pipeline::process_job`):**
1. Verifies the input and temp output paths.
2. Runs the Analyzer.
3. Runs the Planner. If `TranscodeDecision::Skip` is returned, marks the job as skipped.
4. Dispatches the TranscodePlan to the Executor.
5. Computes VMAF scores (if enabled) against the temporary transcoded artifact to ensure quality hasn't drastically degraded.
6. Promotes the artifact to the final path and updates the database with exact encode sizes and statistics.
### `src/media/processor.rs` (`Agent`)
- Acts as the background task runner. Manages a `tokio::sync::Semaphore` based on the configured concurrency limit.
- Sits in an infinite loop claiming queued jobs from the database and spawning asynchronous tasks to run `Pipeline::process_job`.
- Exposes `pause()`, `resume()`, and `set_scheduler_paused()` hooks to control the global state of the engine.
### `src/media/analyzer.rs` (`FfmpegAnalyzer`)
- Wraps `ffprobe` using a blocking OS process.
- Parses video metadata (duration, FPS, resolution, codec, BPP, 10-bit color, HDR transfer functions like PQ/HLG) and outputs an `AnalysisConfidence` (High, Medium, Low) based on how complete the metadata is.
### `src/media/planner.rs` (`BasicPlanner`)
The intelligence layer of Alchemist.
- **Decision Engine (`should_transcode`):** Skips files that are already encoded in the target codec at 10-bit depth. Calculates the Bits-Per-Pixel (BPP) and normalizes it based on resolution. If the BPP is lower than the quality threshold (to avoid generational quality loss), or if the file is smaller than `min_file_size_mb`, it skips the file.
- **Encoder Selection (`select_encoder`):** Evaluates `HardwareInfo` (available hardware backends) against the requested output codec. It prefers GPU encoders (NVENC, QSV, VAAPI, AMF, VideoToolbox) and falls back to CPU encoders (SVT-AV1, libx265) only if configured.
- **Subtitles & Audio:** Determines if audio can be copied or must be transcoded (Opus/AAC) based on container compatibility (e.g. mp4 vs mkv) and "heavy" codecs (TrueHD/FLAC). Plans subtitle burn-ins or sidecar extraction (`.mks`).
### `src/media/executor.rs` (`FfmpegExecutor`)
- Implements the `Executor` trait. Links `TranscodePlan` to the `Transcoder`.
- Provides an implementation of `ExecutionObserver` which listens to standard error outputs from FFmpeg to persist textual logs and calculated percentage progress to the database and SSE broadcast channel.
- Runs a post-transcode probe on the output to ensure the actual executed codec and hardware tags match the requested plan (detecting transparent failures).
---
## 4. Execution & Orchestration
### `src/orchestrator.rs` (`Transcoder`)
- A robust, low-level wrapper around the `ffmpeg` subprocess.
- Manages an internal state of `cancel_channels` and `pending_cancels` (`HashMap<i64, oneshot::Sender<()>>`). If a job is cancelled via the UI, it sends a kill signal to the corresponding FFmpeg subprocess for that job.
- Streams FFmpeg output to observers line-by-line while simultaneously detecting crashes, emitting `AlchemistError::FFmpeg` with the last 20 lines of standard error context if the process fails.
---
## 5. System Components
### `src/system/hardware.rs`
- Automatically probes the host for GPU acceleration capabilities.
- Determines the active vendor (Nvidia, AMD, Intel, Apple, CPU).
- Executes dummy FFmpeg `lavfi` (black frame) encode tests against known hardware encoder strings (e.g. `hevc_vaapi`, `av1_qsv`, `h264_nvenc`) to empirically verify that the system environment/drivers are correctly configured before claiming an encoder is available.
- Handles explicit `/dev/dri/renderD128` overrides for Linux Docker containers.
### `src/media/scanner.rs`
- Utilizes `rayon` for fast, parallel recursive file-system scanning.
- Filters target directories based on user-defined file extensions. Returns a sorted list of `DiscoveredMedia` ready for DB ingestion.
### `src/scheduler.rs`
- Checks an array of configured `ScheduleWindow` records from the DB every 60 seconds.
- Calculates if the current minute of the current day lies within an active window.
- Dynamically invokes `agent.set_scheduler_paused()` to restrict the CPU/GPU workload outside of allowed server hours.
---
## Summary
Alchemist combines a highly concurrent Rust backend (`tokio`, `axum`) with empirical validation mechanisms (VMAF scoring, FFmpeg test probes). Its architecture heavily isolates **Planning** (heuristic decision logic) from **Execution** (running the transcode), ensuring that the system can gracefully fall back, test different hardware topologies, and avoid re-transcoding media without degrading video quality.

File diff suppressed because it is too large Load Diff

View File

@@ -203,8 +203,8 @@ On first launch, Alchemist runs an interactive setup wizard to:
## Configuration
Configuration is stored at `config.toml` by default. Set `ALCHEMIST_CONFIG_PATH` to override the path.
Database is stored at `alchemist.db` by default. Set `ALCHEMIST_DB_PATH` to override the path.
Configuration is stored at `~/.openbitdo/config.toml` by default on Linux and macOS, and `./config.toml` elsewhere. Set `ALCHEMIST_CONFIG_PATH` to override the path.
Database is stored at `~/.openbitdo/alchemist.db` by default on Linux and macOS, and `./alchemist.db` elsewhere. Set `ALCHEMIST_DB_PATH` to override the path.
If `ALCHEMIST_CONFIG_MUTABLE=false`, settings/setup endpoints will return HTTP `409` for config write attempts.
### Full Configuration Reference
@@ -293,9 +293,9 @@ host = "0.0.0.0"
| Variable | Description | Default |
|----------|-------------|---------|
| `ALCHEMIST_CONFIG_PATH` | Primary config file path | `./config.toml` |
| `ALCHEMIST_CONFIG_PATH` | Primary config file path | `~/.openbitdo/config.toml` on Linux/macOS, `./config.toml` elsewhere |
| `ALCHEMIST_CONFIG` | Legacy alias for config path | unset |
| `ALCHEMIST_DB_PATH` | SQLite database file path | `./alchemist.db` |
| `ALCHEMIST_DB_PATH` | SQLite database file path | `~/.openbitdo/alchemist.db` on Linux/macOS, `./alchemist.db` elsewhere |
| `ALCHEMIST_DATA_DIR` | Legacy data dir fallback for DB (`<dir>/alchemist.db`) | unset |
| `ALCHEMIST_CONFIG_MUTABLE` | Enable/disable runtime config writes | `true` |
@@ -973,14 +973,32 @@ docker build --platform linux/amd64 -t alchemist .
### Versioning
Version is tracked in two places that must stay in sync:
Version metadata is tracked in three files that must stay in sync:
- `VERSION` file (read by Docker workflow)
- `Cargo.toml` `version` field
- `web/package.json` `version` field
To release:
1. Update both files to new version
2. Commit: `git commit -m "v0.2.5: Description"`
3. Push to master (triggers Docker build)
Supporting release metadata must also be updated for every cut:
- `CHANGELOG.md` top release entry
- `docs/Documentation.md` changelog section and footer
- `Cargo.lock` root package version should be refreshed after the Cargo version bump
Recommended workflow:
1. Run `./scripts/bump_version.sh 0.2.10-rc.1`
2. Update `CHANGELOG.md` and the docs changelog entry with release notes
3. Run `cargo test --quiet`
4. Run `bun run verify` in `web/`
5. Run `bun run test:reliability` in `web-e2e/`
6. Push the merged release commit to `main` so the Docker workflow publishes `:0.2.10-rc.1`
7. Stable versions additionally publish `:latest`; prereleases must not
8. Use `workflow_dispatch` on `.github/workflows/release.yml` as a dry run before tagging if you want to exercise the build matrix without publishing assets
9. Create annotated tag `v0.2.10-rc.1` on that exact merged commit to publish Windows, macOS, and Linux prerelease assets
Important:
- Docker publishing is driven by `VERSION` on push to `main`
- Binary/app releases are driven by the `v*` git tag
- GitHub releases from `-rc.` tags must be marked prerelease and must not become latest
- Do not create the release tag from a dirty or unmerged worktree
---
@@ -1228,6 +1246,13 @@ A:
## Changelog
### v0.2.10-rc.1
- ✅ Job lifecycle safety pass: queued vs active cancel handling, active-job delete/restart blocking, batch conflict responses, and stricter DB persistence checks
- ✅ Output safety upgrade: `output_root`, mirrored destination paths, temp-file promotion, and non-destructive replace flow
- ✅ Scheduler/watch/setup parity: immediate schedule refresh, Intel Arc H.264 detection fix, H.264 setup option, canonicalized watch folders, and recursive watch-folder controls
- ✅ Jobs/settings UX improvements: per-job priority controls, output-root file settings, active-job-safe actions, and Astro router deprecation cleanup
- ✅ RC prep hardening: version metadata synced across app/web/Docker inputs and release instructions updated for prerelease-vs-stable publishing
### v0.2.8
- ✅ Default server mode; explicit CLI with `--cli --dir ...` and new `--reset-auth` flow
- ✅ Login redirects to setup when no users exist
@@ -1296,4 +1321,4 @@ Alchemist is licensed under the **GPL-3.0 License**. See `LICENSE` for details.
---
*Documentation for Alchemist v0.2.8*
*Documentation for Alchemist v0.2.10-rc.1*

View File

@@ -0,0 +1 @@
ALTER TABLE file_settings ADD COLUMN output_root TEXT;

View File

@@ -0,0 +1,9 @@
ALTER TABLE jobs ADD COLUMN archived BOOLEAN NOT NULL DEFAULT 0;
CREATE INDEX IF NOT EXISTS idx_jobs_archived_status_updated_at
ON jobs(archived, status, updated_at);
INSERT OR REPLACE INTO schema_info (key, value) VALUES
('schema_version', '3'),
('min_compatible_version', '0.2.5'),
('last_updated', datetime('now'));

View File

@@ -2,7 +2,7 @@
set -euo pipefail
if [ $# -ne 1 ]; then
echo "Usage: $0 <version>" >&2
echo "Usage: $0 <version> # example: 0.2.10 or 0.2.10-rc.1" >&2
exit 1
fi
@@ -47,29 +47,35 @@ if count == 0:
raise SystemExit("Failed to update web/package.json version field.")
pkg.write_text(new_text)
# CHANGELOG.md (top entry)
changelog = root / "CHANGELOG.md"
text = changelog.read_text()
text = re.sub(r'(?m)^## \[v[^\]]+\]', f'## [v{version}]', text, count=1)
changelog.write_text(text)
# Cargo.lock (root package entry)
lock = root / "Cargo.lock"
text = lock.read_text()
text = re.sub(
r'(?ms)(\[\[package\]\]\nname = "alchemist"\nversion = )"[^"]+"',
rf'\1"{version}"',
text,
count=1,
)
lock.write_text(text)
# docs/Documentation.md footer + latest changelog entry
# docs/Documentation.md footer
docs = root / "docs" / "Documentation.md"
text = docs.read_text()
text = re.sub(r'(?m)^\*Documentation for Alchemist v[^*]+\*', f'*Documentation for Alchemist v{version}*', text, count=1)
# Update the first changelog entry version after the Changelog header
m = re.search(r'(## Changelog\n\n)(### v[^\n]+)', text)
if m:
prefix = m.group(1)
text = text.replace(m.group(2), f'### v{version}', 1)
docs.write_text(text)
print(f"Updated version to {version}")
PY
read -r -p "Create git tag (e.g., v${VERSION}) or leave blank to skip: " TAG
if [ -n "$TAG" ]; then
git -C "$ROOT_DIR" tag -a "$TAG" -m "$TAG"
echo "Created tag: $TAG"
fi
cat <<EOF
Next steps:
1. Update CHANGELOG.md and docs/Documentation.md release notes for v${VERSION}
2. Run cargo test --quiet
3. Run bun run verify (in web/)
4. Run bun run test:reliability (in web-e2e/)
5. Merge the release-prep commit to main so Docker publishes ${VERSION}
6. Stable versions also publish latest; prereleases must not
7. Create annotated tag v${VERSION} on that exact merged commit for binary releases
Note: this script no longer creates git tags. Tags must be created separately after merge.
EOF

View File

@@ -426,6 +426,7 @@ impl Db {
ON CONFLICT(input_path) DO UPDATE SET
output_path = excluded.output_path,
status = CASE WHEN mtime_hash != excluded.mtime_hash THEN 'queued' ELSE status END,
archived = 0,
mtime_hash = excluded.mtime_hash,
updated_at = CURRENT_TIMESTAMP
WHERE mtime_hash != excluded.mtime_hash OR output_path != excluded.output_path",
@@ -465,7 +466,7 @@ impl Db {
COALESCE(attempt_count, 0) as attempt_count,
NULL as vmaf_score,
created_at, updated_at
FROM jobs WHERE status = 'queued'
FROM jobs WHERE status = 'queued' AND archived = 0
ORDER BY priority DESC, created_at ASC LIMIT 1",
)
.fetch_optional(&self.pool)
@@ -479,7 +480,7 @@ impl Db {
"UPDATE jobs
SET status = 'analyzing', updated_at = CURRENT_TIMESTAMP
WHERE id = (
SELECT id FROM jobs WHERE status = 'queued'
SELECT id FROM jobs WHERE status = 'queued' AND archived = 0
ORDER BY priority DESC, created_at ASC LIMIT 1
)
RETURNING id, input_path, output_path, status, NULL as decision_reason,
@@ -532,6 +533,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.archived = 0
ORDER BY j.updated_at DESC",
)
.fetch_all(&self.pool)
@@ -652,7 +654,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.id = ?",
WHERE j.id = ? AND j.archived = 0",
)
.bind(id)
.fetch_optional(&self.pool)
@@ -672,7 +674,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.status = ?
WHERE j.status = ? AND j.archived = 0
ORDER BY j.priority DESC, j.created_at ASC",
)
.bind(status)
@@ -701,7 +703,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE 1=1 "
WHERE j.archived = 0 "
);
if let Some(statuses) = statuses {
@@ -799,7 +801,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.id = ?",
WHERE j.id = ? AND j.archived = 0",
)
.bind(id)
.fetch_optional(&self.pool)
@@ -822,7 +824,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.id IN (",
WHERE j.archived = 0 AND j.id IN (",
);
let mut separated = qb.separated(", ");
for id in ids {
@@ -910,7 +912,7 @@ impl Db {
(SELECT vmaf_score FROM encode_stats WHERE job_id = j.id) as vmaf_score,
j.created_at, j.updated_at
FROM jobs j
WHERE j.input_path = ?",
WHERE j.input_path = ? AND j.archived = 0",
)
.bind(path)
.fetch_optional(&self.pool)
@@ -1309,16 +1311,24 @@ impl Db {
}
pub async fn restart_failed_jobs(&self) -> Result<u64> {
let result = sqlx::query("UPDATE jobs SET status = 'queued', updated_at = CURRENT_TIMESTAMP WHERE status IN ('failed', 'cancelled')")
.execute(&self.pool)
.await?;
let result = sqlx::query(
"UPDATE jobs
SET status = 'queued', progress = 0.0, updated_at = CURRENT_TIMESTAMP
WHERE status IN ('failed', 'cancelled') AND archived = 0",
)
.execute(&self.pool)
.await?;
Ok(result.rows_affected())
}
pub async fn clear_completed_jobs(&self) -> Result<u64> {
let result = sqlx::query("DELETE FROM jobs WHERE status = 'completed'")
.execute(&self.pool)
.await?;
let result = sqlx::query(
"UPDATE jobs
SET archived = 1, updated_at = CURRENT_TIMESTAMP
WHERE status = 'completed' AND archived = 0",
)
.execute(&self.pool)
.await?;
Ok(result.rows_affected())
}
@@ -1692,4 +1702,53 @@ mod tests {
let _ = std::fs::remove_file(db_path);
Ok(())
}
#[tokio::test]
async fn clear_completed_archives_jobs_but_preserves_encode_stats(
) -> std::result::Result<(), Box<dyn std::error::Error>> {
let mut db_path = std::env::temp_dir();
let token: u64 = rand::random();
db_path.push(format!("alchemist_archive_completed_{}.db", token));
let db = Db::new(db_path.to_string_lossy().as_ref()).await?;
let input = Path::new("movie.mkv");
let output = Path::new("movie-alchemist.mkv");
let _ = db
.enqueue_job(input, output, SystemTime::UNIX_EPOCH)
.await?;
let job = db
.get_job_by_input_path("movie.mkv")
.await?
.ok_or_else(|| std::io::Error::other("missing job"))?;
db.update_job_status(job.id, JobState::Completed).await?;
db.save_encode_stats(EncodeStatsInput {
job_id: job.id,
input_size: 2_000,
output_size: 1_000,
compression_ratio: 0.5,
encode_time: 42.0,
encode_speed: 1.2,
avg_bitrate: 800.0,
vmaf_score: Some(96.5),
})
.await?;
let cleared = db.clear_completed_jobs().await?;
assert_eq!(cleared, 1);
assert!(db.get_job_by_id(job.id).await?.is_none());
assert!(db.get_job_by_input_path("movie.mkv").await?.is_none());
let visible_completed = db.get_jobs_by_status(JobState::Completed).await?;
assert!(visible_completed.is_empty());
let aggregated = db.get_aggregated_stats().await?;
assert_eq!(aggregated.completed_jobs, 1);
assert_eq!(aggregated.total_input_size, 2_000);
assert_eq!(aggregated.total_output_size, 1_000);
drop(db);
let _ = std::fs::remove_file(db_path);
Ok(())
}
}

View File

@@ -85,6 +85,24 @@ async fn apply_reloaded_config(
Ok(detected_hardware)
}
fn config_watch_target(config_path: &Path) -> &Path {
config_path
.parent()
.filter(|parent| !parent.as_os_str().is_empty())
.unwrap_or_else(|| Path::new("."))
}
fn should_reload_config_for_event(event: &notify::Event, config_path: &Path) -> bool {
if !event.paths.iter().any(|path| path == config_path) {
return false;
}
matches!(
&event.kind,
notify::EventKind::Create(_) | notify::EventKind::Modify(_) | notify::EventKind::Any
)
}
async fn run() -> Result<()> {
// Initialize logging
tracing_subscriber::fmt()
@@ -418,10 +436,12 @@ async fn run() -> Result<()> {
match watcher_res {
Ok(mut watcher) => {
if let Err(e) =
watcher.watch(config_watch_path.as_path(), RecursiveMode::NonRecursive)
{
error!("Failed to watch config file {:?}: {}", config_watch_path, e);
let watch_target = config_watch_target(config_watch_path.as_path()).to_path_buf();
if let Err(e) = watcher.watch(watch_target.as_path(), RecursiveMode::NonRecursive) {
error!(
"Failed to watch config path {:?} via {:?}: {}",
config_watch_path, watch_target, e
);
} else {
// Prevent watcher from dropping by keeping it in the spawn if needed,
// or just spawning the processing loop.
@@ -433,8 +453,8 @@ async fn run() -> Result<()> {
let _watcher = watcher;
while let Some(event) = rx_notify.recv().await {
if let notify::EventKind::Modify(_) = event.kind {
info!("Config file changed. Reloading...");
if should_reload_config_for_event(&event, config_watch_path.as_path()) {
info!("Config file changed ({:?}). Reloading...", &event.kind);
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
match apply_reloaded_config(
@@ -541,6 +561,10 @@ async fn run() -> Result<()> {
mod tests {
use super::*;
use clap::Parser;
use notify::{
event::{CreateKind, ModifyKind, RenameMode},
Event, EventKind,
};
fn temp_db_path(prefix: &str) -> PathBuf {
let mut db_path = std::env::temp_dir();
db_path.push(format!("{prefix}_{}.db", rand::random::<u64>()));
@@ -558,6 +582,40 @@ mod tests {
assert!(Args::try_parse_from(["alchemist", "--output-dir", "/tmp/out"]).is_err());
}
#[test]
// Verifies the reload predicate against the three event shapes a save can
// produce (plain create, rename-into-place, unrelated modify) and checks the
// watch target resolves to the config file's parent directory.
fn config_reload_matches_create_modify_and_rename_events() {
    let config_path = PathBuf::from("/tmp/alchemist-config.toml");
    // A fresh file creation at the config path must trigger a reload.
    let create = Event {
        kind: EventKind::Create(CreateKind::File),
        paths: vec![config_path.clone()],
        attrs: Default::default(),
    };
    assert!(should_reload_config_for_event(&create, &config_path));
    // Editor-style atomic save: temp file renamed onto the config path.
    // The config path appears second in the event's path list.
    let rename = Event {
        kind: EventKind::Modify(ModifyKind::Name(RenameMode::To)),
        paths: vec![
            PathBuf::from("/tmp/alchemist-config.toml.tmp"),
            config_path.clone(),
        ],
        attrs: Default::default(),
    };
    assert!(should_reload_config_for_event(&rename, &config_path));
    // Content change to a different file in the same directory: no reload.
    let unrelated = Event {
        kind: EventKind::Modify(ModifyKind::Data(notify::event::DataChange::Content)),
        paths: vec![PathBuf::from("/tmp/other.toml")],
        attrs: Default::default(),
    };
    assert!(!should_reload_config_for_event(&unrelated, &config_path));
    // The watcher is registered on the parent directory of the config file.
    assert_eq!(
        config_watch_target(config_path.as_path()),
        Path::new("/tmp")
    );
}
#[tokio::test]
async fn config_reload_refreshes_runtime_hardware_state(
) -> std::result::Result<(), Box<dyn std::error::Error>> {

View File

@@ -752,20 +752,36 @@ impl Pipeline {
return Err(JobFailure::EncoderUnavailable);
}
self.finalize_job(
job,
&file_path,
FinalizeJobContext {
output_path: &output_path,
temp_output_path: &temp_output_path,
plan: &plan,
start_time,
if let Err(err) = self
.finalize_job(
job.clone(),
&file_path,
FinalizeJobContext {
output_path: &output_path,
temp_output_path: &temp_output_path,
plan: &plan,
start_time,
metadata,
execution_result: &result,
},
)
.await
{
self.handle_finalize_failure(
job.id,
&plan,
metadata,
execution_result: &result,
},
)
.await
.map_err(|_| JobFailure::Transient)
&result,
&config_snapshot,
start_time,
&temp_output_path,
&err,
)
.await;
return Err(JobFailure::Transient);
}
Ok(())
}
Err(e) => {
if temp_output_path.exists() {
@@ -1041,6 +1057,57 @@ impl Pipeline {
Ok(())
}
/// Handle a failed finalization step for a job: log the error, remove
/// temporary outputs, emit a failure telemetry event, and mark the job
/// `Failed` in the database.
///
/// Best-effort semantics throughout: cleanup, logging, and DB errors are
/// warned about or ignored rather than propagated, since the job is already
/// being failed.
async fn handle_finalize_failure(
    &self,
    job_id: i64,
    plan: &TranscodePlan,
    metadata: &MediaMetadata,
    execution_result: &ExecutionResult,
    config_snapshot: &crate::config::Config,
    start_time: std::time::Instant,
    temp_output_path: &Path,
    err: &crate::error::AlchemistError,
) {
    tracing::error!("Job {}: Finalization failed: {}", job_id, err);
    let message = format!("Finalization failed: {err}");
    // Persist the failure to the job log; a logging failure is not fatal.
    let _ = self.db.add_log("error", Some(job_id), &message).await;
    // Remove the partially-written temp output so it cannot be mistaken for
    // a finished encode; a cleanup failure only warrants a warning.
    if temp_output_path.exists() {
        if let Err(cleanup_err) = tokio::fs::remove_file(temp_output_path).await {
            tracing::warn!(
                "Job {}: Failed to remove temp output after finalize error {:?}: {}",
                job_id,
                temp_output_path,
                cleanup_err
            );
        }
    }
    // Subtitle extraction may have produced its own temp files; clear them too.
    cleanup_temp_subtitle_output(job_id, plan).await;
    // Report the failed run through telemetry (emit_telemetry_event returns
    // early when telemetry is disabled in the snapshot).
    self.emit_telemetry_event(TelemetryEventParams {
        telemetry_enabled: config_snapshot.system.enable_telemetry,
        // Prefer the codec the encoder actually produced; fall back to plan.
        output_codec: execution_result
            .actual_output_codec
            .unwrap_or(execution_result.planned_output_codec),
        encoder_override: execution_result.actual_encoder_name.as_deref(),
        fallback: plan.fallback.as_ref(),
        metadata,
        event_type: "job_finished",
        status: Some("failure"),
        failure_reason: Some("finalize_failed"),
        input_size_bytes: Some(metadata.size_bytes),
        output_size_bytes: None,
        duration_ms: Some(start_time.elapsed().as_millis() as u64),
        speed_factor: None,
    })
    .await;
    // Finally flip the job state; the result is intentionally ignored since
    // there is nothing further to do if the DB update fails here.
    let _ = self
        .update_job_state(job_id, crate::db::JobState::Failed)
        .await;
}
async fn emit_telemetry_event(&self, params: TelemetryEventParams<'_>) {
if !params.telemetry_enabled {
return;
@@ -1120,6 +1187,10 @@ fn map_failure(error: &crate::error::AlchemistError) -> JobFailure {
mod tests {
use super::*;
use crate::db::Db;
use crate::system::hardware::{HardwareInfo, HardwareState, Vendor};
use crate::Transcoder;
use std::sync::Arc;
use tokio::sync::{broadcast, RwLock};
#[test]
fn generated_output_pattern_matches_default_suffix() {
@@ -1210,4 +1281,142 @@ mod tests {
let _ = std::fs::remove_dir_all(temp_root);
Ok(())
}
#[tokio::test]
// End-to-end check of `handle_finalize_failure`: seeds a real job in a temp
// DB, fabricates a plan/metadata/result, invokes the failure handler, and
// asserts the job is Failed, the temp output is deleted, and an error log
// entry was recorded.
async fn finalize_failure_marks_job_failed_and_cleans_temp_output() -> anyhow::Result<()> {
    // Unique temp paths so parallel test runs do not collide.
    let db_path = std::env::temp_dir().join(format!(
        "alchemist_finalize_failure_{}.db",
        rand::random::<u64>()
    ));
    let temp_root = std::env::temp_dir().join(format!(
        "alchemist_finalize_failure_{}",
        rand::random::<u64>()
    ));
    std::fs::create_dir_all(&temp_root)?;
    let db = Arc::new(Db::new(db_path.to_string_lossy().as_ref()).await?);
    // Seed a job that is mid-encode with a partial temp output on disk.
    let input = temp_root.join("movie.mkv");
    let output = temp_root.join("movie-alchemist.mkv");
    std::fs::write(&input, b"source")?;
    let _ = db
        .enqueue_job(&input, &output, SystemTime::UNIX_EPOCH)
        .await?;
    let job = db
        .get_job_by_input_path(input.to_string_lossy().as_ref())
        .await?
        .ok_or_else(|| anyhow::anyhow!("missing queued job"))?;
    db.update_job_status(job.id, crate::db::JobState::Encoding)
        .await?;
    let temp_output = temp_output_path_for(&output);
    std::fs::write(&temp_output, b"partial")?;
    // Minimal pipeline wiring: default config, CPU-only hardware, dummy
    // broadcast channel for events.
    let config = Arc::new(RwLock::new(crate::config::Config::default()));
    let hardware_state = HardwareState::new(Some(HardwareInfo {
        vendor: Vendor::Cpu,
        device_path: None,
        supported_codecs: vec!["av1".to_string(), "hevc".to_string(), "h264".to_string()],
        backends: Vec::new(),
    }));
    let (tx, _rx) = broadcast::channel(8);
    let pipeline = Pipeline::new(
        db.clone(),
        Arc::new(Transcoder::new()),
        config.clone(),
        hardware_state,
        Arc::new(tx),
        true,
    );
    // A representative H264/CPU transcode plan; values are arbitrary but
    // internally consistent.
    let plan = TranscodePlan {
        decision: TranscodeDecision::Transcode {
            reason: "test".to_string(),
        },
        output_path: Some(temp_output.clone()),
        container: "mkv".to_string(),
        requested_codec: crate::config::OutputCodec::H264,
        output_codec: Some(crate::config::OutputCodec::H264),
        encoder: Some(Encoder::H264X264),
        backend: Some(EncoderBackend::Cpu),
        rate_control: Some(RateControl::Crf { value: 21 }),
        encoder_preset: Some("medium".to_string()),
        threads: 0,
        audio: AudioStreamPlan::Copy,
        subtitles: SubtitleStreamPlan::Drop,
        filters: Vec::new(),
        allow_fallback: true,
        fallback: None,
    };
    // Fabricated probe metadata for the input file.
    let metadata = MediaMetadata {
        path: input.clone(),
        duration_secs: 12.0,
        codec_name: "h264".to_string(),
        width: 1920,
        height: 1080,
        bit_depth: Some(8),
        color_primaries: None,
        color_transfer: None,
        color_space: None,
        color_range: None,
        size_bytes: 2_000,
        video_bitrate_bps: Some(5_000_000),
        container_bitrate_bps: Some(5_500_000),
        fps: 24.0,
        container: "mkv".to_string(),
        audio_codec: Some("aac".to_string()),
        audio_bitrate_bps: Some(192_000),
        audio_channels: Some(2),
        audio_is_heavy: false,
        subtitle_streams: Vec::new(),
        dynamic_range: DynamicRange::Sdr,
    };
    // Execution "succeeded" up to finalization; stats are zeroed.
    let result = ExecutionResult {
        requested_codec: crate::config::OutputCodec::H264,
        planned_output_codec: crate::config::OutputCodec::H264,
        requested_encoder: Encoder::H264X264,
        used_encoder: Encoder::H264X264,
        used_backend: EncoderBackend::Cpu,
        fallback: None,
        fallback_occurred: false,
        actual_output_codec: Some(crate::config::OutputCodec::H264),
        actual_encoder_name: Some("libx264".to_string()),
        stats: ExecutionStats {
            encode_time_secs: 0.0,
            input_size: 0,
            output_size: 0,
            vmaf: None,
        },
    };
    let config_snapshot = config.read().await.clone();
    // Drive the failure path under test.
    pipeline
        .handle_finalize_failure(
            job.id,
            &plan,
            &metadata,
            &result,
            &config_snapshot,
            std::time::Instant::now(),
            &temp_output,
            &crate::error::AlchemistError::Unknown("disk full".to_string()),
        )
        .await;
    // The job must be Failed, the partial output gone, and an error logged.
    let updated = db
        .get_job_by_id(job.id)
        .await?
        .ok_or_else(|| anyhow::anyhow!("missing failed job"))?;
    assert_eq!(updated.status, crate::db::JobState::Failed);
    assert!(!temp_output.exists());
    let logs = db.get_logs(10, 0).await?;
    assert!(logs
        .iter()
        .any(|entry| entry.message.contains("Finalization failed")));
    let _ = std::fs::remove_dir_all(temp_root);
    let _ = std::fs::remove_file(db_path);
    Ok(())
}
}

View File

@@ -91,6 +91,10 @@ impl Agent {
self.scheduler_paused.load(Ordering::SeqCst)
}
/// Current maximum number of concurrently running jobs.
///
/// Reads the `semaphore_limit` atomic, so the value reflects runtime
/// adjustments made via `set_concurrent_jobs` rather than the on-disk config.
pub fn concurrent_jobs_limit(&self) -> usize {
    self.semaphore_limit.load(Ordering::SeqCst)
}
pub fn set_scheduler_paused(&self, paused: bool) {
let current = self.scheduler_paused.load(Ordering::SeqCst);
if current != paused {

View File

@@ -1,6 +1,7 @@
use std::env;
use std::path::{Path, PathBuf};
const APP_HOME_DIR: &str = ".openbitdo";
const DEFAULT_CONFIG_PATH: &str = "config.toml";
const DEFAULT_DB_PATH: &str = "alchemist.db";
@@ -12,11 +13,42 @@ fn parse_bool_env(value: &str) -> Option<bool> {
}
}
/// Compute the application home root (`<home>/.openbitdo`) for a given home
/// directory.
///
/// Returns `None` when no usable home directory is supplied, and always
/// `None` on targets other than Linux and macOS.
fn default_home_root_for(home: Option<&Path>) -> Option<PathBuf> {
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    {
        // An empty HOME is treated the same as an absent one.
        match home {
            Some(dir) if !dir.as_os_str().is_empty() => Some(dir.join(APP_HOME_DIR)),
            _ => None,
        }
    }
    #[cfg(not(any(target_os = "linux", target_os = "macos")))]
    {
        let _ = home;
        None
    }
}
/// Resolve the per-user application root from the `HOME` environment
/// variable. Returns `None` when `HOME` is unset or empty, or on targets
/// where `default_home_root_for` has no home-based default.
fn default_home_root() -> Option<PathBuf> {
    let home = env::var_os("HOME").map(PathBuf::from);
    default_home_root_for(home.as_deref())
}
/// Default config file location: `<home root>/config.toml` when a home root
/// exists, otherwise the bare relative filename.
fn default_config_path() -> PathBuf {
    match default_home_root() {
        Some(root) => root.join(DEFAULT_CONFIG_PATH),
        None => PathBuf::from(DEFAULT_CONFIG_PATH),
    }
}
/// Default database location: `<home root>/alchemist.db` when a home root
/// exists, otherwise the bare relative filename.
fn default_db_path() -> PathBuf {
    match default_home_root() {
        Some(root) => root.join(DEFAULT_DB_PATH),
        None => PathBuf::from(DEFAULT_DB_PATH),
    }
}
pub fn config_path() -> PathBuf {
env::var("ALCHEMIST_CONFIG_PATH")
.or_else(|_| env::var("ALCHEMIST_CONFIG"))
.map(PathBuf::from)
.unwrap_or_else(|_| PathBuf::from(DEFAULT_CONFIG_PATH))
.unwrap_or_else(|_| default_config_path())
}
pub fn db_path() -> PathBuf {
@@ -28,7 +60,7 @@ pub fn db_path() -> PathBuf {
return Path::new(&data_dir).join(DEFAULT_DB_PATH);
}
PathBuf::from(DEFAULT_DB_PATH)
default_db_path()
}
pub fn config_mutable() -> bool {
@@ -37,3 +69,33 @@ pub fn config_mutable() -> bool {
Err(_) => true,
}
}
#[cfg(test)]
mod tests {
    use super::*;

    // Only meaningful on unix-like targets: `default_home_root_for` returns
    // `None` on every other platform.
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    #[test]
    fn default_home_root_uses_openbitdo_directory() {
        let home = Path::new("/Users/tester");
        // The app root is the hidden `.openbitdo` directory under HOME.
        assert_eq!(
            default_home_root_for(Some(home)),
            Some(home.join(".openbitdo"))
        );
    }

    #[cfg(any(target_os = "linux", target_os = "macos"))]
    #[test]
    fn default_paths_live_under_openbitdo() {
        let root = default_home_root_for(Some(Path::new("/Users/tester")))
            .expect("expected home root on unix-like target");
        // Both the config file and the database default into the app root.
        assert_eq!(
            root.join(DEFAULT_CONFIG_PATH),
            PathBuf::from("/Users/tester/.openbitdo/config.toml")
        );
        assert_eq!(
            root.join(DEFAULT_DB_PATH),
            PathBuf::from("/Users/tester/.openbitdo/alchemist.db")
        );
    }
}

View File

@@ -956,10 +956,11 @@ async fn setup_complete_handler(
let mut next_config = match payload.settings {
Some(mut settings) => {
settings.scanner.directories = match normalize_setup_directories(&settings.scanner.directories) {
Ok(paths) => paths,
Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
};
settings.scanner.directories =
match normalize_setup_directories(&settings.scanner.directories) {
Ok(paths) => paths,
Err(msg) => return (StatusCode::BAD_REQUEST, msg).into_response(),
};
settings
}
None => {
@@ -986,6 +987,8 @@ async fn setup_complete_handler(
return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
}
let runtime_concurrent_jobs = next_config.transcode.concurrent_jobs;
let hardware_info =
match crate::system::hardware::detect_hardware_for_config(&next_config).await {
Ok(info) => info,
@@ -1044,7 +1047,7 @@ async fn setup_complete_handler(
state.setup_required.store(false, Ordering::Relaxed);
state
.agent
.set_concurrent_jobs(payload.concurrent_jobs)
.set_concurrent_jobs(runtime_concurrent_jobs)
.await;
state.hardware_state.replace(Some(hardware_info)).await;
refresh_file_watcher(&state).await;
@@ -1064,7 +1067,11 @@ async fn setup_complete_handler(
let cookie = build_session_cookie(&token);
(
[(header::SET_COOKIE, cookie)],
axum::Json(serde_json::json!({ "status": "saved" })),
axum::Json(serde_json::json!({
"status": "saved",
"message": "Setup completed successfully.",
"concurrent_jobs": runtime_concurrent_jobs
})),
)
.into_response()
}
@@ -1154,7 +1161,7 @@ struct StatsData {
concurrent_limit: usize,
}
async fn get_stats_data(db: &Db, config: &Config) -> Result<StatsData> {
async fn get_stats_data(db: &Db, concurrent_limit: usize) -> Result<StatsData> {
let s = db.get_stats().await?;
let total = s
.as_object()
@@ -1177,13 +1184,12 @@ async fn get_stats_data(db: &Db, config: &Config) -> Result<StatsData> {
completed,
active,
failed,
concurrent_limit: config.transcode.concurrent_jobs,
concurrent_limit,
})
}
async fn stats_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
let config = state.config.read().await;
match get_stats_data(&state.db, &config).await {
match get_stats_data(&state.db, state.agent.concurrent_jobs_limit()).await {
Ok(stats) => axum::Json(serde_json::json!({
"total": stats.total,
"completed": stats.completed,
@@ -1264,13 +1270,38 @@ async fn cancel_job_handler(
}
async fn restart_failed_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
let _ = state.db.restart_failed_jobs().await;
StatusCode::OK
match state.db.restart_failed_jobs().await {
Ok(count) => {
let message = if count == 0 {
"No failed or cancelled jobs were waiting to be retried.".to_string()
} else if count == 1 {
"Queued 1 failed or cancelled job for retry.".to_string()
} else {
format!("Queued {count} failed or cancelled jobs for retry.")
};
axum::Json(serde_json::json!({ "count": count, "message": message })).into_response()
}
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
}
}
async fn clear_completed_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
let _ = state.db.clear_completed_jobs().await;
StatusCode::OK
match state.db.clear_completed_jobs().await {
Ok(count) => {
let message = if count == 0 {
"No completed jobs were waiting to be cleared.".to_string()
} else if count == 1 {
"Cleared 1 completed job from the queue. Historical stats were preserved."
.to_string()
} else {
format!(
"Cleared {count} completed jobs from the queue. Historical stats were preserved."
)
};
axum::Json(serde_json::json!({ "count": count, "message": message })).into_response()
}
Err(err) => (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()).into_response(),
}
}
async fn pause_engine_handler(State(state): State<Arc<AppState>>) -> impl IntoResponse {
@@ -1294,9 +1325,7 @@ struct FsBrowseQuery {
path: Option<String>,
}
async fn fs_browse_handler(
Query(query): Query<FsBrowseQuery>,
) -> impl IntoResponse {
async fn fs_browse_handler(Query(query): Query<FsBrowseQuery>) -> impl IntoResponse {
match crate::system::fs_browser::browse(query.path.as_deref()).await {
Ok(response) => axum::Json(response).into_response(),
Err(err) => config_read_error_response("browse server filesystem", &err),
@@ -1661,7 +1690,6 @@ async fn system_resources_handler(State(state): State<Arc<AppState>>) -> Respons
Ok(stats) => stats,
Err(err) => return config_read_error_response("load system resource stats", &err),
};
let config = state.config.read().await;
// Query GPU utilization (using spawn_blocking to avoid blocking)
let (gpu_utilization, gpu_memory_percent) = tokio::task::spawn_blocking(query_gpu_utilization)
@@ -1675,7 +1703,7 @@ async fn system_resources_handler(State(state): State<Arc<AppState>>) -> Respons
memory_percent,
uptime_seconds,
active_jobs: stats.active,
concurrent_limit: config.transcode.concurrent_jobs,
concurrent_limit: state.agent.concurrent_jobs_limit(),
cpu_count,
gpu_utilization,
gpu_memory_percent,
@@ -2211,14 +2239,7 @@ async fn restart_job_handler(
if job.is_active() {
return blocked_jobs_response("restart is blocked while the job is active", &[job]);
}
if let Err(e) = state
.db
.update_job_status(job.id, crate::db::JobState::Queued)
.await
{
if is_row_not_found(&e) {
return StatusCode::NOT_FOUND.into_response();
}
if let Err(e) = state.db.batch_restart_jobs(&[job.id]).await {
return (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response();
}
StatusCode::OK.into_response()
@@ -3108,6 +3129,7 @@ mod tests {
.await?;
let mut settings = crate::config::Config::default();
settings.transcode.concurrent_jobs = 3;
settings.scanner.directories = vec![watch_dir.to_string_lossy().to_string()];
settings.appearance.active_theme_id = Some("midnight".to_string());
settings.notifications.targets = vec![crate::config::NotificationTargetConfig {
@@ -3147,9 +3169,14 @@ mod tests {
assert!(!state.setup_required.load(Ordering::Relaxed));
let persisted = crate::config::Config::load(config_path.as_path())?;
assert_eq!(persisted.appearance.active_theme_id.as_deref(), Some("midnight"));
assert_eq!(
persisted.appearance.active_theme_id.as_deref(),
Some("midnight")
);
assert_eq!(persisted.notifications.targets.len(), 1);
assert_eq!(persisted.schedule.windows.len(), 1);
assert_eq!(persisted.transcode.concurrent_jobs, 3);
assert_eq!(state.agent.concurrent_jobs_limit(), 3);
cleanup_paths(&[watch_dir, config_path, db_path]);
Ok(())
@@ -3171,7 +3198,10 @@ mod tests {
.oneshot(
Request::builder()
.method(Method::GET)
.uri(format!("/api/fs/browse?path={}", browse_root.to_string_lossy()))
.uri(format!(
"/api/fs/browse?path={}",
browse_root.to_string_lossy()
))
.body(Body::empty())
.unwrap(),
)
@@ -3542,6 +3572,52 @@ mod tests {
Ok(())
}
#[tokio::test]
async fn clear_completed_archives_jobs_and_preserves_stats(
) -> std::result::Result<(), Box<dyn std::error::Error>> {
let (state, app, config_path, db_path) = build_test_app(false, 8, |_| {}).await?;
let token = create_session(state.db.as_ref()).await?;
let (job, input_path, output_path) =
seed_job(state.db.as_ref(), JobState::Completed).await?;
state
.db
.save_encode_stats(crate::db::EncodeStatsInput {
job_id: job.id,
input_size: 2_000,
output_size: 1_000,
compression_ratio: 0.5,
encode_time: 60.0,
encode_speed: 1.5,
avg_bitrate: 900.0,
vmaf_score: Some(95.0),
})
.await?;
let response = app
.clone()
.oneshot(auth_request(
Method::POST,
"/api/jobs/clear-completed",
&token,
Body::empty(),
))
.await?;
assert_eq!(response.status(), StatusCode::OK);
let body = body_text(response).await;
assert!(body.contains("\"count\":1"));
assert!(body.contains("Historical stats were preserved"));
assert!(state.db.get_job_by_id(job.id).await?.is_none());
let aggregated = state.db.get_aggregated_stats().await?;
assert_eq!(aggregated.completed_jobs, 1);
assert_eq!(aggregated.total_input_size, 2_000);
assert_eq!(aggregated.total_output_size, 1_000);
cleanup_paths(&[input_path, output_path, config_path, db_path]);
Ok(())
}
#[tokio::test]
async fn cancel_queued_job_updates_status(
) -> std::result::Result<(), Box<dyn std::error::Error>> {

View File

@@ -150,16 +150,14 @@ fn browse_blocking(path: &Path) -> Result<FsBrowseResponse> {
})
}
fn recommendations_blocking(config: &Config, extra_dirs: &[String]) -> Result<FsRecommendationsResponse> {
fn recommendations_blocking(
config: &Config,
extra_dirs: &[String],
) -> Result<FsRecommendationsResponse> {
let mut seen = HashSet::new();
let mut recommendations = Vec::new();
for dir in config
.scanner
.directories
.iter()
.chain(extra_dirs.iter())
{
for dir in config.scanner.directories.iter().chain(extra_dirs.iter()) {
if let Ok(path) = canonical_or_original(Path::new(dir)) {
let path_string = path.to_string_lossy().to_string();
if seen.insert(path_string.clone()) {
@@ -237,7 +235,9 @@ fn preview_blocking(request: FsPreviewRequest) -> Result<FsPreviewResponse> {
let readable = exists && canonical.is_dir() && std::fs::read_dir(&canonical).is_ok();
let media_files = if readable {
scanner.scan_with_recursion(vec![(canonical.clone(), true)]).len()
scanner
.scan_with_recursion(vec![(canonical.clone(), true)])
.len()
} else {
0
};
@@ -256,7 +256,8 @@ fn preview_blocking(request: FsPreviewRequest) -> Result<FsPreviewResponse> {
let mut dir_warnings = directory_warnings(&canonical, readable);
if readable && media_files == 0 {
dir_warnings.push("No supported media files were found in this directory.".to_string());
dir_warnings
.push("No supported media files were found in this directory.".to_string());
}
warnings.extend(dir_warnings.clone());
@@ -296,7 +297,9 @@ fn default_browse_root() -> Result<PathBuf> {
return Ok(drive_path);
}
}
Err(AlchemistError::Watch("No accessible drive roots found".to_string()))
Err(AlchemistError::Watch(
"No accessible drive roots found".to_string(),
))
}
#[cfg(not(target_os = "windows"))]
@@ -369,13 +372,7 @@ fn candidate_roots() -> Vec<PathBuf> {
#[cfg(target_os = "linux")]
{
for root in [
"/media",
"/mnt",
"/srv",
"/data",
"/storage",
"/home",
"/var/lib",
"/media", "/mnt", "/srv", "/data", "/storage", "/home", "/var/lib",
] {
roots.insert(PathBuf::from(root));
}
@@ -409,8 +406,19 @@ fn classify_media_hint(path: &Path) -> MediaHint {
.unwrap_or_default()
.to_ascii_lowercase();
let media_names = [
"movies", "movie", "tv", "shows", "series", "anime", "media", "videos", "plex", "emby",
"jellyfin", "library", "downloads",
"movies",
"movie",
"tv",
"shows",
"series",
"anime",
"media",
"videos",
"plex",
"emby",
"jellyfin",
"library",
"downloads",
];
if media_names.iter().any(|candidate| name.contains(candidate)) {
return MediaHint::High;
@@ -433,7 +441,12 @@ fn classify_media_hint(path: &Path) -> MediaHint {
.path()
.extension()
.and_then(|ext| ext.to_str())
.is_some_and(|ext| scanner.extensions.iter().any(|candidate| candidate == &ext.to_ascii_lowercase()))
.is_some_and(|ext| {
scanner
.extensions
.iter()
.any(|candidate| candidate == &ext.to_ascii_lowercase())
})
{
media_files += 1;
if media_files >= 3 {
@@ -479,7 +492,9 @@ fn entry_warning(path: &Path, readable: bool) -> Option<String> {
return Some("Directory is not readable by the Alchemist process.".to_string());
}
if is_system_path(path) {
return Some("System directory. Only choose this if you know your media is stored here.".to_string());
return Some(
"System directory. Only choose this if you know your media is stored here.".to_string(),
);
}
None
}
@@ -490,10 +505,15 @@ fn directory_warnings(path: &Path, readable: bool) -> Vec<String> {
warnings.push("Directory is not readable by the Alchemist process.".to_string());
}
if is_system_path(path) {
warnings.push("This looks like a system path. Avoid scanning operating system folders.".to_string());
warnings.push(
"This looks like a system path. Avoid scanning operating system folders.".to_string(),
);
}
if path.components().count() <= 1 {
warnings.push("Top-level roots can be noisy. Prefer the specific media folder when possible.".to_string());
warnings.push(
"Top-level roots can be noisy. Prefer the specific media folder when possible."
.to_string(),
);
}
warnings
}
@@ -513,7 +533,11 @@ fn is_system_path(path: &Path) -> bool {
"c:\\windows",
"c:\\program files",
];
system_roots.iter().any(|root| value == *root || value.starts_with(&format!("{root}/")) || value.starts_with(&format!("{root}\\")))
system_roots.iter().any(|root| {
value == *root
|| value.starts_with(&format!("{root}/"))
|| value.starts_with(&format!("{root}\\"))
})
}
#[cfg(test)]
@@ -530,7 +554,10 @@ mod tests {
#[test]
fn recommendation_prefers_media_like_names() {
assert_eq!(classify_media_hint(Path::new("/srv/movies")), MediaHint::High);
assert_eq!(
classify_media_hint(Path::new("/srv/movies")),
MediaHint::High
);
}
#[test]
@@ -541,7 +568,8 @@ mod tests {
#[test]
fn preview_detects_media_files_and_samples() {
let root = std::env::temp_dir().join(format!("alchemist_fs_preview_{}", rand::random::<u64>()));
let root =
std::env::temp_dir().join(format!("alchemist_fs_preview_{}", rand::random::<u64>()));
std::fs::create_dir_all(&root).expect("root");
let media_file = root.join("movie.mkv");
std::fs::write(&media_file, b"video").expect("media");

View File

@@ -1,6 +1,6 @@
{
"name": "alchemist-web",
"version": "0.2.9",
"version": "0.2.10-rc.1",
"private": true,
"packageManager": "bun@1",
"type": "module",

View File

@@ -75,6 +75,11 @@ interface JobDetail {
encode_stats?: EncodeStats;
}
interface CountMessageResponse {
count: number;
message: string;
}
type TabType = "all" | "active" | "queued" | "completed" | "failed";
export default function JobManager() {
@@ -327,8 +332,10 @@ export default function JobManager() {
const clearCompleted = async () => {
setActionError(null);
try {
await apiAction("/api/jobs/clear-completed", { method: "POST" });
showToast({ kind: "success", title: "Jobs", message: "Completed jobs cleared." });
const result = await apiJson<CountMessageResponse>("/api/jobs/clear-completed", {
method: "POST",
});
showToast({ kind: "success", title: "Jobs", message: result.message });
await fetchJobs();
} catch (e) {
const message = isApiError(e) ? e.message : "Failed to clear completed jobs";