diff --git a/.idea/alchemist.iml b/.idea/alchemist.iml
index bbe0a70..568915c 100644
--- a/.idea/alchemist.iml
+++ b/.idea/alchemist.iml
@@ -1,5 +1,10 @@
+
+
+
+
+
@@ -8,5 +13,6 @@
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000..5cb71ef
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..51edfe7
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ddd1d7f..6d1af97 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,32 @@
All notable changes to this project will be documented in this file.
+## [0.3.1-rc.1] - 2026-04-08
+
+### New Features
+
+#### Conversion & Library Workflows
+- **Experimental Conversion / Remux page** — upload a single file, inspect streams, preview the generated FFmpeg command, run a remux/transcode job through Alchemist, and download the result when complete.
+- **Expanded Library Intelligence** — duplicate detection now sits alongside storage-focused recommendation sections for remux-only opportunities, wasteful audio layouts, and commentary/descriptive-track cleanup candidates.
+
+#### Authentication & Automation
+- **Named API tokens** — create bearer tokens from Settings with `read_only` or `full_access` access classes. Tokens are only shown once at creation time and stored server-side as hashes.
+- **OpenAPI contract** — hand-maintained OpenAPI spec added alongside expanded human API docs for auth, token management, and update-check behavior.
+
+#### Notifications
+- **Provider-specific notification targets** — notification settings now use provider-specific configuration payloads instead of the old shared endpoint/token shape.
+- **Provider expansion** — Discord webhook, Discord bot, Gotify, generic webhook, Telegram, and SMTP email targets are supported.
+- **Richer event model** — notification events now distinguish queue/start/completion/failure plus scan completion, engine idle, and daily summary delivery.
+- **Daily summary scheduling** — notifications include a global `daily_summary_time_local` setting and per-target opt-in for digest delivery.
+
+#### Deployment & Distribution
+- **Windows update check** — the About dialog now checks GitHub Releases for the latest stable version and links directly to the release download page when an update is available.
+- **Distribution metadata generation** — in-repo Homebrew and AUR packaging templates plus workflow rendering were added as the foundation for package-manager distribution.
+
+### Documentation
+- **Config path clarity** — docs now consistently describe `~/.config/alchemist/config.toml` as the default host-side config location on Linux/macOS, while Docker examples still use `/app/config/config.toml` inside the container.
+- **Backlog realignment** — the backlog was rewritten around current repo reality, marking large newly implemented surfaces as “Implemented / In Progress” and keeping the roadmap automation-first.
+
## [0.3.0] - 2026-04-06
### Security
diff --git a/README.md b/README.md
index 5a3e95c..8f1eb10 100644
--- a/README.md
+++ b/README.md
@@ -74,6 +74,7 @@ services:
```
Then open [http://localhost:3000](http://localhost:3000) in your browser.
+First-time setup is only reachable from the local network.
On Linux and macOS, the default host-side config location is
`~/.config/alchemist/config.toml`. When you use Docker, the
@@ -132,10 +133,26 @@ just check
The core contributor path is supported on Windows. Broader release and utility recipes remain Unix-first.
+## CLI
+
+Alchemist exposes explicit CLI subcommands:
+
+```bash
+alchemist scan /path/to/media
+alchemist run /path/to/media
+alchemist plan /path/to/media
+alchemist plan /path/to/media --json
+```
+
+- `scan` enqueues matching work and exits
+- `run` scans, enqueues, and waits for processing to finish
+- `plan` analyzes files and reports what Alchemist would do without enqueuing jobs
+
## First Run
1. Open [http://localhost:3000](http://localhost:3000).
2. Complete the setup wizard. It takes about 2 minutes.
+ During first-time setup, the web UI is reachable only from the local network.
3. Add your media folders in Watch Folders.
4. Alchemist scans and starts working automatically.
5. Check the Dashboard to see progress and savings.
@@ -144,8 +161,6 @@ The core contributor path is supported on Windows. Broader release and utility r
- API automation can use bearer tokens created in **Settings → API Tokens**.
- Read-only tokens are limited to observability and monitoring routes.
-- Alchemist can also be served under a subpath such as `/alchemist`
- using `ALCHEMIST_BASE_URL=/alchemist`.
## Supported Platforms
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 0000000..25ab63c
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,3 @@
+# Todo List
+
+Remove `src/wizard.rs` from the project; the web setup handles it. Maybe keep it for CLI users?
\ No newline at end of file
diff --git a/backlog.md b/backlog.md
index fae3f82..91ef5d4 100644
--- a/backlog.md
+++ b/backlog.md
@@ -45,11 +45,6 @@ documentation, or iteration.
- Token management endpoints and Settings UI
- Hand-maintained OpenAPI contract plus human API docs
-### Base URL / Subpath Support
-- `ALCHEMIST_BASE_URL` and matching config support
-- Router nesting under a configured path prefix
-- Frontend fetches, redirects, navigation, and SSE path generation updated for subpaths
-
### Distribution Foundation
- In-repo distribution metadata sources for:
- Homebrew
diff --git a/docs/docs/api.md b/docs/docs/api.md
index 6c09db4..e197f5d 100644
--- a/docs/docs/api.md
+++ b/docs/docs/api.md
@@ -9,8 +9,9 @@ except:
- `/api/auth/*`
- `/api/health`
- `/api/ready`
-- setup-mode exceptions: `/api/setup/*`, `/api/fs/*`,
- `/api/settings/bundle`, `/api/system/hardware`
+- during first-time setup, the setup UI and setup-related
+ unauthenticated routes are only reachable from the local
+ network
Authentication is established by `POST /api/auth/login`.
The backend also accepts `Authorization: Bearer `.
diff --git a/docs/docs/changelog.md b/docs/docs/changelog.md
index 1c8a18a..b72234f 100644
--- a/docs/docs/changelog.md
+++ b/docs/docs/changelog.md
@@ -3,6 +3,32 @@ title: Changelog
description: Release history for Alchemist.
---
+## [0.3.1-rc.1] - 2026-04-08
+
+### New Features
+
+#### Conversion & Library Workflows
+- **Experimental Conversion / Remux page** — upload a single file, inspect streams, preview the generated FFmpeg command, run a remux/transcode job through Alchemist, and download the result when complete.
+- **Expanded Library Intelligence** — duplicate detection now sits alongside storage-focused recommendation sections for remux-only opportunities, wasteful audio layouts, and commentary/descriptive-track cleanup candidates.
+
+#### Authentication & Automation
+- **Named API tokens** — create bearer tokens from Settings with `read_only` or `full_access` access classes. Tokens are only shown once at creation time and stored server-side as hashes.
+- **OpenAPI contract** — hand-maintained OpenAPI spec added alongside expanded human API docs for auth, token management, and update-check behavior.
+
+#### Notifications
+- **Provider-specific notification targets** — notification settings now use provider-specific configuration payloads instead of the old shared endpoint/token shape.
+- **Provider expansion** — Discord webhook, Discord bot, Gotify, generic webhook, Telegram, and SMTP email targets are supported.
+- **Richer event model** — notification events now distinguish queue/start/completion/failure plus scan completion, engine idle, and daily summary delivery.
+- **Daily summary scheduling** — notifications include a global `daily_summary_time_local` setting and per-target opt-in for digest delivery.
+
+#### Deployment & Distribution
+- **Windows update check** — the About dialog now checks GitHub Releases for the latest stable version and links directly to the release download page when an update is available.
+- **Distribution metadata generation** — in-repo Homebrew and AUR packaging templates plus workflow rendering were added as the foundation for package-manager distribution.
+
+### Documentation
+- **Config path clarity** — docs now consistently describe `~/.config/alchemist/config.toml` as the default host-side config location on Linux/macOS, while Docker examples still use `/app/config/config.toml` inside the container.
+- **Backlog realignment** — the backlog was rewritten around current repo reality, marking large newly implemented surfaces as “Implemented / In Progress” and keeping the roadmap automation-first.
+
## [0.3.0] - 2026-04-06
### Security
diff --git a/docs/docs/configuration-reference.md b/docs/docs/configuration-reference.md
index 2e9625e..8f6e603 100644
--- a/docs/docs/configuration-reference.md
+++ b/docs/docs/configuration-reference.md
@@ -97,7 +97,6 @@ requires at least one day in every window.
| `enable_telemetry` | bool | `false` | Opt-in anonymous telemetry switch |
| `log_retention_days` | int | `30` | Log retention period in days |
| `engine_mode` | string | `"balanced"` | Runtime engine mode: `background`, `balanced`, or `throughput` |
-| `base_url` | string | `""` | Path prefix for serving Alchemist under a subpath such as `/alchemist` |
## Example
diff --git a/docs/docs/environment-variables.md b/docs/docs/environment-variables.md
index f3eb32b..219991a 100644
--- a/docs/docs/environment-variables.md
+++ b/docs/docs/environment-variables.md
@@ -9,7 +9,6 @@ description: All environment variables Alchemist reads at startup.
| `ALCHEMIST_CONFIG` | (alias) | Alias for `ALCHEMIST_CONFIG_PATH` |
| `ALCHEMIST_DB_PATH` | `~/.config/alchemist/alchemist.db` | Path to SQLite database |
| `ALCHEMIST_DATA_DIR` | (none) | Sets data dir; `alchemist.db` placed here |
-| `ALCHEMIST_BASE_URL` | root (`/`) | Path prefix for serving Alchemist under a subpath such as `/alchemist` |
| `ALCHEMIST_CONFIG_MUTABLE` | `true` | Set `false` to block runtime config writes |
| `RUST_LOG` | `info` | Log level: `info`, `debug`, `alchemist=trace` |
diff --git a/docs/docs/first-run.md b/docs/docs/first-run.md
index a74cade..feebbfb 100644
--- a/docs/docs/first-run.md
+++ b/docs/docs/first-run.md
@@ -5,7 +5,8 @@ description: Getting through the setup wizard and starting your first scan.
When you first open Alchemist at `http://localhost:3000`
the setup wizard runs automatically. It takes about two
-minutes.
+minutes. Until the first account is created, setup is
+reachable only from the local network.
## Wizard steps
diff --git a/docs/docs/installation.md b/docs/docs/installation.md
index 021f577..c29f910 100644
--- a/docs/docs/installation.md
+++ b/docs/docs/installation.md
@@ -32,7 +32,8 @@ docker compose up -d
```
Open [http://localhost:3000](http://localhost:3000). The
-setup wizard runs on first visit.
+setup wizard runs on first visit and is only reachable
+from the local network until the first account is created.
For GPU passthrough (NVIDIA, Intel, AMD) see
[GPU Passthrough](/gpu-passthrough).
@@ -110,6 +111,19 @@ just dev
Windows contributor support covers the core `install/dev/check` path.
Broader `just` release and utility recipes remain Unix-first.
+## CLI subcommands
+
+```bash
+alchemist scan /path/to/media
+alchemist run /path/to/media
+alchemist plan /path/to/media
+alchemist plan /path/to/media --json
+```
+
+- `scan` enqueues matching jobs and exits
+- `run` scans, enqueues, and waits for processing to finish
+- `plan` reports what Alchemist would do without enqueuing jobs
+
## Nightly builds
```bash
diff --git a/src/config.rs b/src/config.rs
index 322c30e..2d65b2a 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -682,8 +682,6 @@ pub struct SystemConfig {
/// Enable HSTS header (only enable if running behind HTTPS)
#[serde(default)]
pub https_only: bool,
- #[serde(default)]
- pub base_url: String,
}
fn default_true() -> bool {
@@ -710,7 +708,6 @@ impl Default for SystemConfig {
log_retention_days: default_log_retention_days(),
engine_mode: EngineMode::default(),
https_only: false,
- base_url: String::new(),
}
}
}
@@ -826,7 +823,6 @@ impl Default for Config {
log_retention_days: default_log_retention_days(),
engine_mode: EngineMode::default(),
https_only: false,
- base_url: String::new(),
},
}
}
@@ -923,7 +919,6 @@ impl Config {
}
validate_schedule_time(&self.notifications.daily_summary_time_local)?;
- normalize_base_url(&self.system.base_url)?;
for target in &self.notifications.targets {
target.validate()?;
}
@@ -1026,7 +1021,6 @@ impl Config {
}
pub(crate) fn canonicalize_for_save(&mut self) {
- self.system.base_url = normalize_base_url(&self.system.base_url).unwrap_or_default();
if !self.notifications.targets.is_empty() {
self.notifications.webhook_url = None;
self.notifications.discord_webhook = None;
@@ -1046,33 +1040,7 @@ impl Config {
}
}
- pub(crate) fn apply_env_overrides(&mut self) {
- if let Ok(base_url) = std::env::var("ALCHEMIST_BASE_URL") {
- self.system.base_url = base_url;
- }
- self.system.base_url = normalize_base_url(&self.system.base_url).unwrap_or_default();
- }
-}
-
-pub fn normalize_base_url(value: &str) -> Result {
- let trimmed = value.trim();
- if trimmed.is_empty() || trimmed == "/" {
- return Ok(String::new());
- }
- if trimmed.contains("://") {
- anyhow::bail!("system.base_url must be a path prefix, not a full URL");
- }
- if !trimmed.starts_with('/') {
- anyhow::bail!("system.base_url must start with '/'");
- }
- if trimmed.contains('?') || trimmed.contains('#') {
- anyhow::bail!("system.base_url must not contain query or fragment components");
- }
- let normalized = trimmed.trim_end_matches('/');
- if normalized.contains("//") {
- anyhow::bail!("system.base_url must not contain repeated slashes");
- }
- Ok(normalized.to_string())
+ pub(crate) fn apply_env_overrides(&mut self) {}
}
fn validate_schedule_time(value: &str) -> Result<()> {
@@ -1158,65 +1126,4 @@ mod tests {
assert_eq!(EngineMode::default(), EngineMode::Balanced);
assert_eq!(EngineMode::Balanced.concurrent_jobs_for_cpu_count(8), 4);
}
-
- #[test]
- fn normalize_base_url_accepts_root_or_empty() {
- assert_eq!(
- normalize_base_url("").unwrap_or_else(|err| panic!("empty base url: {err}")),
- ""
- );
- assert_eq!(
- normalize_base_url("/").unwrap_or_else(|err| panic!("root base url: {err}")),
- ""
- );
- assert_eq!(
- normalize_base_url("/alchemist/")
- .unwrap_or_else(|err| panic!("trimmed base url: {err}")),
- "/alchemist"
- );
- }
-
- #[test]
- fn normalize_base_url_rejects_invalid_values() {
- assert!(normalize_base_url("alchemist").is_err());
- assert!(normalize_base_url("https://example.com/alchemist").is_err());
- assert!(normalize_base_url("/a//b").is_err());
- }
-
- #[test]
- fn env_base_url_override_takes_priority_on_load() {
- let config_path = std::env::temp_dir().join(format!(
- "alchemist_base_url_override_{}.toml",
- rand::random::()
- ));
- std::fs::write(
- &config_path,
- r#"
-[transcode]
-size_reduction_threshold = 0.3
-min_bpp_threshold = 0.1
-min_file_size_mb = 50
-concurrent_jobs = 1
-
-[hardware]
-preferred_vendor = "cpu"
-allow_cpu_fallback = true
-
-[scanner]
-directories = []
-
-[system]
-base_url = "/from-config"
-"#,
- )
- .unwrap_or_else(|err| panic!("failed to write temp config: {err}"));
-
- // SAFETY: test-only environment mutation.
- unsafe { std::env::set_var("ALCHEMIST_BASE_URL", "/from-env") };
- let config =
- Config::load(&config_path).unwrap_or_else(|err| panic!("failed to load config: {err}"));
- assert_eq!(config.system.base_url, "/from-env");
- unsafe { std::env::remove_var("ALCHEMIST_BASE_URL") };
- let _ = std::fs::remove_file(config_path);
- }
}
diff --git a/src/main.rs b/src/main.rs
index 3b0ba18..23135f7 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,15 +2,18 @@
use alchemist::db::EventChannels;
use alchemist::error::Result;
+use alchemist::media::pipeline::{Analyzer as _, Planner as _};
use alchemist::system::hardware;
use alchemist::version;
use alchemist::{Agent, Transcoder, config, db, runtime};
-use clap::Parser;
+use clap::{Parser, Subcommand};
+use serde::Serialize;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
use tracing::{debug, error, info, warn};
use tracing_subscriber::EnvFilter;
+use tracing_subscriber::fmt::time::time;
use notify::{RecursiveMode, Watcher};
use tokio::sync::RwLock;
@@ -19,21 +22,55 @@ use tokio::sync::broadcast;
#[derive(Parser, Debug)]
#[command(author, version = version::current(), about, long_about = None)]
struct Args {
- /// Run in CLI mode (process directories and exit)
- #[arg(long)]
- cli: bool,
-
- /// Directories to scan for media files (CLI mode only)
- #[arg(long, value_name = "DIR")]
- directories: Vec,
-
- /// Dry run (don't actually transcode)
- #[arg(short, long)]
- dry_run: bool,
-
/// Reset admin user/password and sessions (forces setup mode)
#[arg(long)]
reset_auth: bool,
+
+ /// Enable verbose terminal logging and default DEBUG filtering
+ #[arg(long)]
+ debug_flags: bool,
+
+ #[command(subcommand)]
+ command: Option,
+}
+
+#[derive(Subcommand, Debug, Clone)]
+enum Commands {
+ /// Scan directories and enqueue matching work, then exit
+ Scan {
+ #[arg(value_name = "DIR", required = true)]
+ directories: Vec,
+ },
+ /// Scan directories, enqueue work, and wait for processing to finish
+ Run {
+ #[arg(value_name = "DIR", required = true)]
+ directories: Vec,
+ /// Don't actually transcode
+ #[arg(short, long)]
+ dry_run: bool,
+ },
+ /// Analyze files and report what Alchemist would do without enqueuing jobs
+ Plan {
+ #[arg(value_name = "DIR", required = true)]
+ directories: Vec,
+ /// Emit machine-readable JSON instead of human-readable text
+ #[arg(long)]
+ json: bool,
+ },
+}
+
+#[derive(Debug, Serialize)]
+struct CliPlanItem {
+ input_path: String,
+ output_path: Option,
+ profile: Option,
+ decision: String,
+ reason: String,
+ encoder: Option,
+ backend: Option,
+ rate_control: Option,
+ fallback: Option,
+ error: Option,
}
#[tokio::main]
@@ -160,76 +197,79 @@ fn should_enter_setup_mode_for_missing_users(is_server_mode: bool, has_users: bo
}
async fn run() -> Result<()> {
- // Initialize logging
- tracing_subscriber::fmt()
- .with_env_filter(EnvFilter::from_default_env().add_directive(tracing::Level::INFO.into()))
- .with_target(true)
- .with_thread_ids(true)
- .with_thread_names(true)
- .init();
+ let args = Args::parse();
+ init_logging(args.debug_flags);
+ let is_server_mode = args.command.is_none();
let boot_start = Instant::now();
- // Startup Banner
- info!(
- " ______ __ ______ __ __ ______ __ __ __ ______ ______ "
- );
- info!(
- "/\\ __ \\ /\\ \\ /\\ ___\\ /\\ \\_\\ \\ /\\ ___\\ /\\ \"-./ \\ /\\ \\ /\\ ___\\ /\\__ _\\"
- );
- info!(
- "\\ \\ __ \\ \\ \\ \\____ \\ \\ \\____ \\ \\ __ \\ \\ \\ __\\ \\ \\ \\-./\\ \\ \\ \\ \\ \\ \\___ \\ \\/_/\\ \\/"
- );
- info!(
- " \\ \\_\\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\_\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_\\ \\/\\_____\\ \\ \\_\\"
- );
- info!(
- " \\/_/\\/_/ \\/_____/ \\/_____/ \\/_/\\/_/ \\/_____/ \\/_/ \\/_/ \\/_/ \\/_____/ \\/_/"
- );
- info!("");
- info!("");
- let version = alchemist::version::current();
- let build_info = option_env!("BUILD_INFO")
- .or(option_env!("GIT_SHA"))
- .or(option_env!("VERGEN_GIT_SHA"))
- .unwrap_or("unknown");
- info!("Version: {}", version);
- info!("Build: {}", build_info);
- info!("");
- info!("System Information:");
- info!(
- " OS: {} ({})",
- std::env::consts::OS,
- std::env::consts::ARCH
- );
- info!(" CPUs: {}", num_cpus::get());
- info!("");
-
- let args = Args::parse();
info!(
target: "startup",
- "Parsed CLI args: cli_mode={}, reset_auth={}, dry_run={}, directories={}",
- args.cli,
+ "Parsed CLI args: command={:?}, reset_auth={}, debug_flags={}",
+ args.command,
args.reset_auth,
- args.dry_run,
- args.directories.len()
+ args.debug_flags
);
- // ... rest of logic remains largely the same, just inside run()
- // Default to server mode unless CLI is explicitly requested.
- let is_server_mode = !args.cli;
- info!(target: "startup", "Resolved server mode: {}", is_server_mode);
- if is_server_mode && !args.directories.is_empty() {
- warn!("Directories were provided without --cli; ignoring CLI inputs.");
+ if is_server_mode {
+ info!(
+ " ______ __ ______ __ __ ______ __ __ __ ______ ______ "
+ );
+ info!(
+ "/\\ __ \\ /\\ \\ /\\ ___\\ /\\ \\_\\ \\ /\\ ___\\ /\\ \"-./ \\ /\\ \\ /\\ ___\\ /\\__ _\\"
+ );
+ info!(
+ "\\ \\ __ \\ \\ \\ \\____ \\ \\ \\____ \\ \\ __ \\ \\ \\ __\\ \\ \\ \\-./\\ \\ \\ \\ \\ \\ \\___ \\ \\/_/\\ \\/"
+ );
+ info!(
+ " \\ \\_\\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\_\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_\\ \\/\\_____\\ \\ \\_\\"
+ );
+ info!(
+ " \\/_/\\/_/ \\/_____/ \\/_____/ \\/_/\\/_/ \\/_____/ \\/_/ \\/_/ \\/_/ \\/_____/ \\/_/"
+ );
+ info!("");
+ info!("");
+ let version = alchemist::version::current();
+ let build_info = option_env!("BUILD_INFO")
+ .or(option_env!("GIT_SHA"))
+ .or(option_env!("VERGEN_GIT_SHA"))
+ .unwrap_or("unknown");
+ info!("Version: {}", version);
+ info!("Build: {}", build_info);
+ info!("");
+ info!("System Information:");
+ info!(
+ " OS: {} ({})",
+ std::env::consts::OS,
+ std::env::consts::ARCH
+ );
+ info!(" CPUs: {}", num_cpus::get());
+ info!("");
}
+ info!(target: "startup", "Resolved server mode: {}", is_server_mode);
+
// 0. Load Configuration
let config_start = Instant::now();
let config_path = runtime::config_path();
let db_path = runtime::db_path();
let config_mutable = runtime::config_mutable();
- let (config, mut setup_mode, config_exists) =
- load_startup_config(config_path.as_path(), is_server_mode);
+ let (config, mut setup_mode, config_exists) = if is_server_mode {
+ load_startup_config(config_path.as_path(), true)
+ } else {
+ if !config_path.exists() {
+ error!(
+ "Configuration required. Run Alchemist in server mode to complete setup, or create {:?} manually.",
+ config_path
+ );
+ return Err(alchemist::error::AlchemistError::Config(
+ "Missing configuration".into(),
+ ));
+ }
+ let config = config::Config::load(config_path.as_path())
+ .map_err(|err| alchemist::error::AlchemistError::Config(err.to_string()))?;
+ (config, false, true)
+ };
info!(
target: "startup",
"Config loaded (path={:?}, exists={}, mutable={}, setup_mode={}) in {} ms",
@@ -371,9 +411,9 @@ async fn run() -> Result<()> {
warn!("Auth reset requested. All users and sessions cleared.");
setup_mode = true;
}
+ let has_users = db.has_users().await?;
if is_server_mode {
let users_start = Instant::now();
- let has_users = db.has_users().await?;
info!(
target: "startup",
"User check completed (has_users={}) in {} ms",
@@ -386,6 +426,13 @@ async fn run() -> Result<()> {
}
setup_mode = true;
}
+ } else if !has_users {
+ error!(
+ "Setup is not complete. Run Alchemist in server mode to finish creating the first account."
+ );
+ return Err(alchemist::error::AlchemistError::Config(
+ "Setup incomplete".into(),
+ ));
}
if !setup_mode {
@@ -518,7 +565,7 @@ async fn run() -> Result<()> {
hardware_state.clone(),
tx.clone(),
event_channels.clone(),
- args.dry_run,
+ matches!(args.command, Some(Commands::Run { dry_run: true, .. })),
)
.await,
);
@@ -748,56 +795,307 @@ async fn run() -> Result<()> {
}
}
} else {
- // CLI Mode
- if setup_mode {
- error!(
- "Configuration required. Run without --cli to use the web-based setup wizard, or create {:?} manually.",
- config_path
- );
-
- // CLI early exit - error
- // (Caller will handle pause-on-exit if needed)
- return Err(alchemist::error::AlchemistError::Config(
- "Missing configuration".into(),
- ));
- }
-
- if args.directories.is_empty() {
- error!("No directories provided. Usage: alchemist --cli --dir [--dir ...]");
- return Err(alchemist::error::AlchemistError::Config(
- "Missing directories for CLI mode".into(),
- ));
- }
- agent.scan_and_enqueue(args.directories).await?;
-
- // Wait until all jobs are processed
- info!("Waiting for jobs to complete...");
- loop {
- let stats = db.get_stats().await?;
- let active = stats
- .as_object()
- .map(|m| {
- m.iter()
- .filter(|(k, _)| {
- ["encoding", "analyzing", "remuxing", "resuming"].contains(&k.as_str())
- })
- .map(|(_, v)| v.as_i64().unwrap_or(0))
- .sum::()
- })
- .unwrap_or(0);
- let queued = stats.get("queued").and_then(|v| v.as_i64()).unwrap_or(0);
-
- if active + queued == 0 {
- break;
+ match args
+ .command
+ .clone()
+ .expect("CLI branch requires a subcommand")
+ {
+ Commands::Scan { directories } => {
+ agent.scan_and_enqueue(directories).await?;
+ info!("Scan complete. Matching files were enqueued.");
+ }
+ Commands::Run { directories, .. } => {
+ agent.scan_and_enqueue(directories).await?;
+ wait_for_cli_jobs(db.as_ref()).await?;
+ info!("All jobs processed.");
+ }
+ Commands::Plan { directories, json } => {
+ let items =
+ build_cli_plan(db.as_ref(), config.clone(), &hardware_state, directories)
+ .await?;
+ if json {
+ println!(
+ "{}",
+ serde_json::to_string_pretty(&items).unwrap_or_else(|_| "[]".to_string())
+ );
+ } else {
+ print_cli_plan(&items);
+ }
}
- tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
}
- info!("All jobs processed.");
}
Ok(())
}
+async fn wait_for_cli_jobs(db: &db::Db) -> Result<()> {
+ info!("Waiting for jobs to complete...");
+ loop {
+ let stats = db.get_stats().await?;
+ let active = stats
+ .as_object()
+ .map(|m| {
+ m.iter()
+ .filter(|(k, _)| {
+ ["encoding", "analyzing", "remuxing", "resuming"].contains(&k.as_str())
+ })
+ .map(|(_, v)| v.as_i64().unwrap_or(0))
+ .sum::()
+ })
+ .unwrap_or(0);
+ let queued = stats.get("queued").and_then(|v| v.as_i64()).unwrap_or(0);
+
+ if active + queued == 0 {
+ break;
+ }
+ tokio::time::sleep(tokio::time::Duration::from_secs(2)).await;
+ }
+ Ok(())
+}
+
+async fn build_cli_plan(
+ db: &db::Db,
+ config_state: Arc>,
+ hardware_state: &hardware::HardwareState,
+ directories: Vec,
+) -> Result> {
+ let files = tokio::task::spawn_blocking(move || {
+ let scanner = alchemist::media::scanner::Scanner::new();
+ scanner.scan(directories)
+ })
+ .await
+ .map_err(|err| alchemist::error::AlchemistError::Unknown(format!("scan task failed: {err}")))?;
+
+ let file_settings = match db.get_file_settings().await {
+ Ok(settings) => settings,
+ Err(err) => {
+ error!("Failed to fetch file settings, using defaults: {}", err);
+ alchemist::media::pipeline::default_file_settings()
+ }
+ };
+ let config_snapshot = Arc::new(config_state.read().await.clone());
+ let hw_info = hardware_state.snapshot().await;
+ let planner = alchemist::media::planner::BasicPlanner::new(config_snapshot, hw_info);
+ let analyzer = alchemist::media::analyzer::FfmpegAnalyzer;
+
+ let mut items = Vec::new();
+ for discovered in files {
+ let input_path = discovered.path.clone();
+ let input_path_string = input_path.display().to_string();
+
+ if let Some(reason) = alchemist::media::pipeline::skip_reason_for_discovered_path(
+ db,
+ &input_path,
+ &file_settings,
+ )
+ .await?
+ {
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: None,
+ profile: None,
+ decision: "skip".to_string(),
+ reason: reason.to_string(),
+ encoder: None,
+ backend: None,
+ rate_control: None,
+ fallback: None,
+ error: None,
+ });
+ continue;
+ }
+
+ let output_path =
+ file_settings.output_path_for_source(&input_path, discovered.source_root.as_deref());
+ if output_path.exists() && !file_settings.should_replace_existing_output() {
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: Some(output_path.display().to_string()),
+ profile: None,
+ decision: "skip".to_string(),
+ reason: "output exists and replace strategy is keep".to_string(),
+ encoder: None,
+ backend: None,
+ rate_control: None,
+ fallback: None,
+ error: None,
+ });
+ continue;
+ }
+
+ let analysis = match analyzer.analyze(&input_path).await {
+ Ok(analysis) => analysis,
+ Err(err) => {
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: Some(output_path.display().to_string()),
+ profile: None,
+ decision: "error".to_string(),
+ reason: "analysis failed".to_string(),
+ encoder: None,
+ backend: None,
+ rate_control: None,
+ fallback: None,
+ error: Some(err.to_string()),
+ });
+ continue;
+ }
+ };
+
+ let profile = match db.get_profile_for_path(&input_path.to_string_lossy()).await {
+ Ok(profile) => profile,
+ Err(err) => {
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: Some(output_path.display().to_string()),
+ profile: None,
+ decision: "error".to_string(),
+ reason: "profile resolution failed".to_string(),
+ encoder: None,
+ backend: None,
+ rate_control: None,
+ fallback: None,
+ error: Some(err.to_string()),
+ });
+ continue;
+ }
+ };
+
+ let plan = match planner
+ .plan(&analysis, &output_path, profile.as_ref())
+ .await
+ {
+ Ok(plan) => plan,
+ Err(err) => {
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: Some(output_path.display().to_string()),
+ profile: profile.as_ref().map(|p| p.name.clone()),
+ decision: "error".to_string(),
+ reason: "planning failed".to_string(),
+ encoder: None,
+ backend: None,
+ rate_control: None,
+ fallback: None,
+ error: Some(err.to_string()),
+ });
+ continue;
+ }
+ };
+
+ let (decision, reason) = match &plan.decision {
+ alchemist::media::pipeline::TranscodeDecision::Skip { reason } => {
+ ("skip".to_string(), reason.clone())
+ }
+ alchemist::media::pipeline::TranscodeDecision::Remux { reason } => {
+ ("remux".to_string(), reason.clone())
+ }
+ alchemist::media::pipeline::TranscodeDecision::Transcode { reason } => {
+ ("transcode".to_string(), reason.clone())
+ }
+ };
+
+ items.push(CliPlanItem {
+ input_path: input_path_string,
+ output_path: Some(output_path.display().to_string()),
+ profile: profile.as_ref().map(|p| p.name.clone()),
+ decision,
+ reason,
+ encoder: plan
+ .encoder
+ .map(|encoder| encoder.ffmpeg_encoder_name().to_string()),
+ backend: plan.backend.map(|backend| backend.as_str().to_string()),
+ rate_control: plan.rate_control.as_ref().map(format_rate_control),
+ fallback: plan
+ .fallback
+ .as_ref()
+ .map(|fallback| fallback.reason.clone()),
+ error: None,
+ });
+ }
+
+ Ok(items)
+}
+
+fn format_rate_control(rate_control: &alchemist::media::pipeline::RateControl) -> String {
+ match rate_control {
+ alchemist::media::pipeline::RateControl::Crf { value } => format!("crf:{value}"),
+ alchemist::media::pipeline::RateControl::Cq { value } => format!("cq:{value}"),
+ alchemist::media::pipeline::RateControl::QsvQuality { value } => {
+ format!("qsv_quality:{value}")
+ }
+ alchemist::media::pipeline::RateControl::Bitrate { kbps } => format!("bitrate:{kbps}k"),
+ }
+}
+
+fn print_cli_plan(items: &[CliPlanItem]) {
+ for item in items {
+ println!("{}", item.input_path);
+ println!(" decision: {} — {}", item.decision, item.reason);
+ if let Some(output_path) = &item.output_path {
+ println!(" output: {}", output_path);
+ }
+ if let Some(profile) = &item.profile {
+ println!(" profile: {}", profile);
+ }
+ if let Some(encoder) = &item.encoder {
+ let backend = item.backend.as_deref().unwrap_or("unknown");
+ println!(" encoder: {} ({})", encoder, backend);
+ }
+ if let Some(rate_control) = &item.rate_control {
+ println!(" rate: {}", rate_control);
+ }
+ if let Some(fallback) = &item.fallback {
+ println!(" fallback: {}", fallback);
+ }
+ if let Some(error) = &item.error {
+ println!(" error: {}", error);
+ }
+ println!();
+ }
+}
+
+fn init_logging(debug_flags: bool) {
+ let default_level = if debug_flags {
+ tracing::Level::DEBUG
+ } else {
+ tracing::Level::INFO
+ };
+ let env_filter = EnvFilter::from_default_env().add_directive(default_level.into());
+
+ if debug_flags {
+ tracing_subscriber::fmt()
+ .with_env_filter(env_filter)
+ .with_target(true)
+ .with_thread_ids(true)
+ .with_thread_names(true)
+ .with_timer(time())
+ .init();
+ } else {
+ tracing_subscriber::fmt()
+ .with_env_filter(env_filter)
+ .without_time()
+ .with_target(false)
+ .with_thread_ids(false)
+ .with_thread_names(false)
+ .compact()
+ .init();
+ }
+}
+
+#[cfg(test)]
+mod logging_tests {
+ use super::*;
+ use clap::Parser;
+
+ #[test]
+ fn debug_flags_arg_parses() {
+ let args = Args::try_parse_from(["alchemist", "--debug-flags"])
+ .unwrap_or_else(|err| panic!("failed to parse debug flag: {err}"));
+ assert!(args.debug_flags);
+ }
+}
+
#[cfg(test)]
mod version_cli_tests {
use super::*;
@@ -836,6 +1134,41 @@ mod tests {
assert!(Args::try_parse_from(["alchemist", "--output-dir", "/tmp/out"]).is_err());
}
+ #[test]
+ fn args_reject_removed_cli_flag() {
+ assert!(Args::try_parse_from(["alchemist", "--cli"]).is_err());
+ }
+
+ #[test]
+ fn scan_subcommand_parses() {
+ let args = Args::try_parse_from(["alchemist", "scan", "/tmp/media"])
+ .unwrap_or_else(|err| panic!("failed to parse scan subcommand: {err}"));
+ assert!(matches!(
+ args.command,
+ Some(Commands::Scan { directories }) if directories == vec![PathBuf::from("/tmp/media")]
+ ));
+ }
+
+ #[test]
+ fn run_subcommand_parses_with_dry_run() {
+ let args = Args::try_parse_from(["alchemist", "run", "/tmp/media", "--dry-run"])
+ .unwrap_or_else(|err| panic!("failed to parse run subcommand: {err}"));
+ assert!(matches!(
+ args.command,
+ Some(Commands::Run { directories, dry_run }) if directories == vec![PathBuf::from("/tmp/media")] && dry_run
+ ));
+ }
+
+ #[test]
+ fn plan_subcommand_parses_with_json() {
+ let args = Args::try_parse_from(["alchemist", "plan", "/tmp/media", "--json"])
+ .unwrap_or_else(|err| panic!("failed to parse plan subcommand: {err}"));
+ assert!(matches!(
+ args.command,
+ Some(Commands::Plan { directories, json }) if directories == vec![PathBuf::from("/tmp/media")] && json
+ ));
+ }
+
#[test]
fn config_reload_matches_create_modify_and_rename_events() {
let config_path = PathBuf::from("/tmp/alchemist-config.toml");
diff --git a/src/media/ffmpeg/cpu.rs b/src/media/ffmpeg/cpu.rs
index b720578..eed1c4b 100644
--- a/src/media/ffmpeg/cpu.rs
+++ b/src/media/ffmpeg/cpu.rs
@@ -6,6 +6,7 @@ pub fn append_args(
encoder: Encoder,
rate_control: Option,
preset: Option<&str>,
+ tag_hevc_as_hvc1: bool,
) {
match encoder {
Encoder::Av1Svt => {
@@ -48,9 +49,10 @@ pub fn append_args(
preset.unwrap_or(CpuPreset::Medium.as_str()).to_string(),
"-crf".to_string(),
crf,
- "-tag:v".to_string(),
- "hvc1".to_string(),
]);
+ if tag_hevc_as_hvc1 {
+ args.extend(["-tag:v".to_string(), "hvc1".to_string()]);
+ }
}
Encoder::H264X264 => {
let crf = match rate_control {
diff --git a/src/media/ffmpeg/mod.rs b/src/media/ffmpeg/mod.rs
index ca823f7..14a8bda 100644
--- a/src/media/ffmpeg/mod.rs
+++ b/src/media/ffmpeg/mod.rs
@@ -182,6 +182,7 @@ impl<'a> FFmpegCommandBuilder<'a> {
}
let rate_control = self.plan.rate_control.clone();
+ let tag_hevc_as_hvc1 = uses_quicktime_container(&self.plan.container);
let mut args = vec![
"-hide_banner".to_string(),
"-y".to_string(),
@@ -249,12 +250,7 @@ impl<'a> FFmpegCommandBuilder<'a> {
Encoder::Av1Videotoolbox
| Encoder::HevcVideotoolbox
| Encoder::H264Videotoolbox => {
- videotoolbox::append_args(
- &mut args,
- encoder,
- rate_control.clone(),
- default_quality(&self.plan.rate_control, 65),
- );
+ videotoolbox::append_args(&mut args, encoder, tag_hevc_as_hvc1);
}
Encoder::Av1Svt | Encoder::Av1Aom | Encoder::HevcX265 | Encoder::H264X264 => {
cpu::append_args(
@@ -262,6 +258,7 @@ impl<'a> FFmpegCommandBuilder<'a> {
encoder,
rate_control.clone(),
self.plan.encoder_preset.as_deref(),
+ tag_hevc_as_hvc1,
);
}
}
@@ -285,7 +282,7 @@ impl<'a> FFmpegCommandBuilder<'a> {
apply_subtitle_plan(&mut args, &self.plan.subtitles);
apply_color_metadata(&mut args, self.metadata, &self.plan.filters);
- if matches!(self.plan.container.as_str(), "mp4" | "m4v" | "mov") {
+ if uses_quicktime_container(&self.plan.container) {
args.push("-movflags".to_string());
args.push("+faststart".to_string());
}
@@ -483,6 +480,10 @@ fn output_format_name(container: &str) -> &str {
}
}
+fn uses_quicktime_container(container: &str) -> bool {
+ matches!(container, "mp4" | "m4v" | "mov")
+}
+
#[derive(Debug, Clone, Default)]
pub struct FFmpegProgress {
pub frame: u64,
@@ -1041,6 +1042,83 @@ mod tests {
.build_args()
.unwrap_or_else(|err| panic!("failed to build videotoolbox args: {err}"));
assert!(args.contains(&"hevc_videotoolbox".to_string()));
+ assert!(!args.contains(&"hvc1".to_string()));
+ assert!(!args.contains(&"-q:v".to_string()));
+ assert!(!args.contains(&"-b:v".to_string()));
+ }
+
+ #[test]
+ fn hevc_videotoolbox_mp4_adds_hvc1_tag() {
+ let metadata = metadata();
+ let mut plan = plan_for(Encoder::HevcVideotoolbox);
+ plan.container = "mp4".to_string();
+ let builder = FFmpegCommandBuilder::new(
+ Path::new("/tmp/in.mkv"),
+ Path::new("/tmp/out.mp4"),
+ &metadata,
+ &plan,
+ );
+ let args = builder
+ .build_args()
+ .unwrap_or_else(|err| panic!("failed to build mp4 videotoolbox args: {err}"));
+ assert!(args.contains(&"hevc_videotoolbox".to_string()));
+ assert!(args.contains(&"hvc1".to_string()));
+ assert!(!args.contains(&"-q:v".to_string()));
+ }
+
+ #[test]
+ fn hevc_videotoolbox_bitrate_mode_uses_generic_bitrate_flag() {
+ let metadata = metadata();
+ let mut plan = plan_for(Encoder::HevcVideotoolbox);
+ plan.rate_control = Some(RateControl::Bitrate { kbps: 2500 });
+ let builder = FFmpegCommandBuilder::new(
+ Path::new("/tmp/in.mkv"),
+ Path::new("/tmp/out.mkv"),
+ &metadata,
+ &plan,
+ );
+ let args = builder
+ .build_args()
+ .unwrap_or_else(|err| panic!("failed to build bitrate videotoolbox args: {err}"));
+ assert!(args.contains(&"hevc_videotoolbox".to_string()));
+ assert!(args.contains(&"-b:v".to_string()));
+ assert!(args.contains(&"2500k".to_string()));
+ assert!(!args.contains(&"-q:v".to_string()));
+ }
+
+ #[test]
+ fn hevc_x265_mkv_does_not_add_hvc1_tag() {
+ let metadata = metadata();
+ let plan = plan_for(Encoder::HevcX265);
+ let builder = FFmpegCommandBuilder::new(
+ Path::new("/tmp/in.mkv"),
+ Path::new("/tmp/out.mkv"),
+ &metadata,
+ &plan,
+ );
+ let args = builder
+ .build_args()
+ .unwrap_or_else(|err| panic!("failed to build mkv x265 args: {err}"));
+ assert!(args.contains(&"libx265".to_string()));
+ assert!(!args.contains(&"hvc1".to_string()));
+ }
+
+ #[test]
+ fn hevc_x265_mp4_adds_hvc1_tag() {
+ let metadata = metadata();
+ let mut plan = plan_for(Encoder::HevcX265);
+ plan.container = "mp4".to_string();
+ let builder = FFmpegCommandBuilder::new(
+ Path::new("/tmp/in.mkv"),
+ Path::new("/tmp/out.mp4"),
+ &metadata,
+ &plan,
+ );
+ let args = builder
+ .build_args()
+ .unwrap_or_else(|err| panic!("failed to build mp4 x265 args: {err}"));
+ assert!(args.contains(&"libx265".to_string()));
+ assert!(args.contains(&"hvc1".to_string()));
}
#[test]
diff --git a/src/media/ffmpeg/videotoolbox.rs b/src/media/ffmpeg/videotoolbox.rs
index c361ae0..f969152 100644
--- a/src/media/ffmpeg/videotoolbox.rs
+++ b/src/media/ffmpeg/videotoolbox.rs
@@ -1,51 +1,29 @@
-use crate::media::pipeline::{Encoder, RateControl};
-
-pub fn append_args(
- args: &mut Vec,
- encoder: Encoder,
- rate_control: Option,
- default_quality: u8,
-) {
- let cq = match rate_control {
- Some(RateControl::Cq { value }) => value,
- _ => default_quality,
- };
+use crate::media::pipeline::Encoder;
+pub fn append_args(args: &mut Vec, encoder: Encoder, tag_hevc_as_hvc1: bool) {
+ // Current FFmpeg VideoToolbox encoders on macOS do not expose qscale-style
+ // quality controls, so bitrate mode is handled by the shared builder and
+ // CQ-style requests intentionally fall back to the encoder defaults.
match encoder {
Encoder::Av1Videotoolbox => {
args.extend([
"-c:v".to_string(),
"av1_videotoolbox".to_string(),
- "-b:v".to_string(),
- "0".to_string(),
- "-q:v".to_string(),
- cq.to_string(),
"-allow_sw".to_string(),
"1".to_string(),
]);
}
Encoder::HevcVideotoolbox => {
- args.extend([
- "-c:v".to_string(),
- "hevc_videotoolbox".to_string(),
- "-b:v".to_string(),
- "0".to_string(),
- "-q:v".to_string(),
- cq.to_string(),
- "-tag:v".to_string(),
- "hvc1".to_string(),
- "-allow_sw".to_string(),
- "1".to_string(),
- ]);
+ args.extend(["-c:v".to_string(), "hevc_videotoolbox".to_string()]);
+ if tag_hevc_as_hvc1 {
+ args.extend(["-tag:v".to_string(), "hvc1".to_string()]);
+ }
+ args.extend(["-allow_sw".to_string(), "1".to_string()]);
}
Encoder::H264Videotoolbox => {
args.extend([
"-c:v".to_string(),
"h264_videotoolbox".to_string(),
- "-b:v".to_string(),
- "0".to_string(),
- "-q:v".to_string(),
- cq.to_string(),
"-allow_sw".to_string(),
"1".to_string(),
]);
diff --git a/src/media/pipeline.rs b/src/media/pipeline.rs
index b3326de..487b039 100644
--- a/src/media/pipeline.rs
+++ b/src/media/pipeline.rs
@@ -514,7 +514,7 @@ pub async fn enqueue_discovered_with_db(
.await
}
-fn default_file_settings() -> crate::db::FileSettings {
+pub fn default_file_settings() -> crate::db::FileSettings {
crate::db::FileSettings {
id: 1,
delete_source: false,
@@ -525,7 +525,10 @@ fn default_file_settings() -> crate::db::FileSettings {
}
}
-fn matches_generated_output_pattern(path: &Path, settings: &crate::db::FileSettings) -> bool {
+pub(crate) fn matches_generated_output_pattern(
+ path: &Path,
+ settings: &crate::db::FileSettings,
+) -> bool {
let expected_extension = settings.output_extension.trim_start_matches('.');
if !expected_extension.is_empty() {
let actual_extension = match path.extension().and_then(|extension| extension.to_str()) {
@@ -548,7 +551,7 @@ fn matches_generated_output_pattern(path: &Path, settings: &crate::db::FileSetti
.is_some_and(|stem| stem.ends_with(suffix))
}
-async fn skip_reason_for_discovered_path(
+pub async fn skip_reason_for_discovered_path(
db: &crate::db::Db,
path: &Path,
settings: &crate::db::FileSettings,
diff --git a/src/media/processor.rs b/src/media/processor.rs
index 990330e..0a2184e 100644
--- a/src/media/processor.rs
+++ b/src/media/processor.rs
@@ -170,9 +170,9 @@ impl Agent {
pub fn set_boot_analyzing(&self, value: bool) {
self.analyzing_boot.store(value, Ordering::SeqCst);
if value {
- info!("Boot analysis started — engine claim loop paused.");
+ debug!("Boot analysis started — engine claim loop paused.");
} else {
- info!("Boot analysis complete — engine claim loop resumed.");
+ debug!("Boot analysis complete — engine claim loop resumed.");
}
}
@@ -218,7 +218,7 @@ impl Agent {
/// semaphore permit.
async fn _run_analysis_pass(&self) {
self.set_boot_analyzing(true);
- info!("Auto-analysis: starting pass...");
+ debug!("Auto-analysis: starting pass...");
// NOTE: reset_interrupted_jobs is intentionally
// NOT called here. It is a one-time startup
@@ -244,7 +244,7 @@ impl Agent {
}
let batch_len = batch.len();
- info!("Auto-analysis: analyzing {} job(s)...", batch_len);
+ debug!("Auto-analysis: analyzing {} job(s)...", batch_len);
for job in batch {
let pipeline = self.pipeline();
@@ -264,9 +264,9 @@ impl Agent {
self.set_boot_analyzing(false);
if total_analyzed == 0 {
- info!("Auto-analysis: no jobs pending analysis.");
+ debug!("Auto-analysis: no jobs pending analysis.");
} else {
- info!(
+ debug!(
"Auto-analysis: complete. {} job(s) analyzed.",
total_analyzed
);
@@ -359,7 +359,7 @@ impl Agent {
}
pub async fn run_loop(self: Arc) {
- info!("Agent loop started.");
+ debug!("Agent loop started.");
loop {
// Block while paused OR while boot analysis runs
if self.is_paused() || self.is_boot_analyzing() {
diff --git a/src/media/scanner.rs b/src/media/scanner.rs
index a2a8c54..c399067 100644
--- a/src/media/scanner.rs
+++ b/src/media/scanner.rs
@@ -2,7 +2,7 @@ use rayon::prelude::*;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
-use tracing::{debug, error, info};
+use tracing::{debug, error};
use walkdir::WalkDir;
use crate::media::pipeline::DiscoveredMedia;
@@ -45,7 +45,7 @@ impl Scanner {
);
directories.into_par_iter().for_each(|(dir, recursive)| {
- info!("Scanning directory: {:?} (recursive: {})", dir, recursive);
+ debug!("Scanning directory: {:?} (recursive: {})", dir, recursive);
let mut local_files = Vec::new();
let source_roots = source_roots.clone();
let walker = if recursive {
@@ -90,7 +90,6 @@ impl Scanner {
// Deterministic ordering
final_files.sort_by(|a, b| a.path.cmp(&b.path));
- info!("Found {} candidate media files", final_files.len());
final_files
}
}
diff --git a/src/server/middleware.rs b/src/server/middleware.rs
index c3cff05..fd20d7d 100644
--- a/src/server/middleware.rs
+++ b/src/server/middleware.rs
@@ -76,6 +76,18 @@ pub(crate) async fn auth_middleware(
let path = req.uri().path();
let method = req.method().clone();
+ if state.setup_required.load(Ordering::Relaxed)
+ && path != "/api/health"
+ && path != "/api/ready"
+ && !request_is_lan(&req)
+ {
+ return (
+ StatusCode::FORBIDDEN,
+ "Alchemist setup is only available from the local network",
+ )
+ .into_response();
+ }
+
// 1. API Protection: Only lock down /api routes
if path.starts_with("/api") {
// Public API endpoints
@@ -92,28 +104,7 @@ pub(crate) async fn auth_middleware(
return next.run(req).await;
}
if state.setup_required.load(Ordering::Relaxed) && path.starts_with("/api/fs/") {
- // Only allow filesystem browsing from localhost
- // during setup — no account exists yet so we
- // cannot authenticate the caller.
- let connect_info = req.extensions().get::>();
- let is_local = connect_info
- .map(|ci| {
- let ip = ci.0.ip();
- ip.is_loopback()
- })
- .unwrap_or(false);
-
- if is_local {
- return next.run(req).await;
- }
- // Non-local request during setup -> 403
- return Response::builder()
- .status(StatusCode::FORBIDDEN)
- .body(axum::body::Body::from(
- "Filesystem browsing is only available \
- from localhost during setup",
- ))
- .unwrap_or_else(|_| StatusCode::FORBIDDEN.into_response());
+ return next.run(req).await;
}
if state.setup_required.load(Ordering::Relaxed) && path == "/api/settings/bundle" {
return next.run(req).await;
@@ -157,6 +148,10 @@ pub(crate) async fn auth_middleware(
next.run(req).await
}
+fn request_is_lan(req: &Request) -> bool {
+ request_ip(req).is_some_and(is_lan_ip)
+}
+
fn read_only_api_token_allows(method: &Method, path: &str) -> bool {
if *method != Method::GET && *method != Method::HEAD {
return false;
@@ -314,3 +309,10 @@ fn is_trusted_peer(ip: IpAddr) -> bool {
IpAddr::V6(v6) => v6.is_loopback() || v6.is_unique_local() || v6.is_unicast_link_local(),
}
}
+
+fn is_lan_ip(ip: IpAddr) -> bool {
+ match ip {
+ IpAddr::V4(v4) => v4.is_loopback() || v4.is_private() || v4.is_link_local(),
+ IpAddr::V6(v6) => v6.is_loopback() || v6.is_unique_local() || v6.is_unicast_link_local(),
+ }
+}
diff --git a/src/server/mod.rs b/src/server/mod.rs
index 398f15a..8a25495 100644
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -25,7 +25,7 @@ use axum::{
extract::State,
http::{StatusCode, Uri, header},
middleware as axum_middleware,
- response::{IntoResponse, Redirect, Response},
+ response::{IntoResponse, Response},
routing::{delete, get, post},
};
#[cfg(feature = "embed-web")]
@@ -81,7 +81,6 @@ pub struct AppState {
pub library_scanner: Arc,
pub config_path: PathBuf,
pub config_mutable: bool,
- pub base_url: String,
pub hardware_state: HardwareState,
pub hardware_probe_log: Arc>,
pub resources_cache: Arc>>,
@@ -146,11 +145,6 @@ pub async fn run_server(args: RunServerArgs) -> Result<()> {
sys.refresh_cpu_usage();
sys.refresh_memory();
- let base_url = {
- let config = config.read().await;
- config.system.base_url.clone()
- };
-
let state = Arc::new(AppState {
db,
config,
@@ -168,7 +162,6 @@ pub async fn run_server(args: RunServerArgs) -> Result<()> {
library_scanner,
config_path,
config_mutable,
- base_url: base_url.clone(),
hardware_state,
hardware_probe_log,
resources_cache: Arc::new(tokio::sync::Mutex::new(None)),
@@ -180,18 +173,7 @@ pub async fn run_server(args: RunServerArgs) -> Result<()> {
// Clone agent for shutdown handler before moving state into router
let shutdown_agent = state.agent.clone();
- let inner_app = app_router(state.clone());
- let app = if base_url.is_empty() {
- inner_app
- } else {
- let redirect_target = format!("{base_url}/");
- Router::new()
- .route(
- "/",
- get(move || async move { Redirect::permanent(&redirect_target) }),
- )
- .nest(&base_url, inner_app)
- };
+ let app = app_router(state.clone());
let port = std::env::var("ALCHEMIST_SERVER_PORT")
.ok()
@@ -828,7 +810,7 @@ async fn index_handler(State(state): State>) -> impl IntoResponse
static_handler(State(state), Uri::from_static("/index.html")).await
}
-async fn static_handler(State(state): State>, uri: Uri) -> impl IntoResponse {
+async fn static_handler(State(_state): State>, uri: Uri) -> impl IntoResponse {
let raw_path = uri.path().trim_start_matches('/');
let path = match sanitize_asset_path(raw_path) {
Some(path) => path,
@@ -837,11 +819,7 @@ async fn static_handler(State(state): State>, uri: Uri) -> impl In
if let Some(content) = load_static_asset(&path) {
let mime = mime_guess::from_path(&path).first_or_octet_stream();
- return (
- [(header::CONTENT_TYPE, mime.as_ref())],
- maybe_inject_base_url(content, mime.as_ref(), &state.base_url),
- )
- .into_response();
+ return ([(header::CONTENT_TYPE, mime.as_ref())], content).into_response();
}
// Attempt to serve index.html for directory paths (e.g. /jobs -> jobs/index.html)
@@ -849,11 +827,7 @@ async fn static_handler(State(state): State>, uri: Uri) -> impl In
let index_path = format!("{}/index.html", path);
if let Some(content) = load_static_asset(&index_path) {
let mime = mime_guess::from_path("index.html").first_or_octet_stream();
- return (
- [(header::CONTENT_TYPE, mime.as_ref())],
- maybe_inject_base_url(content, mime.as_ref(), &state.base_url),
- )
- .into_response();
+ return ([(header::CONTENT_TYPE, mime.as_ref())], content).into_response();
}
}
@@ -890,14 +864,3 @@ async fn static_handler(State(state): State>, uri: Uri) -> impl In
// Default fallback to 404 for missing files.
StatusCode::NOT_FOUND.into_response()
}
-
-fn maybe_inject_base_url(content: Vec, mime: &str, base_url: &str) -> Vec {
- if !mime.starts_with("text/html") {
- return content;
- }
- let Ok(text) = String::from_utf8(content.clone()) else {
- return content;
- };
- text.replace("__ALCHEMIST_BASE_URL__", base_url)
- .into_bytes()
-}
diff --git a/src/server/tests.rs b/src/server/tests.rs
index 9cb0a4b..d4941fc 100644
--- a/src/server/tests.rs
+++ b/src/server/tests.rs
@@ -114,7 +114,6 @@ where
library_scanner: Arc::new(crate::system::scanner::LibraryScanner::new(db, config)),
config_path: config_path.clone(),
config_mutable: true,
- base_url: String::new(),
hardware_state,
hardware_probe_log,
resources_cache: Arc::new(tokio::sync::Mutex::new(None)),
@@ -211,6 +210,17 @@ fn remote_request(method: Method, uri: &str, body: Body) -> Request {
request
}
+fn lan_request(method: Method, uri: &str, body: Body) -> Request {
+ let mut request = match Request::builder().method(method).uri(uri).body(body) {
+ Ok(request) => request,
+ Err(err) => panic!("failed to build LAN request: {err}"),
+ };
+ request
+ .extensions_mut()
+ .insert(ConnectInfo(SocketAddr::from(([192, 168, 1, 25], 3000))));
+ request
+}
+
async fn body_text(response: axum::response::Response) -> String {
let bytes = match to_bytes(response.into_body(), usize::MAX).await {
Ok(bytes) => bytes,
@@ -740,32 +750,6 @@ async fn read_only_api_token_cannot_access_settings_config()
Ok(())
}
-#[tokio::test]
-async fn nested_base_url_routes_engine_status_through_auth_middleware()
--> std::result::Result<(), Box> {
- let (state, _app, config_path, db_path) = build_test_app(false, 8, |config| {
- config.system.base_url = "/alchemist".to_string();
- })
- .await?;
- let token = create_session(state.db.as_ref()).await?;
- let app = Router::new().nest("/alchemist", app_router(state.clone()));
-
- let response = app
- .oneshot(auth_request(
- Method::GET,
- "/alchemist/api/engine/status",
- &token,
- Body::empty(),
- ))
- .await?;
- assert_eq!(response.status(), StatusCode::OK);
-
- drop(state);
- let _ = std::fs::remove_file(config_path);
- let _ = std::fs::remove_file(db_path);
- Ok(())
-}
-
#[tokio::test]
async fn hardware_probe_log_route_returns_runtime_log()
-> std::result::Result<(), Box> {
@@ -818,12 +802,11 @@ async fn setup_complete_updates_runtime_hardware_without_mirroring_watch_dirs()
let response = app
.clone()
- .oneshot(
- Request::builder()
- .method(Method::POST)
- .uri("/api/setup/complete")
- .header(header::CONTENT_TYPE, "application/json")
- .body(Body::from(
+ .oneshot({
+ let mut request = localhost_request(
+ Method::POST,
+ "/api/setup/complete",
+ Body::from(
json!({
"username": "admin",
"password": "password123",
@@ -838,9 +821,14 @@ async fn setup_complete_updates_runtime_hardware_without_mirroring_watch_dirs()
"quality_profile": "balanced"
})
.to_string(),
- ))
- .unwrap_or_else(|err| panic!("failed to build setup completion request: {err}")),
- )
+ ),
+ );
+ request.headers_mut().insert(
+ header::CONTENT_TYPE,
+ axum::http::HeaderValue::from_static("application/json"),
+ );
+ request
+ })
.await?;
assert_eq!(response.status(), StatusCode::OK);
@@ -932,23 +920,25 @@ async fn setup_complete_accepts_nested_settings_payload()
let response = app
.clone()
- .oneshot(
- Request::builder()
- .method(Method::POST)
- .uri("/api/setup/complete")
- .header(header::CONTENT_TYPE, "application/json")
- .body(Body::from(
+ .oneshot({
+ let mut request = localhost_request(
+ Method::POST,
+ "/api/setup/complete",
+ Body::from(
json!({
"username": "admin",
"password": "password123",
"settings": settings,
})
.to_string(),
- ))
- .unwrap_or_else(|err| {
- panic!("failed to build nested setup completion request: {err}")
- }),
- )
+ ),
+ );
+ request.headers_mut().insert(
+ header::CONTENT_TYPE,
+ axum::http::HeaderValue::from_static("application/json"),
+ );
+ request
+ })
.await?;
assert_eq!(response.status(), StatusCode::OK);
assert!(
@@ -981,23 +971,25 @@ async fn setup_complete_rejects_nested_settings_without_library_directories()
let response = app
.clone()
- .oneshot(
- Request::builder()
- .method(Method::POST)
- .uri("/api/setup/complete")
- .header(header::CONTENT_TYPE, "application/json")
- .body(Body::from(
+ .oneshot({
+ let mut request = localhost_request(
+ Method::POST,
+ "/api/setup/complete",
+ Body::from(
json!({
"username": "admin",
"password": "password123",
"settings": settings,
})
.to_string(),
- ))
- .unwrap_or_else(|err| {
- panic!("failed to build nested setup rejection request: {err}")
- }),
- )
+ ),
+ );
+ request.headers_mut().insert(
+ header::CONTENT_TYPE,
+ axum::http::HeaderValue::from_static("application/json"),
+ );
+ request
+ })
.await?;
assert_eq!(response.status(), StatusCode::BAD_REQUEST);
let body = body_text(response).await;
@@ -1076,7 +1068,7 @@ async fn fs_endpoints_require_loopback_during_setup()
.await?;
assert_eq!(browse_response.status(), StatusCode::FORBIDDEN);
let browse_body = body_text(browse_response).await;
- assert!(browse_body.contains("Filesystem browsing is only available"));
+ assert!(browse_body.contains("local network"));
let mut preview_request = remote_request(
Method::POST,
@@ -1096,12 +1088,78 @@ async fn fs_endpoints_require_loopback_during_setup()
let preview_response = app.clone().oneshot(preview_request).await?;
assert_eq!(preview_response.status(), StatusCode::FORBIDDEN);
let preview_body = body_text(preview_response).await;
- assert!(preview_body.contains("Filesystem browsing is only available"));
+ assert!(preview_body.contains("local network"));
cleanup_paths(&[browse_root, config_path, db_path]);
Ok(())
}
+#[tokio::test]
+async fn setup_html_routes_allow_lan_clients() -> std::result::Result<(), Box>
+{
+ let (_state, app, config_path, db_path) = build_test_app(true, 8, |_| {}).await?;
+
+ let response = app
+ .clone()
+ .oneshot(lan_request(Method::GET, "/setup", Body::empty()))
+ .await?;
+ assert_ne!(response.status(), StatusCode::FORBIDDEN);
+
+ cleanup_paths(&[config_path, db_path]);
+ Ok(())
+}
+
+#[tokio::test]
+async fn setup_html_routes_reject_public_clients()
+-> std::result::Result<(), Box> {
+ let (_state, app, config_path, db_path) = build_test_app(true, 8, |_| {}).await?;
+
+ let response = app
+ .clone()
+ .oneshot(remote_request(Method::GET, "/setup", Body::empty()))
+ .await?;
+ assert_eq!(response.status(), StatusCode::FORBIDDEN);
+ let body = body_text(response).await;
+ assert!(body.contains("only available from the local network"));
+
+ cleanup_paths(&[config_path, db_path]);
+ Ok(())
+}
+
+#[tokio::test]
+async fn setup_status_rejects_public_clients_during_setup()
+-> std::result::Result<(), Box> {
+ let (_state, app, config_path, db_path) = build_test_app(true, 8, |_| {}).await?;
+
+ let response = app
+ .clone()
+ .oneshot(remote_request(
+ Method::GET,
+ "/api/setup/status",
+ Body::empty(),
+ ))
+ .await?;
+ assert_eq!(response.status(), StatusCode::FORBIDDEN);
+
+ cleanup_paths(&[config_path, db_path]);
+ Ok(())
+}
+
+#[tokio::test]
+async fn public_clients_can_reach_login_after_setup()
+-> std::result::Result<(), Box> {
+ let (_state, app, config_path, db_path) = build_test_app(false, 8, |_| {}).await?;
+
+ let response = app
+ .clone()
+ .oneshot(remote_request(Method::GET, "/login", Body::empty()))
+ .await?;
+ assert_ne!(response.status(), StatusCode::FORBIDDEN);
+
+ cleanup_paths(&[config_path, db_path]);
+ Ok(())
+}
+
#[tokio::test]
async fn settings_bundle_requires_auth_after_setup()
-> std::result::Result<(), Box> {
diff --git a/web/src/components/AuthGuard.tsx b/web/src/components/AuthGuard.tsx
index bc34adc..1cd14ef 100644
--- a/web/src/components/AuthGuard.tsx
+++ b/web/src/components/AuthGuard.tsx
@@ -1,6 +1,5 @@
import { useEffect } from "react";
import { apiFetch, apiJson } from "../lib/api";
-import { stripBasePath, withBasePath } from "../lib/basePath";
interface SetupStatus {
setup_required?: boolean;
@@ -11,7 +10,7 @@ export default function AuthGuard() {
let cancelled = false;
const checkAuth = async () => {
- const path = stripBasePath(window.location.pathname);
+ const path = window.location.pathname;
const isAuthPage = path.startsWith("/login") || path.startsWith("/setup");
if (isAuthPage) {
return;
@@ -28,9 +27,7 @@ export default function AuthGuard() {
return;
}
- window.location.href = setupStatus.setup_required
- ? withBasePath("/setup")
- : withBasePath("/login");
+ window.location.href = setupStatus.setup_required ? "/setup" : "/login";
} catch {
// Keep user on current page on transient backend/network failures.
}
diff --git a/web/src/components/ConversionTool.tsx b/web/src/components/ConversionTool.tsx
index 6c31cda..85b601b 100644
--- a/web/src/components/ConversionTool.tsx
+++ b/web/src/components/ConversionTool.tsx
@@ -1,7 +1,6 @@
import { useEffect, useState } from "react";
import { Upload, Wand2, Play, Download, Trash2 } from "lucide-react";
import { apiAction, apiFetch, apiJson, isApiError } from "../lib/api";
-import { withBasePath } from "../lib/basePath";
import { showToast } from "../lib/toast";
interface SubtitleStreamMetadata {
@@ -105,7 +104,7 @@ const DEFAULT_SETTINGS: ConversionSettings = {
},
};
-export default function ConversionTool() {
+export function ConversionTool() {
const [uploading, setUploading] = useState(false);
const [previewing, setPreviewing] = useState(false);
const [starting, setStarting] = useState(false);
@@ -121,13 +120,14 @@ export default function ConversionTool() {
const id = window.setInterval(() => {
void apiJson(`/api/conversion/jobs/${conversionJobId}`)
.then(setStatus)
- .catch(() => {});
+ .catch(() => {
+ });
}, 2000);
return () => window.clearInterval(id);
}, [conversionJobId]);
const updateSettings = (patch: Partial) => {
- setSettings((current) => ({ ...current, ...patch }));
+ setSettings((current) => ({...current, ...patch}));
};
const uploadFile = async (file: File) => {
@@ -157,7 +157,7 @@ export default function ConversionTool() {
} catch (err) {
const message = err instanceof Error ? err.message : "Upload failed";
setError(message);
- showToast({ kind: "error", title: "Conversion", message });
+ showToast({kind: "error", title: "Conversion", message});
} finally {
setUploading(false);
}
@@ -169,7 +169,7 @@ export default function ConversionTool() {
try {
const payload = await apiJson("/api/conversion/preview", {
method: "POST",
- headers: { "Content-Type": "application/json" },
+ headers: {"Content-Type": "application/json"},
body: JSON.stringify({
conversion_job_id: conversionJobId,
settings,
@@ -177,11 +177,11 @@ export default function ConversionTool() {
});
setSettings(payload.normalized_settings);
setCommandPreview(payload.command_preview);
- showToast({ kind: "success", title: "Conversion", message: "Preview updated." });
+ showToast({kind: "success", title: "Conversion", message: "Preview updated."});
} catch (err) {
const message = isApiError(err) ? err.message : "Preview failed";
setError(message);
- showToast({ kind: "error", title: "Conversion", message });
+ showToast({kind: "error", title: "Conversion", message});
} finally {
setPreviewing(false);
}
@@ -191,14 +191,14 @@ export default function ConversionTool() {
if (!conversionJobId) return;
setStarting(true);
try {
- await apiAction(`/api/conversion/jobs/${conversionJobId}/start`, { method: "POST" });
+ await apiAction(`/api/conversion/jobs/${conversionJobId}/start`, {method: "POST"});
const payload = await apiJson(`/api/conversion/jobs/${conversionJobId}`);
setStatus(payload);
- showToast({ kind: "success", title: "Conversion", message: "Conversion job queued." });
+ showToast({kind: "success", title: "Conversion", message: "Conversion job queued."});
} catch (err) {
const message = isApiError(err) ? err.message : "Failed to start conversion";
setError(message);
- showToast({ kind: "error", title: "Conversion", message });
+ showToast({kind: "error", title: "Conversion", message});
} finally {
setStarting(false);
}
@@ -207,23 +207,23 @@ export default function ConversionTool() {
const remove = async () => {
if (!conversionJobId) return;
try {
- await apiAction(`/api/conversion/jobs/${conversionJobId}`, { method: "DELETE" });
+ await apiAction(`/api/conversion/jobs/${conversionJobId}`, {method: "DELETE"});
setConversionJobId(null);
setProbe(null);
setStatus(null);
setSettings(DEFAULT_SETTINGS);
setCommandPreview("");
- showToast({ kind: "success", title: "Conversion", message: "Conversion job removed." });
+ showToast({kind: "success", title: "Conversion", message: "Conversion job removed."});
} catch (err) {
const message = isApiError(err) ? err.message : "Failed to remove conversion job";
setError(message);
- showToast({ kind: "error", title: "Conversion", message });
+ showToast({kind: "error", title: "Conversion", message});
}
};
const download = async () => {
if (!conversionJobId) return;
- window.location.href = withBasePath(`/api/conversion/jobs/${conversionJobId}/download`);
+ window.location.href = `/api/conversion/jobs/${conversionJobId}/download`;
};
return (
@@ -231,22 +231,26 @@ export default function ConversionTool() {
Conversion / Remux
- Upload a single file, inspect the streams, preview the generated FFmpeg command, and run it through Alchemist.
+ Upload a single file, inspect the streams, preview the generated FFmpeg command, and run it through
+ Alchemist.