From 6e382ab4be69a9c62bed886e3e181daf72539667 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Fri, 1 May 2026 09:34:37 -0700 Subject: [PATCH 01/29] feat(release-tracking): add series_tracking sidecar and series_aliases schema Introduce the schema and repository layer for tracked-series release discovery. Establishes the data model that downstream release-source plugins (Nyaa, MangaDex, Suwayomi, MangaUpdates) and the metadata-provider piggyback path will write announcements against. - migration: new m20260503_000072_create_release_tracking adds two tables. series_tracking is a 1:1 sidecar on series (FK cascade) carrying the tracked flag, status, latest known external chapter/volume, and per-series overrides. series_aliases stores matcher-oriented aliases alongside a Unicode-normalized form. - entities: SeaORM models for both new tables, with reverse relations wired on the series entity (has_one tracking, has_many aliases) so cascades propagate cleanly. - repositories: SeriesTrackingRepository (upsert with Option<Option<T>> clear-vs-leave-alone semantics, status validation, list/count of tracked IDs) and SeriesAliasRepository (idempotent create, bulk_create with insert counting, find_by_normalized for cross-series matching, delete_by_source_for_series for refreshing metadata-derived aliases without touching manual ones). - normalization: Unicode-aware lowercase + strip non-alphanumeric + collapse whitespace, used to compare incoming release titles against stored aliases. External IDs reuse the existing series_external_ids table rather than a parallel structure. series_alternate_titles is intentionally untouched - it is purpose-built for labelled localized titles, which is the wrong shape for arbitrary matcher aliases. Includes unit and integration tests covering entity normalization, status validation, repo CRUD, idempotent inserts, and FK cascade on series delete. 
--- migration/src/lib.rs | 4 + ...20260503_000072_create_release_tracking.rs | 256 +++++++++++ src/db/entities/mod.rs | 2 + src/db/entities/series.rs | 17 + src/db/entities/series_aliases.rs | 135 ++++++ src/db/entities/series_tracking.rs | 85 ++++ src/db/repositories/mod.rs | 6 + src/db/repositories/series_aliases.rs | 408 +++++++++++++++++ src/db/repositories/series_tracking.rs | 409 ++++++++++++++++++ 9 files changed, 1322 insertions(+) create mode 100644 migration/src/m20260503_000072_create_release_tracking.rs create mode 100644 src/db/entities/series_aliases.rs create mode 100644 src/db/entities/series_tracking.rs create mode 100644 src/db/repositories/series_aliases.rs create mode 100644 src/db/repositories/series_tracking.rs diff --git a/migration/src/lib.rs b/migration/src/lib.rs index 46de62c6..5866cd98 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -147,6 +147,8 @@ mod m20260503_000070_backfill_book_volume_chapter; // Filename retains the original Phase 1 name for git-history continuity; module // now creates the generic `library_jobs` table instead of adding a JSON column. mod m20260503_000071_add_metadata_refresh_config; +// Release tracking (Phase 1): series_tracking sidecar + series_aliases +mod m20260503_000072_create_release_tracking; pub struct Migrator; @@ -264,6 +266,8 @@ impl MigratorTrait for Migrator { Box::new(m20260503_000070_backfill_book_volume_chapter::Migration), // Per-library scheduled metadata refresh config (Phase 1) Box::new(m20260503_000071_add_metadata_refresh_config::Migration), + // Release tracking (Phase 1) + Box::new(m20260503_000072_create_release_tracking::Migration), ] } } diff --git a/migration/src/m20260503_000072_create_release_tracking.rs b/migration/src/m20260503_000072_create_release_tracking.rs new file mode 100644 index 00000000..0eab5b06 --- /dev/null +++ b/migration/src/m20260503_000072_create_release_tracking.rs @@ -0,0 +1,256 @@ +//! 
Create release-tracking schema (Phase 1 of release-tracking implementation). +//! +//! Adds two tables that augment the existing `series` and `series_external_ids` +//! tables for tracked-series support: +//! +//! - `series_tracking` (1:1 with series, FK cascade): per-series flag + status +//! metadata describing whether the series is being tracked for releases, and +//! the latest known external chapter/volume so the matcher can compute +//! "behind by N." +//! - `series_aliases`: title aliases used by sources without ID-based matching +//! (e.g. Nyaa). Distinct from `series_alternate_titles`, which is purpose-built +//! for labelled localized titles (Japanese/Romaji/English/Korean) - aliases +//! are arbitrary normalized strings used solely for matching incoming release +//! titles against tracked series. +//! +//! External IDs (MangaDex UUID, AniList, MAL, etc.) are stored in the existing +//! `series_external_ids` table and are NOT duplicated here. + +use sea_orm_migration::prelude::*; + +use crate::m20260103_000003_create_series::Series; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let is_postgres = manager.get_database_backend() == sea_orm::DatabaseBackend::Postgres; + + // ---------- series_tracking ---------- + let mut tracking = Table::create(); + tracking + .table(SeriesTracking::Table) + .if_not_exists() + // Primary key is series_id (1:1 sidecar). 
+ .col( + ColumnDef::new(SeriesTracking::SeriesId) + .uuid() + .not_null() + .primary_key(), + ) + .col( + ColumnDef::new(SeriesTracking::Tracked) + .boolean() + .not_null() + .default(false), + ) + // 'ongoing' | 'complete' | 'hiatus' | 'cancelled' | 'unknown' + .col( + ColumnDef::new(SeriesTracking::TrackingStatus) + .string_len(20) + .not_null() + .default("unknown"), + ) + .col( + ColumnDef::new(SeriesTracking::TrackChapters) + .boolean() + .not_null() + .default(true), + ) + .col( + ColumnDef::new(SeriesTracking::TrackVolumes) + .boolean() + .not_null() + .default(true), + ) + // Latest external chapter (decimal to handle 12.5 etc.) and volume. + .col(ColumnDef::new(SeriesTracking::LatestKnownChapter).double()) + .col(ColumnDef::new(SeriesTracking::LatestKnownVolume).integer()) + // Sparse map: { "": { "first": , "last": } } + .col(ColumnDef::new(SeriesTracking::VolumeChapterMap).json_binary()) + // Per-series overrides (null = use source/server default). + .col(ColumnDef::new(SeriesTracking::PollIntervalOverrideS).integer()) + .col(ColumnDef::new(SeriesTracking::ConfidenceThresholdOverride).double()) + .col({ + let mut col = ColumnDef::new(SeriesTracking::CreatedAt); + col.timestamp_with_time_zone().not_null(); + if is_postgres { + col.extra("DEFAULT NOW()"); + } else { + col.extra("DEFAULT CURRENT_TIMESTAMP"); + } + col + }) + .col({ + let mut col = ColumnDef::new(SeriesTracking::UpdatedAt); + col.timestamp_with_time_zone().not_null(); + if is_postgres { + col.extra("DEFAULT NOW()"); + } else { + col.extra("DEFAULT CURRENT_TIMESTAMP"); + } + col + }) + .foreign_key( + ForeignKey::create() + .name("fk_series_tracking_series_id") + .from(SeriesTracking::Table, SeriesTracking::SeriesId) + .to(Series::Table, Series::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::NoAction), + ); + + manager.create_table(tracking.to_owned()).await?; + + // Partial index for the hot path: "list all tracked series." 
+ // Use raw SQL because the DSL's partial-index support is uneven + // across SQLite/Postgres in our SeaORM version. + manager + .get_connection() + .execute_unprepared( + "CREATE INDEX idx_series_tracking_tracked \ + ON series_tracking(series_id) WHERE tracked = TRUE", + ) + .await?; + + // ---------- series_aliases ---------- + let mut aliases = Table::create(); + aliases.table(SeriesAliases::Table).if_not_exists(); + + if is_postgres { + aliases.col( + ColumnDef::new(SeriesAliases::Id) + .uuid() + .not_null() + .primary_key() + .extra("DEFAULT gen_random_uuid()"), + ); + } else { + aliases.col( + ColumnDef::new(SeriesAliases::Id) + .uuid() + .not_null() + .primary_key(), + ); + } + + aliases + .col(ColumnDef::new(SeriesAliases::SeriesId).uuid().not_null()) + .col( + ColumnDef::new(SeriesAliases::Alias) + .string_len(500) + .not_null(), + ) + // Lowercased + punctuation-stripped, used for matching. + .col( + ColumnDef::new(SeriesAliases::Normalized) + .string_len(500) + .not_null(), + ) + // 'metadata' | 'manual' + .col( + ColumnDef::new(SeriesAliases::Source) + .string_len(20) + .not_null(), + ) + .col({ + let mut col = ColumnDef::new(SeriesAliases::CreatedAt); + col.timestamp_with_time_zone().not_null(); + if is_postgres { + col.extra("DEFAULT NOW()"); + } else { + col.extra("DEFAULT CURRENT_TIMESTAMP"); + } + col + }) + .foreign_key( + ForeignKey::create() + .name("fk_series_aliases_series_id") + .from(SeriesAliases::Table, SeriesAliases::SeriesId) + .to(Series::Table, Series::Id) + .on_delete(ForeignKeyAction::Cascade) + .on_update(ForeignKeyAction::NoAction), + ); + + manager.create_table(aliases.to_owned()).await?; + + // Unique on (series_id, alias) - same alias can't be added twice for one series, + // but the same alias string can exist on different series (which is fine and + // expected for ambiguous titles). 
+ manager + .create_index( + Index::create() + .name("idx_series_aliases_unique") + .table(SeriesAliases::Table) + .col(SeriesAliases::SeriesId) + .col(SeriesAliases::Alias) + .unique() + .to_owned(), + ) + .await?; + + // Index on normalized for matcher lookups (most-frequent access pattern). + manager + .create_index( + Index::create() + .name("idx_series_aliases_normalized") + .table(SeriesAliases::Table) + .col(SeriesAliases::Normalized) + .to_owned(), + ) + .await?; + + // FK index for joins back to series. + manager + .create_index( + Index::create() + .name("idx_series_aliases_series_id") + .table(SeriesAliases::Table) + .col(SeriesAliases::SeriesId) + .to_owned(), + ) + .await?; + + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .drop_table(Table::drop().table(SeriesAliases::Table).to_owned()) + .await?; + manager + .drop_table(Table::drop().table(SeriesTracking::Table).to_owned()) + .await?; + Ok(()) + } +} + +#[derive(DeriveIden)] +pub enum SeriesTracking { + Table, + SeriesId, + Tracked, + TrackingStatus, + TrackChapters, + TrackVolumes, + LatestKnownChapter, + LatestKnownVolume, + VolumeChapterMap, + PollIntervalOverrideS, + ConfidenceThresholdOverride, + CreatedAt, + UpdatedAt, +} + +#[derive(DeriveIden)] +pub enum SeriesAliases { + Table, + Id, + SeriesId, + Alias, + Normalized, + Source, + CreatedAt, +} diff --git a/src/db/entities/mod.rs b/src/db/entities/mod.rs index 62265e51..1c21af4f 100644 --- a/src/db/entities/mod.rs +++ b/src/db/entities/mod.rs @@ -42,6 +42,7 @@ pub mod user_plugins; // Series metadata enhancement entities pub mod genres; +pub mod series_aliases; pub mod series_alternate_titles; pub mod series_covers; pub mod series_exports; @@ -51,6 +52,7 @@ pub mod series_external_ratings; pub mod series_genres; pub mod series_metadata; pub mod series_tags; +pub mod series_tracking; pub mod tags; pub mod user_preferences; pub mod user_series_ratings; diff --git a/src/db/entities/series.rs 
b/src/db/entities/series.rs index 104d9d88..4db670f3 100644 --- a/src/db/entities/series.rs +++ b/src/db/entities/series.rs @@ -60,6 +60,11 @@ pub enum Relation { UserSeriesRatings, #[sea_orm(has_many = "super::series_sharing_tags::Entity")] SeriesSharingTags, + // Release tracking sidecar (1:1) and matcher aliases. + #[sea_orm(has_one = "super::series_tracking::Entity")] + SeriesTracking, + #[sea_orm(has_many = "super::series_aliases::Entity")] + SeriesAliases, } impl Related for Entity { @@ -168,4 +173,16 @@ impl Related for Entity { } } +impl Related for Entity { + fn to() -> RelationDef { + Relation::SeriesTracking.def() + } +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::SeriesAliases.def() + } +} + impl ActiveModelBehavior for ActiveModel {} diff --git a/src/db/entities/series_aliases.rs b/src/db/entities/series_aliases.rs new file mode 100644 index 00000000..a8c17dd9 --- /dev/null +++ b/src/db/entities/series_aliases.rs @@ -0,0 +1,135 @@ +//! `SeaORM` entity for the `series_aliases` table. +//! +//! Title aliases used by release-source plugins that match by title (e.g. +//! Nyaa). Distinct from `series_alternate_titles`, which is purpose-built for +//! labelled localized titles (Japanese / Romaji / English / Korean) with a +//! unique-per-label constraint - aliases here are arbitrary strings, normalized +//! for matching. + +use chrono::{DateTime, Utc}; +use sea_orm::entity::prelude::*; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Eq, Serialize, Deserialize)] +#[sea_orm(table_name = "series_aliases")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: Uuid, + pub series_id: Uuid, + /// The alias as displayed (preserves casing/punctuation for UI). + pub alias: String, + /// Lowercased + punctuation-stripped, used for matcher equality. + pub normalized: String, + /// 'metadata' | 'manual'. 
+ pub source: String, + pub created_at: DateTime, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::series::Entity", + from = "Column::SeriesId", + to = "super::series::Column::Id", + on_update = "NoAction", + on_delete = "Cascade" + )] + Series, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Series.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} + +/// Canonical strings for `source`. +pub mod alias_source { + pub const METADATA: &str = "metadata"; + pub const MANUAL: &str = "manual"; + + pub fn is_valid(s: &str) -> bool { + matches!(s, METADATA | MANUAL) + } +} + +/// Normalize an alias for matching: lowercase, strip non-alphanumeric, collapse whitespace. +/// +/// The normalization is intentionally aggressive: a release titled +/// `"My Series, Vol. 1 (Digital)"` and an alias stored as `"My Series"` should +/// share a common `normalized` prefix so a parser can match against the +/// normalized form. The raw `alias` field preserves the user's input for UI. +pub fn normalize_alias(input: &str) -> String { + let mut out = String::with_capacity(input.len()); + let mut last_was_space = false; + for ch in input.chars() { + if ch.is_alphanumeric() { + for lc in ch.to_lowercase() { + out.push(lc); + } + last_was_space = false; + } else if ch.is_whitespace() && !out.is_empty() && !last_was_space { + out.push(' '); + last_was_space = true; + } + // Any other punctuation/symbols get dropped. 
+ } + if out.ends_with(' ') { + out.pop(); + } + out +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn normalize_lowercases_and_strips_punctuation() { + assert_eq!(normalize_alias("My Hero Academia"), "my hero academia"); + assert_eq!(normalize_alias("My Hero Academia!"), "my hero academia"); + assert_eq!( + normalize_alias("Re:Zero - Starting Life in Another World"), + "rezero starting life in another world" + ); + } + + #[test] + fn normalize_collapses_whitespace() { + assert_eq!(normalize_alias(" Lots of spaces "), "lots of spaces"); + assert_eq!(normalize_alias("Tab\tand\nnewline"), "tab and newline"); + } + + #[test] + fn normalize_strips_digital_suffix_marker() { + // Tag suffixes commonly seen in Nyaa titles. + assert_eq!( + normalize_alias("My Series v01 (Digital)"), + "my series v01 digital" + ); + } + + #[test] + fn normalize_handles_unicode_lowercase() { + // Unicode lowercase round-trip (Greek, German). + assert_eq!(normalize_alias("ÄÖÜ"), "äöü"); + } + + #[test] + fn normalize_empty_input() { + assert_eq!(normalize_alias(""), ""); + assert_eq!(normalize_alias(" "), ""); + assert_eq!(normalize_alias("!!!---!!!"), ""); + } + + #[test] + fn alias_source_validates_known_values() { + assert!(alias_source::is_valid("metadata")); + assert!(alias_source::is_valid("manual")); + assert!(!alias_source::is_valid("auto")); + assert!(!alias_source::is_valid("")); + } +} diff --git a/src/db/entities/series_tracking.rs b/src/db/entities/series_tracking.rs new file mode 100644 index 00000000..97e56f47 --- /dev/null +++ b/src/db/entities/series_tracking.rs @@ -0,0 +1,85 @@ +//! `SeaORM` entity for the `series_tracking` table. +//! +//! 1:1 sidecar to `series` carrying release-tracking flags. Lives in its own +//! table (not on `series` directly) so the subsystem stays cleanly separable - +//! disabling release tracking is a no-join, and removing it later doesn't +//! require a destructive migration on the core series table. 
+ +use chrono::{DateTime, Utc}; +use sea_orm::entity::prelude::*; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Clone, Debug, PartialEq, DeriveEntityModel, Serialize, Deserialize)] +#[sea_orm(table_name = "series_tracking")] +pub struct Model { + /// Primary key AND foreign key to series.id (1:1 sidecar). + #[sea_orm(primary_key, auto_increment = false)] + pub series_id: Uuid, + /// Whether release tracking is enabled for this series. + pub tracked: bool, + /// 'ongoing' | 'complete' | 'hiatus' | 'cancelled' | 'unknown'. + pub tracking_status: String, + pub track_chapters: bool, + pub track_volumes: bool, + /// Latest external chapter (decimal handles 12.5, 110.1, etc.). + pub latest_known_chapter: Option, + pub latest_known_volume: Option, + /// Sparse map: `{ "": { "first": , "last": } }`. + pub volume_chapter_map: Option, + /// Per-series override of the source's poll interval (seconds). Null = use source default. + pub poll_interval_override_s: Option, + /// Per-series override of the server's confidence threshold. Null = use server default. + pub confidence_threshold_override: Option, + pub created_at: DateTime, + pub updated_at: DateTime, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation { + #[sea_orm( + belongs_to = "super::series::Entity", + from = "Column::SeriesId", + to = "super::series::Column::Id", + on_update = "NoAction", + on_delete = "Cascade" + )] + Series, +} + +impl Related for Entity { + fn to() -> RelationDef { + Relation::Series.def() + } +} + +impl ActiveModelBehavior for ActiveModel {} + +/// Canonical strings for `tracking_status`. 
+pub mod tracking_status { + pub const ONGOING: &str = "ongoing"; + pub const COMPLETE: &str = "complete"; + pub const HIATUS: &str = "hiatus"; + pub const CANCELLED: &str = "cancelled"; + pub const UNKNOWN: &str = "unknown"; + + pub fn is_valid(s: &str) -> bool { + matches!(s, ONGOING | COMPLETE | HIATUS | CANCELLED | UNKNOWN) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn tracking_status_validates_known_values() { + assert!(tracking_status::is_valid("ongoing")); + assert!(tracking_status::is_valid("complete")); + assert!(tracking_status::is_valid("hiatus")); + assert!(tracking_status::is_valid("cancelled")); + assert!(tracking_status::is_valid("unknown")); + assert!(!tracking_status::is_valid("paused")); + assert!(!tracking_status::is_valid("")); + } +} diff --git a/src/db/repositories/mod.rs b/src/db/repositories/mod.rs index 81aaa89a..3b78ab7b 100644 --- a/src/db/repositories/mod.rs +++ b/src/db/repositories/mod.rs @@ -18,10 +18,12 @@ pub mod plugin_failures; pub mod plugins; pub mod read_progress; pub mod series; +pub mod series_aliases; pub mod series_covers; pub mod series_export; pub mod series_external_id; pub mod series_metadata; +pub mod series_tracking; pub mod settings; pub mod tag; pub mod task; @@ -64,10 +66,14 @@ pub use plugin_failures::{FailureContext, PluginFailuresRepository}; pub use plugins::PluginsRepository; pub use read_progress::ReadProgressRepository; pub use series::{SeriesQueryOptions, SeriesQuerySort, SeriesRepository, SeriesSortFieldRepo}; +#[allow(unused_imports)] +pub use series_aliases::SeriesAliasRepository; pub use series_covers::SeriesCoversRepository; pub use series_export::SeriesExportRepository; pub use series_external_id::SeriesExternalIdRepository; pub use series_metadata::SeriesMetadataRepository; +#[allow(unused_imports)] +pub use series_tracking::{SeriesTrackingRepository, TrackingUpdate}; pub use settings::SettingsRepository; pub use tag::TagRepository; pub use task::TaskRepository; diff --git 
a/src/db/repositories/series_aliases.rs b/src/db/repositories/series_aliases.rs new file mode 100644 index 00000000..fdf595de --- /dev/null +++ b/src/db/repositories/series_aliases.rs @@ -0,0 +1,408 @@ +//! Repository for the `series_aliases` table. +//! +//! Title aliases used by release-source plugins to match incoming release +//! titles against tracked series when an external ID isn't available (e.g. +//! Nyaa). Distinct from `alternate_title.rs` which manages localized titles +//! with labels. + +#![allow(dead_code)] + +use anyhow::Result; +use chrono::Utc; +use sea_orm::{ + ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter, + Set, +}; +use std::collections::HashMap; +use uuid::Uuid; + +use crate::db::entities::series_aliases::{ + self, Entity as SeriesAliases, Model as SeriesAlias, alias_source, normalize_alias, +}; + +pub struct SeriesAliasRepository; + +impl SeriesAliasRepository { + /// Get an alias row by id. + pub async fn get_by_id(db: &DatabaseConnection, id: Uuid) -> Result> { + Ok(SeriesAliases::find_by_id(id).one(db).await?) + } + + /// Get all aliases for a series, ordered by alias for stable display. + pub async fn get_for_series( + db: &DatabaseConnection, + series_id: Uuid, + ) -> Result> { + use sea_orm::QueryOrder; + let results = SeriesAliases::find() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .order_by_asc(series_aliases::Column::Alias) + .all(db) + .await?; + Ok(results) + } + + /// Bulk-fetch aliases for many series, returned as a HashMap keyed by series_id. 
+ pub async fn get_for_series_ids( + db: &DatabaseConnection, + series_ids: &[Uuid], + ) -> Result>> { + if series_ids.is_empty() { + return Ok(HashMap::new()); + } + let results = SeriesAliases::find() + .filter(series_aliases::Column::SeriesId.is_in(series_ids.to_vec())) + .all(db) + .await?; + let mut map: HashMap> = HashMap::new(); + for row in results { + map.entry(row.series_id).or_default().push(row); + } + Ok(map) + } + + /// Find every series whose normalized alias equals `normalized`. + /// Returns rows so the caller can reach `series_id` and the original alias. + pub async fn find_by_normalized( + db: &DatabaseConnection, + normalized: &str, + ) -> Result> { + Ok(SeriesAliases::find() + .filter(series_aliases::Column::Normalized.eq(normalized)) + .all(db) + .await?) + } + + /// Create an alias. Returns the existing row if `(series_id, alias)` + /// already exists - aliases are idempotent on add. + pub async fn create( + db: &DatabaseConnection, + series_id: Uuid, + alias: &str, + source: &str, + ) -> Result { + if !alias_source::is_valid(source) { + anyhow::bail!("invalid alias source: {}", source); + } + let trimmed = alias.trim(); + if trimmed.is_empty() { + anyhow::bail!("alias cannot be empty"); + } + + // Idempotent on (series_id, alias). + if let Some(existing) = SeriesAliases::find() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .filter(series_aliases::Column::Alias.eq(trimmed)) + .one(db) + .await? + { + return Ok(existing); + } + + let normalized = normalize_alias(trimmed); + if normalized.is_empty() { + anyhow::bail!("alias normalizes to empty string"); + } + + let active = series_aliases::ActiveModel { + id: Set(Uuid::new_v4()), + series_id: Set(series_id), + alias: Set(trimmed.to_string()), + normalized: Set(normalized), + source: Set(source.to_string()), + created_at: Set(Utc::now()), + }; + Ok(active.insert(db).await?) + } + + /// Bulk-insert aliases for a series. Existing aliases (by normalized text) + /// are skipped. 
Returns the number of newly inserted rows. + pub async fn bulk_create( + db: &DatabaseConnection, + series_id: Uuid, + aliases: &[&str], + source: &str, + ) -> Result { + if !alias_source::is_valid(source) { + anyhow::bail!("invalid alias source: {}", source); + } + let mut inserted = 0; + for alias in aliases { + // Skip blanks defensively; create() also checks but a noisy upstream + // shouldn't cause a hard error here. + if alias.trim().is_empty() { + continue; + } + // create() is idempotent; we count only true inserts by checking before/after. + let before = Self::count_for_series_with_alias(db, series_id, alias.trim()).await?; + Self::create(db, series_id, alias, source).await?; + let after = Self::count_for_series_with_alias(db, series_id, alias.trim()).await?; + if after > before { + inserted += 1; + } + } + Ok(inserted) + } + + async fn count_for_series_with_alias( + db: &DatabaseConnection, + series_id: Uuid, + alias: &str, + ) -> Result { + let count = SeriesAliases::find() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .filter(series_aliases::Column::Alias.eq(alias)) + .count(db) + .await?; + Ok(count) + } + + /// Delete an alias by id. Returns true if a row was removed. + pub async fn delete(db: &DatabaseConnection, id: Uuid) -> Result { + let result = SeriesAliases::delete_by_id(id).exec(db).await?; + Ok(result.rows_affected > 0) + } + + /// Delete all aliases from a given source for a series. Useful for + /// "refresh metadata-sourced aliases" without touching manual aliases. + pub async fn delete_by_source_for_series( + db: &DatabaseConnection, + series_id: Uuid, + source: &str, + ) -> Result { + let result = SeriesAliases::delete_many() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .filter(series_aliases::Column::Source.eq(source)) + .exec(db) + .await?; + Ok(result.rows_affected) + } + + /// Delete all aliases for a series (independent of cascade). 
+ pub async fn delete_all_for_series(db: &DatabaseConnection, series_id: Uuid) -> Result { + let result = SeriesAliases::delete_many() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .exec(db) + .await?; + Ok(result.rows_affected) + } + + /// Count aliases for a series. + pub async fn count_for_series(db: &DatabaseConnection, series_id: Uuid) -> Result { + let count = SeriesAliases::find() + .filter(series_aliases::Column::SeriesId.eq(series_id)) + .count(db) + .await?; + Ok(count) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::ScanningStrategy; + use crate::db::repositories::{LibraryRepository, SeriesRepository}; + use crate::db::test_helpers::create_test_db; + + async fn make_two_series(db: &DatabaseConnection) -> (Uuid, Uuid) { + let library = LibraryRepository::create(db, "Lib", "/lib", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = SeriesRepository::create(db, library.id, "Series 1", None) + .await + .unwrap(); + let s2 = SeriesRepository::create(db, library.id, "Series 2", None) + .await + .unwrap(); + (s1.id, s2.id) + } + + #[tokio::test] + async fn create_inserts_with_normalized_form() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + let row = SeriesAliasRepository::create(conn, s1, "My Hero Academia!", "manual") + .await + .unwrap(); + assert_eq!(row.alias, "My Hero Academia!"); + assert_eq!(row.normalized, "my hero academia"); + assert_eq!(row.source, "manual"); + } + + #[tokio::test] + async fn create_is_idempotent_per_series() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + let r1 = SeriesAliasRepository::create(conn, s1, "Boku no Hero", "manual") + .await + .unwrap(); + let r2 = SeriesAliasRepository::create(conn, s1, "Boku no Hero", "manual") + .await + .unwrap(); + assert_eq!(r1.id, r2.id, "same alias on same series returns same row"); + 
+ let count = SeriesAliasRepository::count_for_series(conn, s1) + .await + .unwrap(); + assert_eq!(count, 1); + } + + #[tokio::test] + async fn same_alias_allowed_on_different_series() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, s2) = make_two_series(conn).await; + + let a = SeriesAliasRepository::create(conn, s1, "Common Title", "metadata") + .await + .unwrap(); + let b = SeriesAliasRepository::create(conn, s2, "Common Title", "metadata") + .await + .unwrap(); + assert_ne!(a.id, b.id); + assert_eq!(a.normalized, b.normalized); + } + + #[tokio::test] + async fn create_rejects_blank_or_punctuation_only() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + let err = SeriesAliasRepository::create(conn, s1, " ", "manual") + .await + .unwrap_err(); + assert!(err.to_string().contains("empty")); + + let err = SeriesAliasRepository::create(conn, s1, "!!!---!!!", "manual") + .await + .unwrap_err(); + assert!(err.to_string().contains("normalize")); + } + + #[tokio::test] + async fn create_rejects_invalid_source() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + let err = SeriesAliasRepository::create(conn, s1, "X", "auto") + .await + .unwrap_err(); + assert!(err.to_string().contains("invalid alias source")); + } + + #[tokio::test] + async fn find_by_normalized_returns_all_matches() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, s2) = make_two_series(conn).await; + + SeriesAliasRepository::create(conn, s1, "My Series", "manual") + .await + .unwrap(); + SeriesAliasRepository::create(conn, s2, "MY SERIES!", "metadata") + .await + .unwrap(); + SeriesAliasRepository::create(conn, s1, "Other Title", "manual") + .await + .unwrap(); + + let matches = SeriesAliasRepository::find_by_normalized(conn, "my series") + .await + 
.unwrap(); + assert_eq!(matches.len(), 2, "both series share normalized 'my series'"); + let mut series_ids: Vec = matches.into_iter().map(|m| m.series_id).collect(); + series_ids.sort(); + let mut expected = [s1, s2]; + expected.sort(); + assert_eq!(series_ids, expected); + } + + #[tokio::test] + async fn bulk_create_dedups_and_counts_inserts() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + let inserted = SeriesAliasRepository::bulk_create( + conn, + s1, + &["Title A", "Title B", "Title A", ""], + "metadata", + ) + .await + .unwrap(); + assert_eq!(inserted, 2, "blank skipped, duplicate dedup'd"); + + let again = + SeriesAliasRepository::bulk_create(conn, s1, &["Title A", "Title C"], "metadata") + .await + .unwrap(); + assert_eq!(again, 1, "Title A already present, only Title C is new"); + + let count = SeriesAliasRepository::count_for_series(conn, s1) + .await + .unwrap(); + assert_eq!(count, 3); + } + + #[tokio::test] + async fn delete_by_source_only_touches_that_source() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + SeriesAliasRepository::create(conn, s1, "Manual One", "manual") + .await + .unwrap(); + SeriesAliasRepository::create(conn, s1, "Meta One", "metadata") + .await + .unwrap(); + SeriesAliasRepository::create(conn, s1, "Meta Two", "metadata") + .await + .unwrap(); + + let removed = SeriesAliasRepository::delete_by_source_for_series(conn, s1, "metadata") + .await + .unwrap(); + assert_eq!(removed, 2); + + let remaining = SeriesAliasRepository::get_for_series(conn, s1) + .await + .unwrap(); + assert_eq!(remaining.len(), 1); + assert_eq!(remaining[0].source, "manual"); + } + + #[tokio::test] + async fn cascade_deletes_aliases_when_series_deleted() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (s1, _) = make_two_series(conn).await; + + 
SeriesAliasRepository::create(conn, s1, "Will Be Cascaded", "manual") + .await + .unwrap(); + SeriesRepository::delete(conn, s1).await.unwrap(); + + let after = SeriesAliasRepository::get_for_series(conn, s1) + .await + .unwrap(); + assert!(after.is_empty()); + } + + #[tokio::test] + async fn get_for_series_ids_handles_empty_input() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let map = SeriesAliasRepository::get_for_series_ids(conn, &[]) + .await + .unwrap(); + assert!(map.is_empty()); + } +} diff --git a/src/db/repositories/series_tracking.rs b/src/db/repositories/series_tracking.rs new file mode 100644 index 00000000..7f99865e --- /dev/null +++ b/src/db/repositories/series_tracking.rs @@ -0,0 +1,409 @@ +//! Repository for the `series_tracking` sidecar table. +//! +//! Provides 1:1 read/write access to release-tracking metadata for a series +//! (whether it's tracked, current external chapter/volume, per-series overrides, +//! etc.). This repository is intentionally narrow - it doesn't reach into +//! `series_external_ids` (already its own repo) or `series_aliases` (sibling +//! repo); the release-tracking service composes them. + +#![allow(dead_code)] + +use anyhow::Result; +use chrono::Utc; +use sea_orm::{ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter, Set}; +use uuid::Uuid; + +use crate::db::entities::series_tracking::{ + self, Entity as SeriesTracking, Model as SeriesTrackingRow, tracking_status, +}; + +/// Parameters for upserting a tracking row. Each `Option>` distinguishes +/// "leave alone" (`None`) from "explicitly clear" (`Some(None)`). +#[derive(Debug, Default, Clone)] +pub struct TrackingUpdate { + pub tracked: Option, + pub tracking_status: Option, + pub track_chapters: Option, + pub track_volumes: Option, + /// Outer `None` = leave alone; inner `None` = clear. 
+ pub latest_known_chapter: Option>, + pub latest_known_volume: Option>, + pub volume_chapter_map: Option>, + pub poll_interval_override_s: Option>, + pub confidence_threshold_override: Option>, +} + +pub struct SeriesTrackingRepository; + +impl SeriesTrackingRepository { + /// Get the tracking row for a series, if one exists. + pub async fn get( + db: &DatabaseConnection, + series_id: Uuid, + ) -> Result> { + let result = SeriesTracking::find_by_id(series_id).one(db).await?; + Ok(result) + } + + /// Get the tracking row, defaulting to a virtual untracked row if none exists. + /// The returned row is NOT persisted unless explicitly upserted. + pub async fn get_or_default( + db: &DatabaseConnection, + series_id: Uuid, + ) -> Result { + if let Some(row) = Self::get(db, series_id).await? { + return Ok(row); + } + let now = Utc::now(); + Ok(SeriesTrackingRow { + series_id, + tracked: false, + tracking_status: tracking_status::UNKNOWN.to_string(), + track_chapters: true, + track_volumes: true, + latest_known_chapter: None, + latest_known_volume: None, + volume_chapter_map: None, + poll_interval_override_s: None, + confidence_threshold_override: None, + created_at: now, + updated_at: now, + }) + } + + /// Upsert: insert if missing, otherwise apply the update fields. Fields with + /// `None` in `update` are left untouched. + pub async fn upsert( + db: &DatabaseConnection, + series_id: Uuid, + update: TrackingUpdate, + ) -> Result { + // Validate tracking_status before doing any DB work. 
+ if let Some(ref status) = update.tracking_status + && !tracking_status::is_valid(status) + { + anyhow::bail!("invalid tracking_status: {}", status); + } + + let now = Utc::now(); + let existing = SeriesTracking::find_by_id(series_id).one(db).await?; + + match existing { + Some(existing) => { + let mut active: series_tracking::ActiveModel = existing.into(); + if let Some(v) = update.tracked { + active.tracked = Set(v); + } + if let Some(v) = update.tracking_status { + active.tracking_status = Set(v); + } + if let Some(v) = update.track_chapters { + active.track_chapters = Set(v); + } + if let Some(v) = update.track_volumes { + active.track_volumes = Set(v); + } + if let Some(v) = update.latest_known_chapter { + active.latest_known_chapter = Set(v); + } + if let Some(v) = update.latest_known_volume { + active.latest_known_volume = Set(v); + } + if let Some(v) = update.volume_chapter_map { + active.volume_chapter_map = Set(v); + } + if let Some(v) = update.poll_interval_override_s { + active.poll_interval_override_s = Set(v); + } + if let Some(v) = update.confidence_threshold_override { + active.confidence_threshold_override = Set(v); + } + active.updated_at = Set(now); + let model = active.update(db).await?; + Ok(model) + } + None => { + let active = series_tracking::ActiveModel { + series_id: Set(series_id), + tracked: Set(update.tracked.unwrap_or(false)), + tracking_status: Set(update + .tracking_status + .unwrap_or_else(|| tracking_status::UNKNOWN.to_string())), + track_chapters: Set(update.track_chapters.unwrap_or(true)), + track_volumes: Set(update.track_volumes.unwrap_or(true)), + latest_known_chapter: Set(update.latest_known_chapter.unwrap_or(None)), + latest_known_volume: Set(update.latest_known_volume.unwrap_or(None)), + volume_chapter_map: Set(update.volume_chapter_map.unwrap_or(None)), + poll_interval_override_s: Set(update.poll_interval_override_s.unwrap_or(None)), + confidence_threshold_override: Set(update + .confidence_threshold_override + 
.unwrap_or(None)), + created_at: Set(now), + updated_at: Set(now), + }; + let model = active.insert(db).await?; + Ok(model) + } + } + } + + /// Convenience: toggle `tracked` on an existing or virtual row. + pub async fn set_tracked( + db: &DatabaseConnection, + series_id: Uuid, + tracked: bool, + ) -> Result { + Self::upsert( + db, + series_id, + TrackingUpdate { + tracked: Some(tracked), + ..Default::default() + }, + ) + .await + } + + /// List all tracked series IDs. Used by the polling service to enumerate + /// what to ask plugins for. Paginated to keep memory bounded for large + /// libraries; pass `limit = 0` for no limit (callers should normally page). + pub async fn list_tracked_ids( + db: &DatabaseConnection, + limit: u64, + offset: u64, + ) -> Result> { + use sea_orm::QuerySelect; + let mut query = SeriesTracking::find().filter(series_tracking::Column::Tracked.eq(true)); + if limit > 0 { + query = query.limit(limit); + } + if offset > 0 { + query = query.offset(offset); + } + let results = query.all(db).await?; + Ok(results.into_iter().map(|m| m.series_id).collect()) + } + + /// Count tracked series. + pub async fn count_tracked(db: &DatabaseConnection) -> Result { + use sea_orm::PaginatorTrait; + let count = SeriesTracking::find() + .filter(series_tracking::Column::Tracked.eq(true)) + .count(db) + .await?; + Ok(count) + } + + /// Delete the tracking row for a series. Cascade from series delete handles + /// the normal case; this is for explicit user-initiated "stop tracking and + /// forget overrides." 
+ pub async fn delete(db: &DatabaseConnection, series_id: Uuid) -> Result { + let result = SeriesTracking::delete_by_id(series_id).exec(db).await?; + Ok(result.rows_affected > 0) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::ScanningStrategy; + use crate::db::repositories::{LibraryRepository, SeriesRepository}; + use crate::db::test_helpers::create_test_db; + + async fn make_series(db: &DatabaseConnection) -> Uuid { + let library = + LibraryRepository::create(db, "Test Library", "/test/path", ScanningStrategy::Default) + .await + .unwrap(); + let series = SeriesRepository::create(db, library.id, "Test Series", None) + .await + .unwrap(); + series.id + } + + #[tokio::test] + async fn get_returns_none_when_no_row() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + let row = SeriesTrackingRepository::get(db.sea_orm_connection(), series_id) + .await + .unwrap(); + assert!(row.is_none()); + } + + #[tokio::test] + async fn get_or_default_returns_untracked_row() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + let row = SeriesTrackingRepository::get_or_default(db.sea_orm_connection(), series_id) + .await + .unwrap(); + assert_eq!(row.series_id, series_id); + assert!(!row.tracked); + assert_eq!(row.tracking_status, "unknown"); + assert!(row.track_chapters); + assert!(row.track_volumes); + } + + #[tokio::test] + async fn upsert_inserts_then_updates() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + // First upsert inserts. 
+ let row = SeriesTrackingRepository::upsert( + db.sea_orm_connection(), + series_id, + TrackingUpdate { + tracked: Some(true), + tracking_status: Some("ongoing".to_string()), + latest_known_chapter: Some(Some(142.0)), + ..Default::default() + }, + ) + .await + .unwrap(); + assert!(row.tracked); + assert_eq!(row.tracking_status, "ongoing"); + assert_eq!(row.latest_known_chapter, Some(142.0)); + + // Second upsert updates only specified fields. + let row2 = SeriesTrackingRepository::upsert( + db.sea_orm_connection(), + series_id, + TrackingUpdate { + latest_known_chapter: Some(Some(143.0)), + ..Default::default() + }, + ) + .await + .unwrap(); + assert!(row2.tracked, "tracked should be preserved"); + assert_eq!( + row2.tracking_status, "ongoing", + "status should be preserved" + ); + assert_eq!(row2.latest_known_chapter, Some(143.0)); + } + + #[tokio::test] + async fn upsert_can_clear_optional_fields() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + SeriesTrackingRepository::upsert( + db.sea_orm_connection(), + series_id, + TrackingUpdate { + latest_known_chapter: Some(Some(50.0)), + ..Default::default() + }, + ) + .await + .unwrap(); + + // Explicit clear via Some(None). 
+ let cleared = SeriesTrackingRepository::upsert( + db.sea_orm_connection(), + series_id, + TrackingUpdate { + latest_known_chapter: Some(None), + ..Default::default() + }, + ) + .await + .unwrap(); + assert_eq!(cleared.latest_known_chapter, None); + } + + #[tokio::test] + async fn upsert_rejects_invalid_status() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + let err = SeriesTrackingRepository::upsert( + db.sea_orm_connection(), + series_id, + TrackingUpdate { + tracking_status: Some("paused".to_string()), + ..Default::default() + }, + ) + .await + .unwrap_err(); + assert!(err.to_string().contains("invalid tracking_status")); + } + + #[tokio::test] + async fn set_tracked_toggles_flag() { + let (db, _temp) = create_test_db().await; + let series_id = make_series(db.sea_orm_connection()).await; + + let row = SeriesTrackingRepository::set_tracked(db.sea_orm_connection(), series_id, true) + .await + .unwrap(); + assert!(row.tracked); + + let row = SeriesTrackingRepository::set_tracked(db.sea_orm_connection(), series_id, false) + .await + .unwrap(); + assert!(!row.tracked); + } + + #[tokio::test] + async fn list_tracked_ids_filters_to_tracked() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let library = LibraryRepository::create(conn, "Lib", "/lib", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = SeriesRepository::create(conn, library.id, "A", None) + .await + .unwrap(); + let s2 = SeriesRepository::create(conn, library.id, "B", None) + .await + .unwrap(); + let _s3 = SeriesRepository::create(conn, library.id, "C", None) + .await + .unwrap(); + + SeriesTrackingRepository::set_tracked(conn, s1.id, true) + .await + .unwrap(); + SeriesTrackingRepository::set_tracked(conn, s2.id, false) + .await + .unwrap(); + // s3 has no tracking row at all. 
+ + let ids = SeriesTrackingRepository::list_tracked_ids(conn, 0, 0) + .await + .unwrap(); + assert_eq!(ids.len(), 1); + assert_eq!(ids[0], s1.id); + + let count = SeriesTrackingRepository::count_tracked(conn).await.unwrap(); + assert_eq!(count, 1); + } + + #[tokio::test] + async fn cascade_deletes_tracking_when_series_deleted() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let series_id = make_series(conn).await; + + SeriesTrackingRepository::set_tracked(conn, series_id, true) + .await + .unwrap(); + + // Delete the series; tracking should follow via FK cascade. + SeriesRepository::delete(conn, series_id).await.unwrap(); + + let row = SeriesTrackingRepository::get(conn, series_id) + .await + .unwrap(); + assert!(row.is_none(), "tracking row should be cascaded away"); + } +} From 4b8cde1d662ccc745763d7bd6fbbf02d70abeefd Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Fri, 1 May 2026 11:23:14 -0700 Subject: [PATCH 02/29] feat(release-tracking): add backfill task, tracking API, and UI surface Wires up the user-facing surface on top of the schema landed in dccba8c. Admins can now flip a series to tracked, manage matcher aliases, and bulk- mark via the existing series toolbar. Backend: - BackfillTrackingFromMetadata task seeds series_aliases from series_metadata.title/title_sort and alternate titles. Idempotent on re-run; per-series error isolation; never modifies the tracked flag. - Five HTTP endpoints under /api/v1/series/{series_id}: GET/PATCH /tracking and GET/POST/DELETE for /aliases. PATCH uses Option> via a double_option serde helper so JSON null clears a field while omitted leaves it alone. Idempotent alias create returns 200 instead of 201 on duplicate. New "Tracking" OpenAPI tag. Frontend: - TrackingPanel Mantine card on series detail (admin-only render) for the tracked toggle, status, chapter/volume announce flags, latest known chapter/volume, and the alias list with inline add/remove. 
- useSeriesTracking and four sibling hooks plus a tracking.ts API client. - "Mark as Tracked" / "Mark as Untracked" entries added to the existing bulk-selection toolbar; fans Promise.allSettled per-series PATCH calls rather than introducing a bulk endpoint at this scale. Tests added on both sides (backfill handler, tracking HTTP integration, TrackingPanel component, and bulk-toolbar tracking actions). OpenAPI spec and generated TypeScript types regenerated. --- docs/api/openapi.json | 533 ++++++++++++++++++ src/api/docs.rs | 15 + src/api/routes/v1/dto/mod.rs | 3 + src/api/routes/v1/dto/tracking.rs | 207 +++++++ src/api/routes/v1/handlers/mod.rs | 1 + src/api/routes/v1/handlers/tracking.rs | 339 +++++++++++ src/api/routes/v1/routes/series.rs | 22 + src/tasks/handlers/backfill_tracking.rs | 365 ++++++++++++ src/tasks/handlers/mod.rs | 2 + src/tasks/types.rs | 22 + src/tasks/worker.rs | 18 +- tests/api.rs | 1 + tests/api/tracking.rs | 383 +++++++++++++ web/openapi.json | 533 ++++++++++++++++++ web/src/api/tracking.ts | 51 ++ .../library/BulkSelectionToolbar.test.tsx | 60 ++ .../library/BulkSelectionToolbar.tsx | 76 ++- .../components/series/TrackingPanel.test.tsx | 160 ++++++ web/src/components/series/TrackingPanel.tsx | 263 +++++++++ web/src/components/series/index.ts | 1 + web/src/hooks/useSeriesTracking.ts | 86 +++ web/src/pages/SeriesDetail.tsx | 6 + web/src/types/api.generated.ts | 423 ++++++++++++++ 23 files changed, 3563 insertions(+), 7 deletions(-) create mode 100644 src/api/routes/v1/dto/tracking.rs create mode 100644 src/api/routes/v1/handlers/tracking.rs create mode 100644 src/tasks/handlers/backfill_tracking.rs create mode 100644 tests/api/tracking.rs create mode 100644 web/src/api/tracking.ts create mode 100644 web/src/components/series/TrackingPanel.test.tsx create mode 100644 web/src/components/series/TrackingPanel.tsx create mode 100644 web/src/hooks/useSeriesTracking.ts diff --git a/docs/api/openapi.json b/docs/api/openapi.json index 
b2e200e7..27d7693d 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -8789,6 +8789,172 @@ ] } }, + "/api/v1/series/{series_id}/aliases": { + "get": { + "tags": [ + "Tracking" + ], + "summary": "List release-matching aliases for a series.", + "operationId": "list_series_aliases", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "List of aliases", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasListResponse" + } + } + } + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + }, + "post": { + "tags": [ + "Tracking" + ], + "summary": "Create a release-matching alias for a series.", + "description": "Idempotent: if `(series_id, alias)` already exists, returns the existing\nrow with HTTP 200 instead of inserting a duplicate.", + "operationId": "create_series_alias", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateSeriesAliasRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Alias already existed (idempotent)", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, + "201": { + "description": "Alias created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, + "400": { + "description": "Invalid alias (empty after normalization)" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series 
not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, + "/api/v1/series/{series_id}/aliases/{alias_id}": { + "delete": { + "tags": [ + "Tracking" + ], + "summary": "Delete a release-matching alias.", + "operationId": "delete_series_alias", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "name": "alias_id", + "in": "path", + "description": "Alias ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "Alias deleted" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series or alias not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/{series_id}/alternate-titles": { "get": { "tags": [ @@ -11459,6 +11625,113 @@ ] } }, + "/api/v1/series/{series_id}/tracking": { + "get": { + "tags": [ + "Tracking" + ], + "summary": "Get release-tracking config for a series.", + "description": "Returns a virtual untracked row when no `series_tracking` row exists, so the\nfrontend can render the panel uniformly without special-casing absent rows.", + "operationId": "get_series_tracking", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Tracking config", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesTrackingDto" + } + } + } + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + }, + "patch": { + "tags": [ + "Tracking" + ], + "summary": "Update release-tracking config for a series.", + "description": 
"Upserts: creates the row on first write, applies the patch otherwise.\nAll fields are optional — omit to leave alone, send `null` on a nullable\nfield to clear it.", + "operationId": "update_series_tracking", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateSeriesTrackingRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Tracking config updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesTrackingDto" + } + } + } + }, + "400": { + "description": "Invalid tracking_status" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/{series_id}/unread": { "post": { "tags": [ @@ -21386,6 +21659,26 @@ } } }, + "CreateSeriesAliasRequest": { + "type": "object", + "required": [ + "alias" + ], + "properties": { + "alias": { + "type": "string", + "description": "Alias text. Will be trimmed; must normalize to non-empty.", + "example": "Boku no Hero Academia" + }, + "source": { + "type": [ + "string", + "null" + ], + "description": "Optional explicit source. Defaults to `manual` when called from the API.\nPlugin-internal flows write `metadata`; we don't expose that to HTTP." 
+ } + } + }, "CreateSeriesExportRequest": { "type": "object", "description": "Request body for creating a new series export", @@ -32435,6 +32728,64 @@ } } }, + "SeriesAliasDto": { + "type": "object", + "description": "Title alias used by release-source plugins to match incoming releases by\ntitle (Nyaa, MangaUpdates without an external ID, etc.).", + "required": [ + "id", + "seriesId", + "alias", + "normalized", + "source", + "createdAt" + ], + "properties": { + "alias": { + "type": "string", + "description": "Alias as entered (preserves casing/punctuation).", + "example": "My Hero Academia" + }, + "createdAt": { + "type": "string", + "format": "date-time" + }, + "id": { + "type": "string", + "format": "uuid", + "description": "Alias row ID.", + "example": "550e8400-e29b-41d4-a716-446655440100" + }, + "normalized": { + "type": "string", + "description": "Lowercased + punctuation-stripped form used for matching.", + "example": "my hero academia" + }, + "seriesId": { + "type": "string", + "format": "uuid", + "example": "550e8400-e29b-41d4-a716-446655440002" + }, + "source": { + "type": "string", + "description": "`metadata` (auto-derived) | `manual` (user-entered).", + "example": "manual" + } + } + }, + "SeriesAliasListResponse": { + "type": "object", + "required": [ + "aliases" + ], + "properties": { + "aliases": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, "SeriesAverageRatingResponse": { "type": "object", "description": "Response containing the average community rating for a series", @@ -33504,6 +33855,89 @@ "custom" ] }, + "SeriesTrackingDto": { + "type": "object", + "description": "Per-series release-tracking configuration.\n\nReturned even for untracked series — the row defaults to `tracked: false`\nwith conservative defaults so the frontend can render the panel without\nspecial-casing missing rows.", + "required": [ + "seriesId", + "tracked", + "trackingStatus", + "trackChapters", + "trackVolumes", + 
"createdAt", + "updatedAt" + ], + "properties": { + "confidenceThresholdOverride": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Per-series override of the server's confidence threshold (0.0 - 1.0)." + }, + "createdAt": { + "type": "string", + "format": "date-time", + "description": "When the row was created (epoch when virtual)." + }, + "latestKnownChapter": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Latest known external chapter (supports decimals like 12.5)." + }, + "latestKnownVolume": { + "type": [ + "integer", + "null" + ], + "format": "int32", + "description": "Latest known external volume." + }, + "pollIntervalOverrideS": { + "type": [ + "integer", + "null" + ], + "format": "int32", + "description": "Per-series override of the source poll interval (seconds)." + }, + "seriesId": { + "type": "string", + "format": "uuid", + "description": "Series ID this config belongs to.", + "example": "550e8400-e29b-41d4-a716-446655440002" + }, + "trackChapters": { + "type": "boolean", + "description": "Whether to announce new chapters." + }, + "trackVolumes": { + "type": "boolean", + "description": "Whether to announce new volumes." + }, + "tracked": { + "type": "boolean", + "description": "Whether release tracking is enabled." + }, + "trackingStatus": { + "type": "string", + "description": "Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`.", + "example": "ongoing" + }, + "updatedAt": { + "type": "string", + "format": "date-time", + "description": "When the row was last updated (epoch when virtual)." + }, + "volumeChapterMap": { + "description": "Sparse map of `{ \"\": { \"first\": ch, \"last\": ch } }`." 
+ } + } + }, "SeriesUpdateResponse": { "type": "object", "description": "Response for series update", @@ -35383,6 +35817,40 @@ "format": "uuid" } } + }, + { + "type": "object", + "description": "Backfill release-tracking aliases from existing series metadata.\n\nWalks series in scope, harvests the canonical title plus alternate titles\nfrom `series_metadata` and `series_alternate_titles`, and seeds them as\n`metadata`-source aliases in `series_aliases`. Idempotent — re-runs do\nnot create duplicates. Does NOT enable tracking; that stays explicit.", + "required": [ + "type" + ], + "properties": { + "libraryId": { + "type": [ + "string", + "null" + ], + "format": "uuid", + "description": "If set, scope to this library; otherwise all series." + }, + "seriesIds": { + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "uuid" + }, + "description": "If set, scope to these specific series (takes precedence over library_id)." + }, + "type": { + "type": "string", + "enum": [ + "backfill_tracking_from_metadata" + ] + } + } } ], "description": "Task types supported by the distributed task queue" @@ -36475,6 +36943,67 @@ } } }, + "UpdateSeriesTrackingRequest": { + "type": "object", + "description": "PATCH payload for tracking config. All fields are optional:\nomit a field to leave it untouched. Use a JSON `null` on a nullable field\nto clear it explicitly.", + "properties": { + "confidenceThresholdOverride": { + "type": [ + "number", + "null" + ], + "format": "double" + }, + "latestKnownChapter": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Use `Some(null)` to clear, `Some()` to set, omit to leave alone." 
+ }, + "latestKnownVolume": { + "type": [ + "integer", + "null" + ], + "format": "int32" + }, + "pollIntervalOverrideS": { + "type": [ + "integer", + "null" + ], + "format": "int32" + }, + "trackChapters": { + "type": [ + "boolean", + "null" + ] + }, + "trackVolumes": { + "type": [ + "boolean", + "null" + ] + }, + "tracked": { + "type": [ + "boolean", + "null" + ] + }, + "trackingStatus": { + "type": [ + "string", + "null" + ], + "description": "`ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`." + }, + "volumeChapterMap": {} + } + }, "UpdateSettingRequest": { "type": "object", "description": "Update setting request", @@ -37359,6 +37888,10 @@ "name": "Series", "description": "Series browsing and search endpoints" }, + { + "name": "Tracking", + "description": "Release-tracking config and matcher aliases" + }, { "name": "Books", "description": "Book details and metadata endpoints" diff --git a/src/api/docs.rs b/src/api/docs.rs index 1ca5d0ba..394a2449 100644 --- a/src/api/docs.rs +++ b/src/api/docs.rs @@ -256,6 +256,13 @@ The following paths are exempt from rate limiting: v1::handlers::create_series_external_id, v1::handlers::delete_series_external_id, + // Release-tracking config + aliases + v1::handlers::tracking::get_series_tracking, + v1::handlers::tracking::update_series_tracking, + v1::handlers::tracking::list_series_aliases, + v1::handlers::tracking::create_series_alias, + v1::handlers::tracking::delete_series_alias, + // Cover management endpoints v1::handlers::list_series_covers, v1::handlers::get_series_cover_image, @@ -678,6 +685,13 @@ The following paths are exempt from rate limiting: v1::dto::CreateAlternateTitleRequest, v1::dto::UpdateAlternateTitleRequest, + // Release-tracking DTOs + v1::dto::tracking::SeriesTrackingDto, + v1::dto::tracking::UpdateSeriesTrackingRequest, + v1::dto::tracking::SeriesAliasDto, + v1::dto::tracking::SeriesAliasListResponse, + v1::dto::tracking::CreateSeriesAliasRequest, + // External Rating DTOs 
v1::dto::ExternalRatingDto, v1::dto::ExternalRatingListResponse, @@ -1002,6 +1016,7 @@ The following paths are exempt from rate limiting: // Library Content (name = "Libraries", description = "Library management endpoints"), (name = "Series", description = "Series browsing and search endpoints"), + (name = "Tracking", description = "Release-tracking config and matcher aliases"), (name = "Books", description = "Book details and metadata endpoints"), (name = "Pages", description = "Page image serving endpoints"), diff --git a/src/api/routes/v1/dto/mod.rs b/src/api/routes/v1/dto/mod.rs index 43df7639..1ae935f2 100644 --- a/src/api/routes/v1/dto/mod.rs +++ b/src/api/routes/v1/dto/mod.rs @@ -29,6 +29,7 @@ pub mod settings; pub mod setup; pub mod sharing_tag; pub mod task_metrics; +pub mod tracking; pub mod user; pub mod user_plugins; pub mod user_preferences; @@ -62,6 +63,8 @@ pub use settings::*; pub use setup::*; pub use sharing_tag::*; pub use task_metrics::*; +#[allow(unused_imports)] +pub use tracking::*; pub use user::*; #[allow(unused_imports)] pub use user_plugins::*; diff --git a/src/api/routes/v1/dto/tracking.rs b/src/api/routes/v1/dto/tracking.rs new file mode 100644 index 00000000..550cbdad --- /dev/null +++ b/src/api/routes/v1/dto/tracking.rs @@ -0,0 +1,207 @@ +//! DTOs for release-tracking config and aliases endpoints. +//! +//! Maps the `series_tracking` sidecar and `series_aliases` table onto the v1 +//! HTTP API. Distinct from `series_alternate_titles` — aliases here are +//! arbitrary matcher strings, not labelled localized titles. + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; +use uuid::Uuid; + +use crate::db::entities::{series_aliases, series_tracking}; + +// ============================================================================= +// Tracking config DTOs +// ============================================================================= + +/// Per-series release-tracking configuration. 
+/// +/// Returned even for untracked series — the row defaults to `tracked: false` +/// with conservative defaults so the frontend can render the panel without +/// special-casing missing rows. +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SeriesTrackingDto { + /// Series ID this config belongs to. + #[schema(example = "550e8400-e29b-41d4-a716-446655440002")] + pub series_id: Uuid, + /// Whether release tracking is enabled. + pub tracked: bool, + /// Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`. + #[schema(example = "ongoing")] + pub tracking_status: String, + /// Whether to announce new chapters. + pub track_chapters: bool, + /// Whether to announce new volumes. + pub track_volumes: bool, + /// Latest known external chapter (supports decimals like 12.5). + #[serde(skip_serializing_if = "Option::is_none")] + pub latest_known_chapter: Option, + /// Latest known external volume. + #[serde(skip_serializing_if = "Option::is_none")] + pub latest_known_volume: Option, + /// Sparse map of `{ "": { "first": ch, "last": ch } }`. + #[serde(skip_serializing_if = "Option::is_none")] + pub volume_chapter_map: Option, + /// Per-series override of the source poll interval (seconds). + #[serde(skip_serializing_if = "Option::is_none")] + pub poll_interval_override_s: Option, + /// Per-series override of the server's confidence threshold (0.0 - 1.0). + #[serde(skip_serializing_if = "Option::is_none")] + pub confidence_threshold_override: Option, + /// When the row was created (epoch when virtual). + pub created_at: DateTime, + /// When the row was last updated (epoch when virtual). 
+ pub updated_at: DateTime, +} + +impl From for SeriesTrackingDto { + fn from(m: series_tracking::Model) -> Self { + Self { + series_id: m.series_id, + tracked: m.tracked, + tracking_status: m.tracking_status, + track_chapters: m.track_chapters, + track_volumes: m.track_volumes, + latest_known_chapter: m.latest_known_chapter, + latest_known_volume: m.latest_known_volume, + volume_chapter_map: m.volume_chapter_map, + poll_interval_override_s: m.poll_interval_override_s, + confidence_threshold_override: m.confidence_threshold_override, + created_at: m.created_at, + updated_at: m.updated_at, + } + } +} + +/// PATCH payload for tracking config. All fields are optional: +/// omit a field to leave it untouched. Use a JSON `null` on a nullable field +/// to clear it explicitly. +#[derive(Debug, Clone, Default, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct UpdateSeriesTrackingRequest { + pub tracked: Option, + /// `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`. + pub tracking_status: Option, + pub track_chapters: Option, + pub track_volumes: Option, + /// Use `Some(null)` to clear, `Some()` to set, omit to leave alone. + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "double_option" + )] + pub latest_known_chapter: Option>, + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "double_option" + )] + pub latest_known_volume: Option>, + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "double_option" + )] + pub volume_chapter_map: Option>, + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "double_option" + )] + pub poll_interval_override_s: Option>, + #[serde( + default, + skip_serializing_if = "Option::is_none", + with = "double_option" + )] + pub confidence_threshold_override: Option>, +} + +/// `Option>` SerDe helper: distinguishes "field omitted" from "field +/// present and null". 
The default `Option<T>` flattens both, which collapses
+ #[schema(example = "manual")] + pub source: String, + pub created_at: DateTime, +} + +impl From for SeriesAliasDto { + fn from(m: series_aliases::Model) -> Self { + Self { + id: m.id, + series_id: m.series_id, + alias: m.alias, + normalized: m.normalized, + source: m.source, + created_at: m.created_at, + } + } +} + +#[derive(Debug, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SeriesAliasListResponse { + pub aliases: Vec, +} + +#[derive(Debug, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct CreateSeriesAliasRequest { + /// Alias text. Will be trimmed; must normalize to non-empty. + #[schema(example = "Boku no Hero Academia")] + pub alias: String, + /// Optional explicit source. Defaults to `manual` when called from the API. + /// Plugin-internal flows write `metadata`; we don't expose that to HTTP. + #[serde(default)] + pub source: Option, +} diff --git a/src/api/routes/v1/handlers/mod.rs b/src/api/routes/v1/handlers/mod.rs index 76b77dc1..2d209f62 100644 --- a/src/api/routes/v1/handlers/mod.rs +++ b/src/api/routes/v1/handlers/mod.rs @@ -69,6 +69,7 @@ pub mod setup; pub mod sharing_tags; pub mod task_metrics; pub mod task_queue; +pub mod tracking; pub mod user_plugins; pub mod user_preferences; pub mod users; diff --git a/src/api/routes/v1/handlers/tracking.rs b/src/api/routes/v1/handlers/tracking.rs new file mode 100644 index 00000000..f775ba7a --- /dev/null +++ b/src/api/routes/v1/handlers/tracking.rs @@ -0,0 +1,339 @@ +//! HTTP handlers for release-tracking config + title aliases. +//! +//! Endpoints (all under `/api/v1/series/{series_id}`): +//! - `GET /tracking` — read (returns a virtual untracked row when none exists) +//! - `PATCH /tracking` — update (upserts on first write) +//! - `GET /aliases` — list aliases for the series +//! - `POST /aliases` — add a manual alias (idempotent on duplicate) +//! 
- `DELETE /aliases/{alias_id}` — remove an alias + +use axum::{ + Json, + extract::{Path, State}, + http::StatusCode, +}; +use chrono::Utc; +use std::sync::Arc; +use uuid::Uuid; + +use super::super::dto::tracking::{ + CreateSeriesAliasRequest, SeriesAliasDto, SeriesAliasListResponse, SeriesTrackingDto, + UpdateSeriesTrackingRequest, +}; +use crate::api::{ + error::ApiError, + extractors::{AuthContext, AuthState}, + permissions::Permission, +}; +use crate::db::entities::series_aliases::alias_source; +use crate::db::repositories::{ + SeriesAliasRepository, SeriesRepository, SeriesTrackingRepository, TrackingUpdate, +}; +use crate::events::{EntityChangeEvent, EntityEvent}; +use crate::require_permission; + +// ============================================================================= +// Tracking config handlers +// ============================================================================= + +/// Get release-tracking config for a series. +/// +/// Returns a virtual untracked row when no `series_tracking` row exists, so the +/// frontend can render the panel uniformly without special-casing absent rows. +#[utoipa::path( + get, + path = "/api/v1/series/{series_id}/tracking", + params( + ("series_id" = Uuid, Path, description = "Series ID") + ), + responses( + (status = 200, description = "Tracking config", body = SeriesTrackingDto), + (status = 404, description = "Series not found"), + (status = 403, description = "Forbidden"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Tracking" +)] +pub async fn get_series_tracking( + State(state): State>, + auth: AuthContext, + Path(series_id): Path, +) -> Result, ApiError> { + require_permission!(auth, Permission::SeriesRead)?; + + SeriesRepository::get_by_id(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? 
+ .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + + let row = SeriesTrackingRepository::get_or_default(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch tracking: {}", e)))?; + Ok(Json(row.into())) +} + +/// Update release-tracking config for a series. +/// +/// Upserts: creates the row on first write, applies the patch otherwise. +/// All fields are optional — omit to leave alone, send `null` on a nullable +/// field to clear it. +#[utoipa::path( + patch, + path = "/api/v1/series/{series_id}/tracking", + params( + ("series_id" = Uuid, Path, description = "Series ID") + ), + request_body = UpdateSeriesTrackingRequest, + responses( + (status = 200, description = "Tracking config updated", body = SeriesTrackingDto), + (status = 400, description = "Invalid tracking_status"), + (status = 404, description = "Series not found"), + (status = 403, description = "Forbidden"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Tracking" +)] +pub async fn update_series_tracking( + State(state): State>, + auth: AuthContext, + Path(series_id): Path, + Json(request): Json, +) -> Result, ApiError> { + require_permission!(auth, Permission::SeriesWrite)?; + + let series = SeriesRepository::get_by_id(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? 
+ .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + + let update = TrackingUpdate { + tracked: request.tracked, + tracking_status: request.tracking_status, + track_chapters: request.track_chapters, + track_volumes: request.track_volumes, + latest_known_chapter: request.latest_known_chapter, + latest_known_volume: request.latest_known_volume, + volume_chapter_map: request.volume_chapter_map, + poll_interval_override_s: request.poll_interval_override_s, + confidence_threshold_override: request.confidence_threshold_override, + }; + + let row = SeriesTrackingRepository::upsert(&state.db, series_id, update) + .await + .map_err(|e| { + // Surface validation errors (e.g., invalid tracking_status) as 400. + if e.to_string().contains("invalid tracking_status") { + ApiError::BadRequest(e.to_string()) + } else { + ApiError::Internal(format!("Failed to update tracking: {}", e)) + } + })?; + + let event = EntityChangeEvent { + event: EntityEvent::SeriesUpdated { + series_id, + library_id: series.library_id, + fields: Some(vec!["tracking".to_string()]), + }, + timestamp: Utc::now(), + user_id: Some(auth.user_id), + }; + let _ = state.event_broadcaster.emit(event); + + Ok(Json(row.into())) +} + +// ============================================================================= +// Alias handlers +// ============================================================================= + +/// List release-matching aliases for a series. 
+#[utoipa::path( + get, + path = "/api/v1/series/{series_id}/aliases", + params( + ("series_id" = Uuid, Path, description = "Series ID") + ), + responses( + (status = 200, description = "List of aliases", body = SeriesAliasListResponse), + (status = 404, description = "Series not found"), + (status = 403, description = "Forbidden"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Tracking" +)] +pub async fn list_series_aliases( + State(state): State>, + auth: AuthContext, + Path(series_id): Path, +) -> Result, ApiError> { + require_permission!(auth, Permission::SeriesRead)?; + + SeriesRepository::get_by_id(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? + .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + + let aliases = SeriesAliasRepository::get_for_series(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch aliases: {}", e)))?; + + Ok(Json(SeriesAliasListResponse { + aliases: aliases.into_iter().map(Into::into).collect(), + })) +} + +/// Create a release-matching alias for a series. +/// +/// Idempotent: if `(series_id, alias)` already exists, returns the existing +/// row with HTTP 200 instead of inserting a duplicate. 
+#[utoipa::path( + post, + path = "/api/v1/series/{series_id}/aliases", + params( + ("series_id" = Uuid, Path, description = "Series ID") + ), + request_body = CreateSeriesAliasRequest, + responses( + (status = 201, description = "Alias created", body = SeriesAliasDto), + (status = 200, description = "Alias already existed (idempotent)", body = SeriesAliasDto), + (status = 400, description = "Invalid alias (empty after normalization)"), + (status = 404, description = "Series not found"), + (status = 403, description = "Forbidden"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Tracking" +)] +pub async fn create_series_alias( + State(state): State>, + auth: AuthContext, + Path(series_id): Path, + Json(request): Json, +) -> Result<(StatusCode, Json), ApiError> { + require_permission!(auth, Permission::SeriesWrite)?; + + let series = SeriesRepository::get_by_id(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? + .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + + // Determine source. HTTP defaults to `manual`; we accept `metadata` only + // for explicit admin imports (e.g., a follow-up tool that wants to seed + // metadata-source aliases through the API rather than the backfill task). + let source = request + .source + .as_deref() + .filter(|s| alias_source::is_valid(s)) + .unwrap_or(alias_source::MANUAL); + + // Detect insert-vs-existing by counting before/after — `create()` returns + // the existing row on duplicate, but doesn't tell us which case we hit. 
+ let before = SeriesAliasRepository::count_for_series(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to count aliases: {}", e)))?; + let alias = SeriesAliasRepository::create(&state.db, series_id, &request.alias, source) + .await + .map_err(|e| { + let msg = e.to_string(); + if msg.contains("empty") + || msg.contains("normalize") + || msg.contains("invalid alias source") + { + ApiError::BadRequest(msg) + } else { + ApiError::Internal(format!("Failed to create alias: {}", e)) + } + })?; + let after = SeriesAliasRepository::count_for_series(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to count aliases: {}", e)))?; + + let status = if after > before { + // Newly inserted: emit update event so the frontend invalidates its cache. + let event = EntityChangeEvent { + event: EntityEvent::SeriesUpdated { + series_id, + library_id: series.library_id, + fields: Some(vec!["aliases".to_string()]), + }, + timestamp: Utc::now(), + user_id: Some(auth.user_id), + }; + let _ = state.event_broadcaster.emit(event); + StatusCode::CREATED + } else { + StatusCode::OK + }; + + Ok((status, Json(alias.into()))) +} + +/// Delete a release-matching alias. 
+#[utoipa::path( + delete, + path = "/api/v1/series/{series_id}/aliases/{alias_id}", + params( + ("series_id" = Uuid, Path, description = "Series ID"), + ("alias_id" = Uuid, Path, description = "Alias ID") + ), + responses( + (status = 204, description = "Alias deleted"), + (status = 404, description = "Series or alias not found"), + (status = 403, description = "Forbidden"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Tracking" +)] +pub async fn delete_series_alias( + State(state): State>, + auth: AuthContext, + Path((series_id, alias_id)): Path<(Uuid, Uuid)>, +) -> Result { + require_permission!(auth, Permission::SeriesWrite)?; + + let series = SeriesRepository::get_by_id(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? + .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + + // Verify the alias actually belongs to this series before deleting. + let row = SeriesAliasRepository::get_by_id(&state.db, alias_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch alias: {}", e)))?; + let row = match row { + Some(r) if r.series_id == series_id => r, + _ => return Err(ApiError::NotFound("Alias not found".to_string())), + }; + + SeriesAliasRepository::delete(&state.db, row.id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to delete alias: {}", e)))?; + + let event = EntityChangeEvent { + event: EntityEvent::SeriesUpdated { + series_id, + library_id: series.library_id, + fields: Some(vec!["aliases".to_string()]), + }, + timestamp: Utc::now(), + user_id: Some(auth.user_id), + }; + let _ = state.event_broadcaster.emit(event); + + Ok(StatusCode::NO_CONTENT) +} diff --git a/src/api/routes/v1/routes/series.rs b/src/api/routes/v1/routes/series.rs index bc374b51..67060e2a 100644 --- a/src/api/routes/v1/routes/series.rs +++ b/src/api/routes/v1/routes/series.rs @@ -337,4 +337,26 @@ pub fn routes(_state: Arc) -> Router> { 
"/series/{series_id}/title/reprocess", post(handlers::task_queue::reprocess_series_title), ) + // Release-tracking config (per series) + .route( + "/series/{series_id}/tracking", + get(handlers::tracking::get_series_tracking), + ) + .route( + "/series/{series_id}/tracking", + patch(handlers::tracking::update_series_tracking), + ) + // Release-matching aliases (per series) + .route( + "/series/{series_id}/aliases", + get(handlers::tracking::list_series_aliases), + ) + .route( + "/series/{series_id}/aliases", + post(handlers::tracking::create_series_alias), + ) + .route( + "/series/{series_id}/aliases/{alias_id}", + delete(handlers::tracking::delete_series_alias), + ) } diff --git a/src/tasks/handlers/backfill_tracking.rs b/src/tasks/handlers/backfill_tracking.rs new file mode 100644 index 00000000..022b597c --- /dev/null +++ b/src/tasks/handlers/backfill_tracking.rs @@ -0,0 +1,365 @@ +//! `BackfillTrackingFromMetadata` task handler. +//! +//! Walks series in scope and seeds `series_aliases` rows from existing metadata +//! (canonical title + alternate titles). Idempotent on re-run — `SeriesAliasRepository::create` +//! returns the existing row when the same alias already exists for a series. +//! +//! Does NOT toggle `tracked`. Enabling tracking is always an explicit user +//! action; this task is a one-time data-prep pass that the admin can run after +//! upgrading or after a metadata refresh. 
+ +use anyhow::Result; +use sea_orm::DatabaseConnection; +use std::sync::Arc; +use tracing::{debug, info, warn}; +use uuid::Uuid; + +use crate::db::entities::series_aliases::alias_source; +use crate::db::entities::tasks; +use crate::db::repositories::{ + AlternateTitleRepository, SeriesAliasRepository, SeriesMetadataRepository, SeriesRepository, +}; +use crate::events::EventBroadcaster; +use crate::tasks::handlers::TaskHandler; +use crate::tasks::types::TaskResult; + +pub struct BackfillTrackingFromMetadataHandler; + +impl BackfillTrackingFromMetadataHandler { + pub fn new() -> Self { + Self + } +} + +impl Default for BackfillTrackingFromMetadataHandler { + fn default() -> Self { + Self::new() + } +} + +impl TaskHandler for BackfillTrackingFromMetadataHandler { + fn handle<'a>( + &'a self, + task: &'a tasks::Model, + db: &'a DatabaseConnection, + _event_broadcaster: Option<&'a Arc>, + ) -> std::pin::Pin> + Send + 'a>> { + Box::pin(async move { + let library_id = task.library_id; + let series_ids: Option> = task + .params + .as_ref() + .and_then(|p| p.get("series_ids")) + .and_then(|v| serde_json::from_value(v.clone()).ok()); + + let scope = describe_scope(library_id, series_ids.as_deref()); + info!("Task {}: Backfilling tracking aliases ({})", task.id, scope); + + let series_to_process = resolve_series_scope(db, library_id, series_ids).await?; + let total = series_to_process.len(); + info!("Found {} series in scope", total); + + let mut summary = BackfillSummary::default(); + for series_id in series_to_process { + match backfill_one(db, series_id).await { + Ok(per_series) => { + summary.merge(per_series); + } + Err(e) => { + warn!("Backfill failed for series {}: {}", series_id, e); + summary.errors += 1; + } + } + } + + info!( + "Backfill complete ({}): {} series processed, {} aliases inserted, {} skipped, {} errors", + scope, + summary.processed, + summary.aliases_inserted, + summary.aliases_skipped_duplicate, + summary.errors, + ); + + 
Ok(TaskResult::success_with_data( + format!( + "Processed {} series, inserted {} new aliases ({} duplicates skipped, {} errors)", + summary.processed, + summary.aliases_inserted, + summary.aliases_skipped_duplicate, + summary.errors, + ), + serde_json::json!({ + "scope": scope, + "series_processed": summary.processed, + "aliases_inserted": summary.aliases_inserted, + "aliases_skipped_duplicate": summary.aliases_skipped_duplicate, + "errors": summary.errors, + }), + )) + }) + } +} + +#[derive(Default)] +struct BackfillSummary { + processed: usize, + aliases_inserted: usize, + aliases_skipped_duplicate: usize, + errors: usize, +} + +impl BackfillSummary { + fn merge(&mut self, other: PerSeriesSummary) { + self.processed += 1; + self.aliases_inserted += other.inserted; + self.aliases_skipped_duplicate += other.skipped_duplicate; + } +} + +#[derive(Default)] +struct PerSeriesSummary { + inserted: usize, + skipped_duplicate: usize, +} + +fn describe_scope(library_id: Option, series_ids: Option<&[Uuid]>) -> String { + match (library_id, series_ids) { + (_, Some(ids)) => format!("scope=series_ids:{}", ids.len()), + (Some(lib), _) => format!("scope=library:{}", lib), + (None, None) => "scope=all".to_string(), + } +} + +async fn resolve_series_scope( + db: &DatabaseConnection, + library_id: Option, + series_ids: Option>, +) -> Result> { + if let Some(ids) = series_ids { + return Ok(ids); + } + if let Some(lib_id) = library_id { + let series_list = SeriesRepository::list_by_library(db, lib_id).await?; + return Ok(series_list.into_iter().map(|s| s.id).collect()); + } + let all = SeriesRepository::list_all(db).await?; + Ok(all.into_iter().map(|s| s.id).collect()) +} + +async fn backfill_one(db: &DatabaseConnection, series_id: Uuid) -> Result { + let metadata = match SeriesMetadataRepository::get_by_series_id(db, series_id).await? 
{ + Some(m) => m, + None => { + // Metadata is required for a series to exist normally; if missing, + // the series row is in an unexpected state - skip it. + debug!("Series {} has no metadata, skipping", series_id); + return Ok(PerSeriesSummary::default()); + } + }; + + let mut candidates: Vec = Vec::new(); + candidates.push(metadata.title.clone()); + if let Some(sort) = metadata.title_sort.as_ref() + && !sort.trim().is_empty() + { + candidates.push(sort.clone()); + } + + let alt_titles = AlternateTitleRepository::get_for_series(db, series_id).await?; + for alt in alt_titles { + if !alt.title.trim().is_empty() { + candidates.push(alt.title); + } + } + + let mut summary = PerSeriesSummary::default(); + for alias in candidates { + let trimmed = alias.trim(); + if trimmed.is_empty() { + continue; + } + // Track inserts vs idempotent skips by counting before/after. + let before = SeriesAliasRepository::count_for_series(db, series_id).await?; + match SeriesAliasRepository::create(db, series_id, trimmed, alias_source::METADATA).await { + Ok(_) => { + let after = SeriesAliasRepository::count_for_series(db, series_id).await?; + if after > before { + summary.inserted += 1; + } else { + summary.skipped_duplicate += 1; + } + } + Err(e) => { + // Aliases that normalize to empty (e.g., "!!!---!!!" entries from + // odd metadata) are non-fatal — log and skip. 
+ debug!( + "Skipping alias '{}' for series {}: {}", + trimmed, series_id, e + ); + } + } + } + Ok(summary) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::db::ScanningStrategy; + use crate::db::repositories::{LibraryRepository, SeriesAliasRepository, SeriesRepository}; + use crate::db::test_helpers::create_test_db; + + async fn make_series( + db: &DatabaseConnection, + library_id: Uuid, + name: &str, + japanese: Option<&str>, + ) -> Uuid { + let series = SeriesRepository::create(db, library_id, name, None) + .await + .unwrap(); + if let Some(jp) = japanese { + AlternateTitleRepository::create(db, series.id, "Japanese", jp) + .await + .unwrap(); + } + series.id + } + + #[tokio::test] + async fn handler_seeds_aliases_from_title_and_alternates() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = make_series( + conn, + lib.id, + "My Hero Academia", + Some("僕のヒーローアカデミア"), + ) + .await; + + let summary = backfill_one(conn, s1).await.unwrap(); + assert_eq!(summary.inserted, 2); + assert_eq!(summary.skipped_duplicate, 0); + + let aliases = SeriesAliasRepository::get_for_series(conn, s1) + .await + .unwrap(); + let texts: Vec<&str> = aliases.iter().map(|a| a.alias.as_str()).collect(); + assert!(texts.contains(&"My Hero Academia")); + assert!(texts.contains(&"僕のヒーローアカデミア")); + assert!(aliases.iter().all(|a| a.source == "metadata")); + } + + #[tokio::test] + async fn handler_is_idempotent_on_rerun() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = make_series(conn, lib.id, "Series A", Some("Alt A")).await; + + let first = backfill_one(conn, s1).await.unwrap(); + assert_eq!(first.inserted, 2); + + let second = backfill_one(conn, s1).await.unwrap(); + 
assert_eq!(second.inserted, 0, "re-run should not insert duplicates"); + assert_eq!(second.skipped_duplicate, 2); + + let aliases = SeriesAliasRepository::get_for_series(conn, s1) + .await + .unwrap(); + assert_eq!(aliases.len(), 2); + } + + #[tokio::test] + async fn handler_does_not_enable_tracking() { + use crate::db::repositories::SeriesTrackingRepository; + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = make_series(conn, lib.id, "Some Title", None).await; + + backfill_one(conn, s1).await.unwrap(); + + let row = SeriesTrackingRepository::get(conn, s1).await.unwrap(); + assert!( + row.is_none(), + "backfill should not create or modify tracking row" + ); + } + + #[tokio::test] + async fn resolve_scope_prefers_explicit_series_ids() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s1 = make_series(conn, lib.id, "A", None).await; + let _s2 = make_series(conn, lib.id, "B", None).await; + + let scoped = resolve_series_scope(conn, Some(lib.id), Some(vec![s1])) + .await + .unwrap(); + assert_eq!(scoped, vec![s1]); + } + + #[tokio::test] + async fn resolve_scope_library_returns_all_in_library() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib1 = LibraryRepository::create(conn, "L1", "/p1", ScanningStrategy::Default) + .await + .unwrap(); + let lib2 = LibraryRepository::create(conn, "L2", "/p2", ScanningStrategy::Default) + .await + .unwrap(); + let _a = make_series(conn, lib1.id, "A", None).await; + let _b = make_series(conn, lib1.id, "B", None).await; + let _c = make_series(conn, lib2.id, "C", None).await; + + let scoped = resolve_series_scope(conn, Some(lib1.id), None) + .await + .unwrap(); + assert_eq!(scoped.len(), 2); + } + + 
#[tokio::test] + async fn resolve_scope_no_args_returns_all_series() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let _a = make_series(conn, lib.id, "A", None).await; + let _b = make_series(conn, lib.id, "B", None).await; + + let scoped = resolve_series_scope(conn, None, None).await.unwrap(); + assert_eq!(scoped.len(), 2); + } + + #[test] + fn describe_scope_strings() { + let lib = Uuid::new_v4(); + assert!(describe_scope(None, None).starts_with("scope=all")); + assert!(describe_scope(Some(lib), None).starts_with("scope=library:")); + assert_eq!( + describe_scope(Some(lib), Some(&[Uuid::new_v4(), Uuid::new_v4()])), + "scope=series_ids:2", + ); + } + + #[test] + fn handler_creation() { + let _ = BackfillTrackingFromMetadataHandler::new(); + let _ = BackfillTrackingFromMetadataHandler; + } +} diff --git a/src/tasks/handlers/mod.rs b/src/tasks/handlers/mod.rs index edd6b5d6..0d0a6bbd 100644 --- a/src/tasks/handlers/mod.rs +++ b/src/tasks/handlers/mod.rs @@ -8,6 +8,7 @@ use crate::tasks::types::TaskResult; pub mod analyze_book; pub mod analyze_series; +pub mod backfill_tracking; pub mod cleanup_book_files; pub mod cleanup_orphaned_files; pub mod cleanup_pdf_cache; @@ -32,6 +33,7 @@ pub mod user_plugin_sync; pub use analyze_book::AnalyzeBookHandler; pub use analyze_series::AnalyzeSeriesHandler; +pub use backfill_tracking::BackfillTrackingFromMetadataHandler; pub use cleanup_book_files::CleanupBookFilesHandler; pub use cleanup_orphaned_files::CleanupOrphanedFilesHandler; pub use cleanup_pdf_cache::CleanupPdfCacheHandler; diff --git a/src/tasks/types.rs b/src/tasks/types.rs index 6e38bf47..6261c8f5 100644 --- a/src/tasks/types.rs +++ b/src/tasks/types.rs @@ -206,6 +206,21 @@ pub enum TaskType { #[serde(default)] reason: Option, }, + + /// Backfill release-tracking aliases from existing series metadata. 
+ /// + /// Walks series in scope, harvests the canonical title plus alternate titles + /// from `series_metadata` and `series_alternate_titles`, and seeds them as + /// `metadata`-source aliases in `series_aliases`. Idempotent — re-runs do + /// not create duplicates. Does NOT enable tracking; that stays explicit. + BackfillTrackingFromMetadata { + /// If set, scope to this library; otherwise all series. + #[serde(rename = "libraryId", default)] + library_id: Option, + /// If set, scope to these specific series (takes precedence over library_id). + #[serde(rename = "seriesIds", default)] + series_ids: Option>, + }, } fn default_mode() -> String { @@ -251,6 +266,8 @@ impl TaskType { TaskType::UserPluginRecommendationDismiss { .. } => 200, TaskType::UserPluginSync { .. } => 190, TaskType::UserPluginRecommendations { .. } => 180, + // Release tracking maintenance + TaskType::BackfillTrackingFromMetadata { .. } => 150, // Cleanup TaskType::CleanupBookFiles { .. } | TaskType::CleanupSeriesFiles { .. } @@ -292,6 +309,7 @@ impl TaskType { TaskType::UserPluginRecommendationDismiss { .. } => { "user_plugin_recommendation_dismiss" } + TaskType::BackfillTrackingFromMetadata { .. } => "backfill_tracking_from_metadata", } } @@ -308,6 +326,7 @@ impl TaskType { TaskType::GenerateThumbnails { library_id, .. } => *library_id, TaskType::GenerateSeriesThumbnails { library_id, .. } => *library_id, TaskType::ReprocessSeriesTitles { library_id, .. } => *library_id, + TaskType::BackfillTrackingFromMetadata { library_id, .. } => *library_id, _ => None, } } @@ -407,6 +426,9 @@ impl TaskType { "reason": reason, }) } + TaskType::BackfillTrackingFromMetadata { series_ids, .. 
} => { + serde_json::json!({ "series_ids": series_ids }) + } _ => serde_json::json!({}), } } diff --git a/src/tasks/worker.rs b/src/tasks/worker.rs index cfb09f1c..c6d152b0 100644 --- a/src/tasks/worker.rs +++ b/src/tasks/worker.rs @@ -26,12 +26,13 @@ use crate::services::user_plugin::OAuthStateManager; use crate::services::{SettingsService, TaskMetricsService, ThumbnailService}; use crate::tasks::error::check_rate_limited; use crate::tasks::handlers::{ - AnalyzeBookHandler, AnalyzeSeriesHandler, CleanupBookFilesHandler, CleanupOrphanedFilesHandler, - CleanupPdfCacheHandler, CleanupPluginDataHandler, CleanupSeriesExportsHandler, - CleanupSeriesFilesHandler, ExportSeriesHandler, FindDuplicatesHandler, - GenerateSeriesThumbnailHandler, GenerateSeriesThumbnailsHandler, GenerateThumbnailHandler, - GenerateThumbnailsHandler, PluginAutoMatchHandler, PurgeDeletedHandler, - RefreshLibraryMetadataHandler, RenumberSeriesBatchHandler, RenumberSeriesHandler, + AnalyzeBookHandler, AnalyzeSeriesHandler, BackfillTrackingFromMetadataHandler, + CleanupBookFilesHandler, CleanupOrphanedFilesHandler, CleanupPdfCacheHandler, + CleanupPluginDataHandler, CleanupSeriesExportsHandler, CleanupSeriesFilesHandler, + ExportSeriesHandler, FindDuplicatesHandler, GenerateSeriesThumbnailHandler, + GenerateSeriesThumbnailsHandler, GenerateThumbnailHandler, GenerateThumbnailsHandler, + PluginAutoMatchHandler, PurgeDeletedHandler, RefreshLibraryMetadataHandler, + RenumberSeriesBatchHandler, RenumberSeriesHandler, ReprocessSeriesTitleHandler, ReprocessSeriesTitlesHandler, ScanLibraryHandler, TaskHandler, UserPluginRecommendationDismissHandler, UserPluginRecommendationsHandler, UserPluginSyncHandler, @@ -101,6 +102,11 @@ impl TaskWorker { "cleanup_plugin_data".to_string(), Arc::new(CleanupPluginDataHandler::new()), ); + // Release-tracking maintenance: backfill aliases from metadata. 
+ handlers.insert( + "backfill_tracking_from_metadata".to_string(), + Arc::new(BackfillTrackingFromMetadataHandler::new()), + ); // Generate worker ID from hostname or random UUID let worker_id = std::env::var("HOSTNAME") diff --git a/tests/api.rs b/tests/api.rs index 73b81c0a..6112af64 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -47,6 +47,7 @@ mod api { mod tags; mod task_metrics; mod thumbnails; + mod tracking; mod user_plugins; mod user_preferences; mod user_ratings; diff --git a/tests/api/tracking.rs b/tests/api/tracking.rs new file mode 100644 index 00000000..2aa9adcd --- /dev/null +++ b/tests/api/tracking.rs @@ -0,0 +1,383 @@ +//! Integration tests for release-tracking config + alias endpoints. + +#[path = "../common/mod.rs"] +mod common; + +use codex::api::error::ErrorResponse; +use codex::api::routes::v1::dto::tracking::{ + CreateSeriesAliasRequest, SeriesAliasDto, SeriesAliasListResponse, SeriesTrackingDto, + UpdateSeriesTrackingRequest, +}; +use codex::db::ScanningStrategy; +use codex::db::repositories::{LibraryRepository, SeriesRepository, UserRepository}; +use codex::utils::password; +use common::*; +use hyper::StatusCode; +use uuid::Uuid; + +async fn create_admin_and_token( + db: &sea_orm::DatabaseConnection, + state: &codex::api::extractors::AuthState, +) -> String { + let password_hash = password::hash_password("admin123").unwrap(); + let user = create_test_user("admin", "admin@example.com", &password_hash, true); + let created = UserRepository::create(db, &user).await.unwrap(); + state + .jwt_service + .generate_token(created.id, created.username.clone(), created.get_role()) + .unwrap() +} + +async fn create_regular_user_and_token( + db: &sea_orm::DatabaseConnection, + state: &codex::api::extractors::AuthState, +) -> String { + let password_hash = password::hash_password("user123").unwrap(); + let user = create_test_user("regular", "user@example.com", &password_hash, false); + let created = UserRepository::create(db, &user).await.unwrap(); + 
state + .jwt_service + .generate_token(created.id, created.username.clone(), created.get_role()) + .unwrap() +} + +async fn create_test_series(db: &sea_orm::DatabaseConnection) -> (Uuid, Uuid) { + let library = + LibraryRepository::create(db, "Test Library", "/test/path", ScanningStrategy::Default) + .await + .unwrap(); + let series = SeriesRepository::create(db, library.id, "Test Series", None) + .await + .unwrap(); + (library.id, series.id) +} + +// ============================================================================= +// GET /tracking +// ============================================================================= + +#[tokio::test] +async fn get_tracking_returns_virtual_default_when_no_row() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = get_request_with_auth(&format!("/api/v1/series/{}/tracking", series_id), &token); + let (status, dto): (StatusCode, Option) = make_json_request(app, req).await; + + assert_eq!(status, StatusCode::OK); + let dto = dto.unwrap(); + assert_eq!(dto.series_id, series_id); + assert!(!dto.tracked); + assert_eq!(dto.tracking_status, "unknown"); + assert!(dto.track_chapters); + assert!(dto.track_volumes); +} + +#[tokio::test] +async fn get_tracking_404_when_series_missing() { + let (db, _temp) = setup_test_db().await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let fake = Uuid::new_v4(); + let req = get_request_with_auth(&format!("/api/v1/series/{}/tracking", fake), &token); + let (status, _err): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::NOT_FOUND); +} + +// ============================================================================= +// 
PATCH /tracking +// ============================================================================= + +#[tokio::test] +async fn patch_tracking_creates_then_updates() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + + // First PATCH: insert. + let app1 = create_test_router(state.clone()).await; + let body = UpdateSeriesTrackingRequest { + tracked: Some(true), + tracking_status: Some("ongoing".to_string()), + latest_known_chapter: Some(Some(142.5)), + ..Default::default() + }; + let req = patch_json_request_with_auth( + &format!("/api/v1/series/{}/tracking", series_id), + &body, + &token, + ); + let (status, dto): (StatusCode, Option) = make_json_request(app1, req).await; + assert_eq!(status, StatusCode::OK); + let dto = dto.unwrap(); + assert!(dto.tracked); + assert_eq!(dto.tracking_status, "ongoing"); + assert_eq!(dto.latest_known_chapter, Some(142.5)); + + // Second PATCH: only update one field; others persist. 
+ let app2 = create_test_router(state).await; + let body = UpdateSeriesTrackingRequest { + latest_known_chapter: Some(Some(143.0)), + ..Default::default() + }; + let req = patch_json_request_with_auth( + &format!("/api/v1/series/{}/tracking", series_id), + &body, + &token, + ); + let (status, dto): (StatusCode, Option) = make_json_request(app2, req).await; + assert_eq!(status, StatusCode::OK); + let dto = dto.unwrap(); + assert!(dto.tracked, "tracked should persist"); + assert_eq!(dto.tracking_status, "ongoing", "status should persist"); + assert_eq!(dto.latest_known_chapter, Some(143.0)); +} + +#[tokio::test] +async fn patch_tracking_rejects_invalid_status() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let body = UpdateSeriesTrackingRequest { + tracking_status: Some("paused".to_string()), + ..Default::default() + }; + let req = patch_json_request_with_auth( + &format!("/api/v1/series/{}/tracking", series_id), + &body, + &token, + ); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::BAD_REQUEST); +} + +#[tokio::test] +async fn patch_tracking_requires_auth() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let app = create_test_router(state).await; + + let body = UpdateSeriesTrackingRequest { + tracked: Some(true), + ..Default::default() + }; + let req = patch_json_request(&format!("/api/v1/series/{}/tracking", series_id), &body); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::UNAUTHORIZED); +} + +// ============================================================================= +// Aliases +// 
============================================================================= + +#[tokio::test] +async fn list_aliases_empty_for_new_series() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = get_request_with_auth(&format!("/api/v1/series/{}/aliases", series_id), &token); + let (status, body): (StatusCode, Option) = + make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + assert!(body.unwrap().aliases.is_empty()); +} + +#[tokio::test] +async fn create_alias_inserts_then_idempotent() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + + let app1 = create_test_router(state.clone()).await; + let body = CreateSeriesAliasRequest { + alias: "My Hero Academia".to_string(), + source: None, + }; + let req = post_json_request_with_auth( + &format!("/api/v1/series/{}/aliases", series_id), + &body, + &token, + ); + let (status, dto): (StatusCode, Option) = make_json_request(app1, req).await; + assert_eq!(status, StatusCode::CREATED); + let dto = dto.unwrap(); + assert_eq!(dto.series_id, series_id); + assert_eq!(dto.alias, "My Hero Academia"); + assert_eq!(dto.normalized, "my hero academia"); + assert_eq!(dto.source, "manual"); + + // Second call with same alias: idempotent OK (not CREATED), same id. 
+ let app2 = create_test_router(state).await; + let body = CreateSeriesAliasRequest { + alias: "My Hero Academia".to_string(), + source: None, + }; + let req = post_json_request_with_auth( + &format!("/api/v1/series/{}/aliases", series_id), + &body, + &token, + ); + let (status, dto2): (StatusCode, Option) = make_json_request(app2, req).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(dto2.unwrap().id, dto.id); +} + +#[tokio::test] +async fn create_alias_rejects_blank() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let body = CreateSeriesAliasRequest { + alias: " ".to_string(), + source: None, + }; + let req = post_json_request_with_auth( + &format!("/api/v1/series/{}/aliases", series_id), + &body, + &token, + ); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::BAD_REQUEST); +} + +#[tokio::test] +async fn create_alias_rejects_invalid_explicit_source() { + // An explicit invalid source falls back to `manual` (we filter via is_valid), + // so the create should succeed but with source = "manual". This guards + // against a 500: bad input shouldn't crash, even if we don't surface 400. 
+ let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let body = CreateSeriesAliasRequest { + alias: "Test".to_string(), + source: Some("garbage".to_string()), + }; + let req = post_json_request_with_auth( + &format!("/api/v1/series/{}/aliases", series_id), + &body, + &token, + ); + let (status, dto): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::CREATED); + assert_eq!(dto.unwrap().source, "manual"); +} + +#[tokio::test] +async fn delete_alias_removes_row() { + use codex::db::repositories::SeriesAliasRepository; + + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let alias = SeriesAliasRepository::create(&db, series_id, "Manual Alias", "manual") + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = delete_request_with_auth( + &format!("/api/v1/series/{}/aliases/{}", series_id, alias.id), + &token, + ); + let (status, _bytes) = make_request(app, req).await; + assert_eq!(status, StatusCode::NO_CONTENT); + + let remaining = SeriesAliasRepository::get_for_series(&db, series_id) + .await + .unwrap(); + assert!(remaining.is_empty()); +} + +#[tokio::test] +async fn delete_alias_404_when_alias_missing() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let fake = Uuid::new_v4(); + let req = delete_request_with_auth( + &format!("/api/v1/series/{}/aliases/{}", series_id, fake), + &token, + ); + let (status, _): (StatusCode, 
Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::NOT_FOUND); +} + +#[tokio::test] +async fn delete_alias_404_when_belongs_to_other_series() { + use codex::db::repositories::SeriesAliasRepository; + + let (db, _temp) = setup_test_db().await; + let (lib_id, series_a) = create_test_series(&db).await; + let series_b = SeriesRepository::create(&db, lib_id, "Other", None) + .await + .unwrap(); + let alias_b = SeriesAliasRepository::create(&db, series_b.id, "Belongs To B", "manual") + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + // Try to delete series_b's alias by quoting series_a's path. + let req = delete_request_with_auth( + &format!("/api/v1/series/{}/aliases/{}", series_a, alias_b.id), + &token, + ); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::NOT_FOUND); + + // Confirm alias still exists. 
+ assert!( + SeriesAliasRepository::get_by_id(&db, alias_b.id) + .await + .unwrap() + .is_some() + ); +} + +#[tokio::test] +async fn aliases_require_write_permission_for_mutations() { + let (db, _temp) = setup_test_db().await; + let (_lib, series_id) = create_test_series(&db).await; + let state = create_test_auth_state(db.clone()).await; + let token = create_regular_user_and_token(&db, &state).await; + + let app = create_test_router(state).await; + let body = CreateSeriesAliasRequest { + alias: "X".to_string(), + source: None, + }; + let req = post_json_request_with_auth( + &format!("/api/v1/series/{}/aliases", series_id), + &body, + &token, + ); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::FORBIDDEN); +} diff --git a/web/openapi.json b/web/openapi.json index b2e200e7..27d7693d 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -8789,6 +8789,172 @@ ] } }, + "/api/v1/series/{series_id}/aliases": { + "get": { + "tags": [ + "Tracking" + ], + "summary": "List release-matching aliases for a series.", + "operationId": "list_series_aliases", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "List of aliases", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasListResponse" + } + } + } + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + }, + "post": { + "tags": [ + "Tracking" + ], + "summary": "Create a release-matching alias for a series.", + "description": "Idempotent: if `(series_id, alias)` already exists, returns the existing\nrow with HTTP 200 instead of inserting a duplicate.", + "operationId": "create_series_alias", + "parameters": [ + { + "name": 
"series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateSeriesAliasRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Alias already existed (idempotent)", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, + "201": { + "description": "Alias created", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, + "400": { + "description": "Invalid alias (empty after normalization)" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, + "/api/v1/series/{series_id}/aliases/{alias_id}": { + "delete": { + "tags": [ + "Tracking" + ], + "summary": "Delete a release-matching alias.", + "operationId": "delete_series_alias", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "name": "alias_id", + "in": "path", + "description": "Alias ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "Alias deleted" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series or alias not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/{series_id}/alternate-titles": { "get": { "tags": [ @@ -11459,6 +11625,113 @@ ] } }, + "/api/v1/series/{series_id}/tracking": { + "get": { + "tags": [ + "Tracking" + ], + "summary": "Get release-tracking config for a series.", + "description": "Returns a virtual untracked 
row when no `series_tracking` row exists, so the\nfrontend can render the panel uniformly without special-casing absent rows.", + "operationId": "get_series_tracking", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Tracking config", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesTrackingDto" + } + } + } + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + }, + "patch": { + "tags": [ + "Tracking" + ], + "summary": "Update release-tracking config for a series.", + "description": "Upserts: creates the row on first write, applies the patch otherwise.\nAll fields are optional — omit to leave alone, send `null` on a nullable\nfield to clear it.", + "operationId": "update_series_tracking", + "parameters": [ + { + "name": "series_id", + "in": "path", + "description": "Series ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateSeriesTrackingRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Tracking config updated", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SeriesTrackingDto" + } + } + } + }, + "400": { + "description": "Invalid tracking_status" + }, + "403": { + "description": "Forbidden" + }, + "404": { + "description": "Series not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/{series_id}/unread": { "post": { "tags": [ @@ -21386,6 +21659,26 @@ } } }, + "CreateSeriesAliasRequest": { + "type": "object", + "required": [ + "alias" + ], + 
"properties": { + "alias": { + "type": "string", + "description": "Alias text. Will be trimmed; must normalize to non-empty.", + "example": "Boku no Hero Academia" + }, + "source": { + "type": [ + "string", + "null" + ], + "description": "Optional explicit source. Defaults to `manual` when called from the API.\nPlugin-internal flows write `metadata`; we don't expose that to HTTP." + } + } + }, "CreateSeriesExportRequest": { "type": "object", "description": "Request body for creating a new series export", @@ -32435,6 +32728,64 @@ } } }, + "SeriesAliasDto": { + "type": "object", + "description": "Title alias used by release-source plugins to match incoming releases by\ntitle (Nyaa, MangaUpdates without an external ID, etc.).", + "required": [ + "id", + "seriesId", + "alias", + "normalized", + "source", + "createdAt" + ], + "properties": { + "alias": { + "type": "string", + "description": "Alias as entered (preserves casing/punctuation).", + "example": "My Hero Academia" + }, + "createdAt": { + "type": "string", + "format": "date-time" + }, + "id": { + "type": "string", + "format": "uuid", + "description": "Alias row ID.", + "example": "550e8400-e29b-41d4-a716-446655440100" + }, + "normalized": { + "type": "string", + "description": "Lowercased + punctuation-stripped form used for matching.", + "example": "my hero academia" + }, + "seriesId": { + "type": "string", + "format": "uuid", + "example": "550e8400-e29b-41d4-a716-446655440002" + }, + "source": { + "type": "string", + "description": "`metadata` (auto-derived) | `manual` (user-entered).", + "example": "manual" + } + } + }, + "SeriesAliasListResponse": { + "type": "object", + "required": [ + "aliases" + ], + "properties": { + "aliases": { + "type": "array", + "items": { + "$ref": "#/components/schemas/SeriesAliasDto" + } + } + } + }, "SeriesAverageRatingResponse": { "type": "object", "description": "Response containing the average community rating for a series", @@ -33504,6 +33855,89 @@ "custom" ] }, + 
"SeriesTrackingDto": { + "type": "object", + "description": "Per-series release-tracking configuration.\n\nReturned even for untracked series — the row defaults to `tracked: false`\nwith conservative defaults so the frontend can render the panel without\nspecial-casing missing rows.", + "required": [ + "seriesId", + "tracked", + "trackingStatus", + "trackChapters", + "trackVolumes", + "createdAt", + "updatedAt" + ], + "properties": { + "confidenceThresholdOverride": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Per-series override of the server's confidence threshold (0.0 - 1.0)." + }, + "createdAt": { + "type": "string", + "format": "date-time", + "description": "When the row was created (epoch when virtual)." + }, + "latestKnownChapter": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Latest known external chapter (supports decimals like 12.5)." + }, + "latestKnownVolume": { + "type": [ + "integer", + "null" + ], + "format": "int32", + "description": "Latest known external volume." + }, + "pollIntervalOverrideS": { + "type": [ + "integer", + "null" + ], + "format": "int32", + "description": "Per-series override of the source poll interval (seconds)." + }, + "seriesId": { + "type": "string", + "format": "uuid", + "description": "Series ID this config belongs to.", + "example": "550e8400-e29b-41d4-a716-446655440002" + }, + "trackChapters": { + "type": "boolean", + "description": "Whether to announce new chapters." + }, + "trackVolumes": { + "type": "boolean", + "description": "Whether to announce new volumes." + }, + "tracked": { + "type": "boolean", + "description": "Whether release tracking is enabled." 
+ }, + "trackingStatus": { + "type": "string", + "description": "Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`.", + "example": "ongoing" + }, + "updatedAt": { + "type": "string", + "format": "date-time", + "description": "When the row was last updated (epoch when virtual)." + }, + "volumeChapterMap": { + "description": "Sparse map of `{ \"\": { \"first\": ch, \"last\": ch } }`." + } + } + }, "SeriesUpdateResponse": { "type": "object", "description": "Response for series update", @@ -35383,6 +35817,40 @@ "format": "uuid" } } + }, + { + "type": "object", + "description": "Backfill release-tracking aliases from existing series metadata.\n\nWalks series in scope, harvests the canonical title plus alternate titles\nfrom `series_metadata` and `series_alternate_titles`, and seeds them as\n`metadata`-source aliases in `series_aliases`. Idempotent — re-runs do\nnot create duplicates. Does NOT enable tracking; that stays explicit.", + "required": [ + "type" + ], + "properties": { + "libraryId": { + "type": [ + "string", + "null" + ], + "format": "uuid", + "description": "If set, scope to this library; otherwise all series." + }, + "seriesIds": { + "type": [ + "array", + "null" + ], + "items": { + "type": "string", + "format": "uuid" + }, + "description": "If set, scope to these specific series (takes precedence over library_id)." + }, + "type": { + "type": "string", + "enum": [ + "backfill_tracking_from_metadata" + ] + } + } } ], "description": "Task types supported by the distributed task queue" @@ -36475,6 +36943,67 @@ } } }, + "UpdateSeriesTrackingRequest": { + "type": "object", + "description": "PATCH payload for tracking config. All fields are optional:\nomit a field to leave it untouched. 
Use a JSON `null` on a nullable field\nto clear it explicitly.", + "properties": { + "confidenceThresholdOverride": { + "type": [ + "number", + "null" + ], + "format": "double" + }, + "latestKnownChapter": { + "type": [ + "number", + "null" + ], + "format": "double", + "description": "Use `Some(null)` to clear, `Some()` to set, omit to leave alone." + }, + "latestKnownVolume": { + "type": [ + "integer", + "null" + ], + "format": "int32" + }, + "pollIntervalOverrideS": { + "type": [ + "integer", + "null" + ], + "format": "int32" + }, + "trackChapters": { + "type": [ + "boolean", + "null" + ] + }, + "trackVolumes": { + "type": [ + "boolean", + "null" + ] + }, + "tracked": { + "type": [ + "boolean", + "null" + ] + }, + "trackingStatus": { + "type": [ + "string", + "null" + ], + "description": "`ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`." + }, + "volumeChapterMap": {} + } + }, "UpdateSettingRequest": { "type": "object", "description": "Update setting request", @@ -37359,6 +37888,10 @@ "name": "Series", "description": "Series browsing and search endpoints" }, + { + "name": "Tracking", + "description": "Release-tracking config and matcher aliases" + }, { "name": "Books", "description": "Book details and metadata endpoints" diff --git a/web/src/api/tracking.ts b/web/src/api/tracking.ts new file mode 100644 index 00000000..e860d797 --- /dev/null +++ b/web/src/api/tracking.ts @@ -0,0 +1,51 @@ +import type { components } from "@/types/api.generated"; +import { api } from "./client"; + +export type SeriesTracking = components["schemas"]["SeriesTrackingDto"]; +export type UpdateSeriesTrackingRequest = + components["schemas"]["UpdateSeriesTrackingRequest"]; +export type SeriesAlias = components["schemas"]["SeriesAliasDto"]; +export type CreateSeriesAliasRequest = + components["schemas"]["CreateSeriesAliasRequest"]; + +export const trackingApi = { + getTracking: async (seriesId: string): Promise => { + const response = await api.get( + 
`/series/${seriesId}/tracking`, + ); + return response.data; + }, + + updateTracking: async ( + seriesId: string, + update: UpdateSeriesTrackingRequest, + ): Promise => { + const response = await api.patch( + `/series/${seriesId}/tracking`, + update, + ); + return response.data; + }, + + listAliases: async (seriesId: string): Promise => { + const response = await api.get<{ aliases: SeriesAlias[] }>( + `/series/${seriesId}/aliases`, + ); + return response.data.aliases; + }, + + createAlias: async ( + seriesId: string, + request: CreateSeriesAliasRequest, + ): Promise => { + const response = await api.post( + `/series/${seriesId}/aliases`, + request, + ); + return response.data; + }, + + deleteAlias: async (seriesId: string, aliasId: string): Promise => { + await api.delete(`/series/${seriesId}/aliases/${aliasId}`); + }, +}; diff --git a/web/src/components/library/BulkSelectionToolbar.test.tsx b/web/src/components/library/BulkSelectionToolbar.test.tsx index d3f8e5f4..f00a14c6 100644 --- a/web/src/components/library/BulkSelectionToolbar.test.tsx +++ b/web/src/components/library/BulkSelectionToolbar.test.tsx @@ -38,6 +38,20 @@ vi.mock("@/api/series", () => ({ }, })); +vi.mock("@/api/tracking", () => ({ + trackingApi: { + updateTracking: vi.fn().mockResolvedValue({ + seriesId: "series-1", + tracked: true, + trackingStatus: "unknown", + trackChapters: true, + trackVolumes: true, + createdAt: "2024-01-01T00:00:00Z", + updatedAt: "2024-01-01T00:00:00Z", + }), + }, +})); + // Mock the usePermissions hook - default to admin (all permissions) vi.mock("@/hooks/usePermissions", () => ({ usePermissions: vi.fn(), @@ -351,6 +365,52 @@ describe("BulkSelectionToolbar", () => { expect(seriesApi.bulkAnalyze).toHaveBeenCalledWith(["series-1"]); }); }); + + it("should call updateTracking for each series when Mark as Tracked clicked", async () => { + const { trackingApi } = await import("@/api/tracking"); + const user = userEvent.setup(); + + 
useBulkSelectionStore.getState().toggleSelection("series-1", "series"); + useBulkSelectionStore.getState().toggleSelection("series-2", "series"); + + renderWithProviders(); + + await user.click(screen.getByRole("button", { name: /more actions/i })); + await waitFor(() => { + expect(screen.getByText("Mark as Tracked")).toBeInTheDocument(); + }); + await user.click(screen.getByText("Mark as Tracked")); + + await waitFor(() => { + expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-1", { + tracked: true, + }); + expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-2", { + tracked: true, + }); + }); + }); + + it("should call updateTracking with tracked=false when Mark as Untracked clicked", async () => { + const { trackingApi } = await import("@/api/tracking"); + const user = userEvent.setup(); + + useBulkSelectionStore.getState().toggleSelection("series-1", "series"); + + renderWithProviders(); + + await user.click(screen.getByRole("button", { name: /more actions/i })); + await waitFor(() => { + expect(screen.getByText("Mark as Untracked")).toBeInTheDocument(); + }); + await user.click(screen.getByText("Mark as Untracked")); + + await waitFor(() => { + expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-1", { + tracked: false, + }); + }); + }); }); describe("selection clearing after action", () => { diff --git a/web/src/components/library/BulkSelectionToolbar.tsx b/web/src/components/library/BulkSelectionToolbar.tsx index bf50cf4c..51598183 100644 --- a/web/src/components/library/BulkSelectionToolbar.tsx +++ b/web/src/components/library/BulkSelectionToolbar.tsx @@ -11,6 +11,8 @@ import { import { notifications } from "@mantine/notifications"; import { IconAnalyze, + IconBell, + IconBellOff, IconBook, IconBookOff, IconChevronDown, @@ -27,6 +29,7 @@ import { useEffect, useMemo, useState } from "react"; import { booksApi } from "@/api/books"; import { pluginActionsApi, pluginsApi } from "@/api/plugins"; import { seriesApi } from 
"@/api/series"; +import { trackingApi } from "@/api/tracking"; import { BulkMetadataEditModal } from "@/components/library/BulkMetadataEditModal"; import { usePermissions } from "@/hooks/usePermissions"; import { @@ -386,6 +389,49 @@ export function BulkSelectionToolbar() { }, }); + // Bulk set release-tracking flag. No dedicated bulk endpoint exists yet — + // fan out per-series PATCH calls. Acceptable scale for a hand-managed library + // (hundreds of series, low-frequency action). + const bulkSetTrackedMutation = useMutation({ + mutationFn: async ({ + seriesIds, + tracked, + }: { + seriesIds: string[]; + tracked: boolean; + }) => { + const results = await Promise.allSettled( + seriesIds.map((id) => trackingApi.updateTracking(id, { tracked })), + ); + const failed = results.filter((r) => r.status === "rejected").length; + return { total: seriesIds.length, failed }; + }, + onSuccess: ({ total, failed }, { tracked }) => { + if (failed === 0) { + notifications.show({ + title: tracked ? "Tracking enabled" : "Tracking disabled", + message: `Updated ${total} series.`, + color: tracked ? 
"green" : "blue", + }); + } else { + notifications.show({ + title: "Some updates failed", + message: `${total - failed} of ${total} series updated; ${failed} failed.`, + color: "yellow", + }); + } + refetchAll(); + clearSelection(); + }, + onError: (error: Error) => { + notifications.show({ + title: "Failed to update tracking", + message: error.message || "Bulk tracking update failed", + color: "red", + }); + }, + }); + // Bulk reset series metadata const bulkResetMetadataMutation = useMutation({ mutationFn: (seriesIds: string[]) => seriesApi.bulkResetMetadata(seriesIds), @@ -452,7 +498,8 @@ export function BulkSelectionToolbar() { bulkGenerateSeriesBookThumbnailsMutation.isPending || bulkReprocessTitlesMutation.isPending || bulkRenumberSeriesMutation.isPending || - bulkResetMetadataMutation.isPending; + bulkResetMetadataMutation.isPending || + bulkSetTrackedMutation.isPending; // Determine if the "More" menu should be shown based on permissions const showBooksMoreMenu = isBooks && (canWriteBooks || canWriteTasks); @@ -799,6 +846,33 @@ export function BulkSelectionToolbar() { Reprocess Titles + + Release Tracking + } + onClick={() => + bulkSetTrackedMutation.mutate({ + seriesIds: selectedIds, + tracked: true, + }) + } + disabled={isAnyPending} + > + Mark as Tracked + + } + onClick={() => + bulkSetTrackedMutation.mutate({ + seriesIds: selectedIds, + tracked: false, + }) + } + disabled={isAnyPending} + > + Mark as Untracked + + Metadata ({ + trackingApi: { + getTracking: vi.fn(), + updateTracking: vi.fn(), + listAliases: vi.fn(), + createAlias: vi.fn(), + deleteAlias: vi.fn(), + }, +})); + +const get = vi.mocked(trackingApi.getTracking); +const update = vi.mocked(trackingApi.updateTracking); +const list = vi.mocked(trackingApi.listAliases); +const create = vi.mocked(trackingApi.createAlias); +const del = vi.mocked(trackingApi.deleteAlias); + +const SERIES_ID = "00000000-0000-0000-0000-000000000001"; + +const baseTracking = { + seriesId: SERIES_ID, + tracked: false, + 
trackingStatus: "unknown", + trackChapters: true, + trackVolumes: true, + createdAt: "2024-01-01T00:00:00Z", + updatedAt: "2024-01-01T00:00:00Z", +}; + +const baseAlias = ( + alias: string, + source: "manual" | "metadata" = "manual", +) => ({ + id: `alias-${alias}`, + seriesId: SERIES_ID, + alias, + normalized: alias.toLowerCase(), + source, + createdAt: "2024-01-01T00:00:00Z", +}); + +describe("TrackingPanel", () => { + beforeEach(() => { + vi.clearAllMocks(); + list.mockResolvedValue([]); + }); + + it("renders the toggle in untracked state", async () => { + get.mockResolvedValue({ ...baseTracking }); + + renderWithProviders(); + + await waitFor(() => { + expect( + screen.getByRole("switch", { name: /Toggle release tracking/i }), + ).not.toBeChecked(); + }); + + // Status select is hidden when not tracked. + expect(screen.queryByText("Status")).not.toBeInTheDocument(); + }); + + it("shows status and announce flags when tracked", async () => { + get.mockResolvedValue({ + ...baseTracking, + tracked: true, + trackingStatus: "ongoing", + latestKnownChapter: 142.5, + }); + + renderWithProviders(); + + await waitFor(() => { + expect(screen.getByText("Status")).toBeInTheDocument(); + }); + expect(screen.getByText(/Ongoing/i)).toBeInTheDocument(); + expect(screen.getByLabelText("Chapters")).toBeChecked(); + expect(screen.getByLabelText("Volumes")).toBeChecked(); + }); + + it("toggles tracked via mutation", async () => { + const user = userEvent.setup(); + get.mockResolvedValue({ ...baseTracking }); + update.mockResolvedValue({ ...baseTracking, tracked: true }); + + renderWithProviders(); + + const toggle = await screen.findByRole("switch", { + name: /Toggle release tracking/i, + }); + await user.click(toggle); + + await waitFor(() => { + expect(update).toHaveBeenCalledWith(SERIES_ID, { tracked: true }); + }); + }); + + it("renders aliases and supports add", async () => { + const user = userEvent.setup(); + get.mockResolvedValue({ ...baseTracking, tracked: true }); + 
list.mockResolvedValue([baseAlias("Existing")]); + create.mockImplementation(async (_id, req) => baseAlias(req.alias)); + + renderWithProviders(); + + await screen.findByText("Existing"); + + const input = screen.getByPlaceholderText(/Add an alias/i); + await user.type(input, "New Alias"); + await user.click(screen.getByRole("button", { name: /^Add$/i })); + + await waitFor(() => { + expect(create).toHaveBeenCalledWith(SERIES_ID, { alias: "New Alias" }); + }); + }); + + it("hides edit affordances when canEdit=false", async () => { + get.mockResolvedValue({ ...baseTracking, tracked: true }); + list.mockResolvedValue([baseAlias("Read Only")]); + + renderWithProviders(); + + await screen.findByText("Read Only"); + + expect( + screen.queryByPlaceholderText(/Add an alias/i), + ).not.toBeInTheDocument(); + expect( + screen.queryByRole("button", { name: /^Add$/i }), + ).not.toBeInTheDocument(); + expect( + screen.getByRole("switch", { name: /Toggle release tracking/i }), + ).toBeDisabled(); + }); + + it("calls deleteAlias when remove is clicked", async () => { + const user = userEvent.setup(); + get.mockResolvedValue({ ...baseTracking, tracked: true }); + const alias = baseAlias("Delete Me"); + list.mockResolvedValue([alias]); + del.mockResolvedValue(undefined); + + renderWithProviders(); + + await screen.findByText("Delete Me"); + + const removeButton = screen.getByRole("button", { + name: /Remove alias Delete Me/i, + }); + await user.click(removeButton); + + await waitFor(() => { + expect(del).toHaveBeenCalledWith(SERIES_ID, alias.id); + }); + }); +}); diff --git a/web/src/components/series/TrackingPanel.tsx b/web/src/components/series/TrackingPanel.tsx new file mode 100644 index 00000000..e4414bbb --- /dev/null +++ b/web/src/components/series/TrackingPanel.tsx @@ -0,0 +1,263 @@ +import { + ActionIcon, + Badge, + Box, + Button, + Card, + Divider, + Group, + NumberInput, + Select, + Stack, + Switch, + Text, + TextInput, + Tooltip, +} from "@mantine/core"; +import { 
IconBellRinging, IconPlus, IconTrash } from "@tabler/icons-react"; +import { type FormEvent, useState } from "react"; +import { + useCreateSeriesAlias, + useDeleteSeriesAlias, + useSeriesAliases, + useSeriesTracking, + useUpdateSeriesTracking, +} from "@/hooks/useSeriesTracking"; + +interface TrackingPanelProps { + seriesId: string; + /** When false, shows read-only state (used for users without SeriesWrite). */ + canEdit: boolean; +} + +const STATUS_OPTIONS = [ + { value: "unknown", label: "Unknown" }, + { value: "ongoing", label: "Ongoing" }, + { value: "complete", label: "Complete" }, + { value: "hiatus", label: "Hiatus" }, + { value: "cancelled", label: "Cancelled" }, +]; + +/** + * Inline panel on the series detail page for release-tracking config. + * + * Shows: tracked toggle, status, chapter/volume tracking flags, latest known + * chapter/volume, and the aliases list. All mutations debounce-free — the + * surface is small enough that immediate fire-on-blur is fine. + */ +export function TrackingPanel({ seriesId, canEdit }: TrackingPanelProps) { + const trackingQuery = useSeriesTracking(seriesId); + const aliasesQuery = useSeriesAliases(seriesId); + const updateTracking = useUpdateSeriesTracking(seriesId); + const createAlias = useCreateSeriesAlias(seriesId); + const deleteAlias = useDeleteSeriesAlias(seriesId); + + const [aliasDraft, setAliasDraft] = useState(""); + + const tracking = trackingQuery.data; + const aliases = aliasesQuery.data ?? []; + + const handleAddAlias = async (e: FormEvent) => { + e.preventDefault(); + const trimmed = aliasDraft.trim(); + if (!trimmed) return; + try { + await createAlias.mutateAsync({ alias: trimmed }); + setAliasDraft(""); + } catch { + // Notification surfaced inside the hook. 
+ } + }; + + return ( + + + + + + Release tracking + {tracking?.tracked && ( + + Tracking + + )} + + + updateTracking.mutate({ tracked: event.currentTarget.checked }) + } + disabled={!canEdit || trackingQuery.isLoading} + aria-label="Toggle release tracking" + /> + + + {tracking?.tracked && ( + <> + + { + setState(value ?? "announced"); + setPage(1); + }} + w={180} + /> + { + setLanguage(e.currentTarget.value); + setPage(1); + }} + w={140} + /> + { + setSeriesIdFilter(e.currentTarget.value); + setPage(1); + }} + w={320} + /> + + + + {error && ( + + + Failed to load releases:{" "} + {error instanceof Error ? error.message : String(error)} + + + )} + + {isLoading ? ( + + + + ) : entries.length === 0 ? ( + + + No releases match these filters. New chapters and volumes show up + here once a release source picks them up. + + + ) : ( + + + + + Series + Ch / Vol + Source / Group + Lang + State + Observed + + + + + {entries.map((entry: ReleaseLedgerEntry) => { + const stateInfo = STATE_BADGE[entry.state] ?? { + color: "gray", + label: entry.state, + }; + return ( + + + + {entry.seriesId.slice(0, 8)}… + + + + + {entry.chapter !== null && entry.chapter !== undefined + ? `Ch ${entry.chapter}` + : ""} + {entry.volume !== null && entry.volume !== undefined + ? entry.chapter !== null && + entry.chapter !== undefined + ? ` · Vol ${entry.volume}` + : `Vol ${entry.volume}` + : ""} + {!entry.chapter && !entry.volume ? "—" : ""} + + + + + {entry.groupOrUploader && ( + {entry.groupOrUploader} + )} + + source: {entry.sourceId.slice(0, 8)}… + + + + + {entry.language ?? "—"} + + + + {stateInfo.label} + + + + + {format(new Date(entry.observedAt), "yyyy-MM-dd")} + + + + + + + + + + {entry.state === "announced" && ( + <> + + markAcquired.mutate(entry.id)} + aria-label="Mark acquired" + > + + + + + dismiss.mutate(entry.id)} + aria-label="Dismiss" + > + + + + + )} + + + + ); + })} + +
+
+ )} + + {totalPages > 1 && ( + + + + )} + + + ); +} diff --git a/web/src/pages/SeriesDetail.tsx b/web/src/pages/SeriesDetail.tsx index 28ad377a..0caf9020 100644 --- a/web/src/pages/SeriesDetail.tsx +++ b/web/src/pages/SeriesDetail.tsx @@ -55,6 +55,7 @@ import { BulkSelectionToolbar } from "@/components/library/BulkSelectionToolbar" import { MetadataApplyFlow } from "@/components/metadata"; import { AlternateTitles, + BehindByBadge, CommunityRating, CustomMetadataDisplay, ExternalIds, @@ -65,11 +66,13 @@ import { SeriesInfoModal, SeriesMetadataEditModal, SeriesRating, + SeriesReleasesPanel, TrackingPanel, } from "@/components/series"; import { formatSeriesCounts } from "@/components/series/seriesCounts"; import { useDynamicDocumentTitle } from "@/hooks/useDocumentTitle"; import { usePermissions } from "@/hooks/usePermissions"; +import { useSeriesTracking } from "@/hooks/useSeriesTracking"; import { useCoverUpdatesStore } from "@/store/coverUpdatesStore"; import { PERMISSIONS } from "@/types/permissions"; import { transformFullSeriesToSeriesContext } from "@/utils/templateUtils"; @@ -151,6 +154,12 @@ export function SeriesDetail() { enabled: !!seriesId && isAdmin, }); + // Fetch tracking config so we can render Behind-by-N badges next to the + // header counts (translation: latestKnownChapter > localMaxChapter, + // upstream: upstreamChapterGap > 0). The query is cheap and shared with + // the TrackingPanel below. + const { data: tracking } = useSeriesTracking(seriesId ?? "", !!seriesId); + // Fetch available plugin actions for series:detail scope, filtered by library const { data: pluginActions } = useQuery({ queryKey: ["plugin-actions", "series:detail", series?.libraryId], @@ -781,6 +790,66 @@ export function SeriesDetail() { ) : null; })()} + {/* Behind-by-N badges: translation gap (Phase 6 release sources) + and upstream gap (Phase 5 metadata signal). 
Each badge is a + no-op when the gap is zero/missing, the series isn't tracked, + or the corresponding axis is disabled. */} + {tracking?.tracked && ( + + {tracking.trackChapters && + tracking.latestKnownChapter != null && + series.localMaxChapter != null && + tracking.latestKnownChapter > series.localMaxChapter && ( + + )} + {tracking.trackVolumes && + tracking.latestKnownVolume != null && + series.localMaxVolume != null && + tracking.latestKnownVolume > series.localMaxVolume && ( + + )} + {series.upstreamChapterGap != null && + series.upstreamChapterGap > 0 && ( + + )} + {series.upstreamVolumeGap != null && + series.upstreamVolumeGap > 0 && ( + + )} + + )} + {/* Alternate titles inline */} {series.alternateTitles && series.alternateTitles.length > 0 && ( @@ -955,6 +1024,11 @@ export function SeriesDetail() { )} + {/* Releases panel: ledger entries grouped by chapter/volume. + Shows whenever the series has tracking enabled — the panel + renders an empty-state message if no entries exist yet. 
*/} + {tracking?.tracked && } + {/* External Links */} {series.externalLinks && series.externalLinks.length > 0 && ( diff --git a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx new file mode 100644 index 00000000..3004ed4c --- /dev/null +++ b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx @@ -0,0 +1,118 @@ +import { beforeEach, describe, expect, it, vi } from "vitest"; +import { + type ReleaseSource, + releaseSourcesApi, + releasesApi, +} from "@/api/releases"; +import { renderWithProviders, screen, userEvent, waitFor } from "@/test/utils"; +import { ReleaseTrackingSettings } from "./ReleaseTrackingSettings"; + +vi.mock("@/api/releases", () => ({ + releasesApi: { + listInbox: vi.fn(), + listForSeries: vi.fn(), + patchEntry: vi.fn(), + dismiss: vi.fn(), + markAcquired: vi.fn(), + }, + releaseSourcesApi: { + list: vi.fn(), + update: vi.fn(), + pollNow: vi.fn(), + }, +})); + +const list = vi.mocked(releaseSourcesApi.list); +const update = vi.mocked(releaseSourcesApi.update); +const pollNow = vi.mocked(releaseSourcesApi.pollNow); + +function source(over: Partial = {}): ReleaseSource { + return { + id: "11111111-1111-1111-1111-111111111111", + pluginId: "release-mangaupdates", + sourceKey: "mu:batch", + displayName: "MangaUpdates batch", + kind: "rss-series", + enabled: true, + pollIntervalS: 21600, + lastPolledAt: "2026-05-01T00:00:00Z", + lastError: null, + lastErrorAt: null, + etag: null, + config: null, + createdAt: "2026-01-01T00:00:00Z", + updatedAt: "2026-05-01T00:00:00Z", + ...over, + }; +} + +describe("ReleaseTrackingSettings", () => { + beforeEach(() => { + vi.clearAllMocks(); + void releasesApi; + }); + + it("renders sources and the OK status when last poll is fresh", async () => { + list.mockResolvedValueOnce([source()]); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("MangaUpdates batch")).toBeInTheDocument(); + }); + 
expect(screen.getByText("OK")).toBeInTheDocument(); + }); + + it("shows an Errored badge when last_error is populated", async () => { + list.mockResolvedValueOnce([ + source({ lastError: "upstream returned 503" }), + ]); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("Errored")).toBeInTheDocument(); + }); + }); + + it("toggling enabled calls update with the new value", async () => { + list.mockResolvedValue([source()]); + update.mockResolvedValueOnce(source({ enabled: false })); + const user = userEvent.setup(); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("MangaUpdates batch")).toBeInTheDocument(); + }); + const toggle = screen.getByRole("switch", { name: "Enable source" }); + await user.click(toggle); + await waitFor(() => { + expect(update).toHaveBeenCalledWith( + "11111111-1111-1111-1111-111111111111", + expect.objectContaining({ enabled: false }), + ); + }); + }); + + it("Poll now button is disabled when source is disabled", async () => { + list.mockResolvedValueOnce([source({ enabled: false })]); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("MangaUpdates batch")).toBeInTheDocument(); + }); + const pollButton = screen.getByLabelText("Poll now"); + expect(pollButton).toBeDisabled(); + }); + + it("clicking Poll now triggers the API call when source is enabled", async () => { + list.mockResolvedValue([source()]); + pollNow.mockResolvedValueOnce({ status: "enqueued", message: "ok" }); + const user = userEvent.setup(); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("MangaUpdates batch")).toBeInTheDocument(); + }); + const pollButton = screen.getByLabelText("Poll now"); + await user.click(pollButton); + await waitFor(() => { + expect(pollNow).toHaveBeenCalledWith( + "11111111-1111-1111-1111-111111111111", + ); + }); + }); +}); diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx new file 
mode 100644 index 00000000..17834ecb --- /dev/null +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -0,0 +1,301 @@ +import { + ActionIcon, + Badge, + Box, + Card, + Group, + Loader, + NumberInput, + Stack, + Switch, + Table, + TagsInput, + Text, + Title, + Tooltip, +} from "@mantine/core"; +import { + IconAlertCircle, + IconBellRinging, + IconClockHour4, + IconRefresh, +} from "@tabler/icons-react"; +import { formatDistanceToNow } from "date-fns"; +import { useState } from "react"; +import type { ReleaseSource } from "@/api/releases"; +import { + usePollReleaseSourceNow, + useReleaseSources, + useUpdateReleaseSource, +} from "@/hooks/useReleases"; +import { useReleaseAnnouncementsStore } from "@/store/releaseAnnouncementsStore"; + +const PRESETS = [ + { value: 3600, label: "1h" }, + { value: 21600, label: "6h" }, + { value: 43200, label: "12h" }, + { value: 86400, label: "Daily" }, + { value: 604800, label: "Weekly" }, +]; + +function intervalLabel(seconds: number): string { + const preset = PRESETS.find((p) => p.value === seconds); + if (preset) return preset.label; + if (seconds % 3600 === 0) return `${seconds / 3600}h`; + return `${seconds}s`; +} + +export function ReleaseTrackingSettings() { + const sourcesQuery = useReleaseSources(); + const update = useUpdateReleaseSource(); + const pollNow = usePollReleaseSourceNow(); + + return ( + + + + + Release tracking + + + + Manage release sources. Each row is one logical feed exposed by a + plugin (e.g. one Nyaa uploader or one MangaUpdates batch). Disabling a + source pauses its scheduled polls; "Poll now" enqueues an immediate + fetch. + + + + + {sourcesQuery.isLoading ? ( + + + Loading sources… + + ) : sourcesQuery.error ? ( + + + + + Failed to load sources. + + + + ) : (sourcesQuery.data ?? []).length === 0 ? ( + + + No release sources configured. Install a plugin that declares the + `release_source` capability and configure at least one source. 
+ + + ) : ( + + + + + Source + Plugin + Interval + Last poll + Status + Enabled + + + + + {(sourcesQuery.data ?? []).map((source) => ( + + update.mutate({ + sourceId: source.id, + update: { enabled }, + }) + } + onIntervalChange={(seconds) => + update.mutate({ + sourceId: source.id, + update: { pollIntervalS: seconds }, + }) + } + onPollNow={() => pollNow.mutate(source.id)} + pollNowPending={pollNow.isPending} + /> + ))} + +
+
+ )} +
+
+ ); +} + +function NotificationPreferencesCard() { + const allowedLanguages = useReleaseAnnouncementsStore( + (s) => s.allowedLanguages, + ); + const allowedPlugins = useReleaseAnnouncementsStore((s) => s.allowedPlugins); + const setAllowedLanguages = useReleaseAnnouncementsStore( + (s) => s.setAllowedLanguages, + ); + const setAllowedPlugins = useReleaseAnnouncementsStore( + (s) => s.setAllowedPlugins, + ); + + return ( + + + + + Notification preferences + + + Filter announcement toasts and the Releases nav badge. Empty means "no + filter — let everything through." Per-series mute lives on each series + detail page. + + setAllowedLanguages(values)} + /> + setAllowedPlugins(values)} + /> + + + ); +} + +interface RowProps { + source: ReleaseSource; + onToggle: (enabled: boolean) => void; + onIntervalChange: (seconds: number) => void; + onPollNow: () => void; + pollNowPending: boolean; +} + +function ReleaseSourceRow({ + source, + onToggle, + onIntervalChange, + onPollNow, + pollNowPending, +}: RowProps) { + const [draft, setDraft] = useState(source.pollIntervalS); + + const lastPolled = source.lastPolledAt + ? formatDistanceToNow(new Date(source.lastPolledAt), { addSuffix: true }) + : "—"; + + return ( + + + + + {source.displayName} + + + {source.sourceKey} + + + + + + {source.pluginId} + + + + + { + if (typeof value === "number") { + setDraft(value); + } else if (value === "") { + setDraft(null); + } + }} + onBlur={() => { + if ( + draft !== null && + draft > 0 && + draft !== source.pollIntervalS + ) { + onIntervalChange(draft); + } else { + setDraft(source.pollIntervalS); + } + }} + min={60} + max={604800} + step={60} + w={120} + suffix=" s" + aria-label="Poll interval seconds" + /> + + ≈ {intervalLabel(source.pollIntervalS)} + + + + + {lastPolled} + + + {source.lastError ? ( + + + Errored + + + ) : source.lastPolledAt ? 
( + + OK + + ) : ( + + Never polled + + )} + + + onToggle(event.currentTarget.checked)} + aria-label="Enable source" + /> + + + + + + + + + + ); +} diff --git a/web/src/pages/settings/index.ts b/web/src/pages/settings/index.ts index 2fb29e99..c5e93afa 100644 --- a/web/src/pages/settings/index.ts +++ b/web/src/pages/settings/index.ts @@ -7,6 +7,7 @@ export { PdfCacheSettings } from "./PdfCacheSettings"; export { PluginStorageSettings } from "./PluginStorageSettings"; export { PluginsSettings } from "./PluginsSettings"; export { ProfileSettings } from "./ProfileSettings"; +export { ReleaseTrackingSettings } from "./ReleaseTrackingSettings"; export { SeriesExportsSettings } from "./SeriesExportsSettings"; export { ServerSettings } from "./ServerSettings"; export { SharingTagsSettings } from "./SharingTagsSettings"; diff --git a/web/src/store/releaseAnnouncementsStore.test.ts b/web/src/store/releaseAnnouncementsStore.test.ts new file mode 100644 index 00000000..2896579e --- /dev/null +++ b/web/src/store/releaseAnnouncementsStore.test.ts @@ -0,0 +1,92 @@ +import { beforeEach, describe, expect, it } from "vitest"; +import { useReleaseAnnouncementsStore } from "./releaseAnnouncementsStore"; + +describe("releaseAnnouncementsStore", () => { + beforeEach(() => { + const store = useReleaseAnnouncementsStore.getState(); + store.reset(); + store.setAllowedLanguages([]); + store.setAllowedPlugins([]); + // Clear any leftover muted series from a prior test. 
+ const muted = Array.from(store.mutedSeriesIds); + for (const id of muted) { + store.toggleMute(id); + } + }); + + it("bump increments and reset clears the unseen counter", () => { + const store = useReleaseAnnouncementsStore.getState(); + store.bump(); + store.bump(); + expect(useReleaseAnnouncementsStore.getState().unseenCount).toBe(2); + store.reset(); + expect(useReleaseAnnouncementsStore.getState().unseenCount).toBe(0); + }); + + it("shouldNotify lets everything through when filters are empty", () => { + const { shouldNotify } = useReleaseAnnouncementsStore.getState(); + expect( + shouldNotify({ + seriesId: "s1", + pluginId: "release-nyaa", + language: "en", + }), + ).toBe(true); + }); + + it("shouldNotify blocks muted series", () => { + useReleaseAnnouncementsStore.getState().toggleMute("muted-series"); + const { shouldNotify } = useReleaseAnnouncementsStore.getState(); + expect( + shouldNotify({ + seriesId: "muted-series", + pluginId: "release-nyaa", + language: "en", + }), + ).toBe(false); + }); + + it("shouldNotify enforces language allowlist (case-insensitive)", () => { + useReleaseAnnouncementsStore.getState().setAllowedLanguages(["EN"]); + const { shouldNotify } = useReleaseAnnouncementsStore.getState(); + expect( + shouldNotify({ seriesId: "s1", pluginId: "p", language: "en" }), + ).toBe(true); + expect( + shouldNotify({ seriesId: "s1", pluginId: "p", language: "es" }), + ).toBe(false); + }); + + it("shouldNotify enforces plugin allowlist", () => { + useReleaseAnnouncementsStore + .getState() + .setAllowedPlugins(["release-mangaupdates"]); + const { shouldNotify } = useReleaseAnnouncementsStore.getState(); + expect( + shouldNotify({ + seriesId: "s1", + pluginId: "release-mangaupdates", + language: "en", + }), + ).toBe(true); + expect( + shouldNotify({ + seriesId: "s1", + pluginId: "release-nyaa", + language: "en", + }), + ).toBe(false); + }); + + it("toggleMute is reversible", () => { + const store = useReleaseAnnouncementsStore.getState(); + 
store.toggleMute("series-x"); + expect( + useReleaseAnnouncementsStore.getState().mutedSeriesIds.has("series-x"), + ).toBe(true); + useReleaseAnnouncementsStore.getState().toggleMute("series-x"); + expect( + useReleaseAnnouncementsStore.getState().mutedSeriesIds.has("series-x"), + ).toBe(false); + }); +}); diff --git a/web/src/store/releaseAnnouncementsStore.ts b/web/src/store/releaseAnnouncementsStore.ts new file mode 100644 index 00000000..a440c0e4 --- /dev/null +++ b/web/src/store/releaseAnnouncementsStore.ts @@ -0,0 +1,77 @@ +import { create } from "zustand"; + +interface ReleaseAnnouncementsState { + /** Number of unseen `release_announced` events since the user last visited /releases. */ + unseenCount: number; + /** Per-series mute list (series IDs whose announcements should be ignored). */ + mutedSeriesIds: Set; + /** Allowed languages; empty set means "all". Stored lower-case. */ + allowedLanguages: Set; + /** Allowed plugin IDs; empty set means "all". */ + allowedPlugins: Set; + + /** Increment the badge counter (called by the SSE handler). */ + bump: () => void; + /** Reset the badge counter (called when the user visits /releases). */ + reset: () => void; + /** Toggle a per-series mute. */ + toggleMute: (seriesId: string) => void; + /** Replace the language allowlist. */ + setAllowedLanguages: (languages: string[]) => void; + /** Replace the plugin allowlist. */ + setAllowedPlugins: (plugins: string[]) => void; + /** + * Decide whether an incoming event should bump the badge or surface as a + * toast. Pure function, exposed so the SSE handler and tests can share it. 
+ */ + shouldNotify: (params: { + seriesId: string; + pluginId: string; + language: string; + }) => boolean; +} + +export const useReleaseAnnouncementsStore = create()( + (set, get) => ({ + unseenCount: 0, + mutedSeriesIds: new Set(), + allowedLanguages: new Set(), + allowedPlugins: new Set(), + + bump: () => set((state) => ({ unseenCount: state.unseenCount + 1 })), + reset: () => set({ unseenCount: 0 }), + + toggleMute: (seriesId) => + set((state) => { + const next = new Set(state.mutedSeriesIds); + if (next.has(seriesId)) { + next.delete(seriesId); + } else { + next.add(seriesId); + } + return { mutedSeriesIds: next }; + }), + + setAllowedLanguages: (languages) => + set({ + allowedLanguages: new Set(languages.map((l) => l.toLowerCase())), + }), + + setAllowedPlugins: (plugins) => set({ allowedPlugins: new Set(plugins) }), + + shouldNotify: ({ seriesId, pluginId, language }) => { + const { mutedSeriesIds, allowedLanguages, allowedPlugins } = get(); + if (mutedSeriesIds.has(seriesId)) return false; + if ( + allowedLanguages.size > 0 && + !allowedLanguages.has(language.toLowerCase()) + ) { + return false; + } + if (allowedPlugins.size > 0 && !allowedPlugins.has(pluginId)) { + return false; + } + return true; + }, + }), +); diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index 845df5e9..417c5325 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -9470,6 +9470,35 @@ export interface components { pluginId: string; /** @enum {string} */ type: "plugin_deleted"; + } | { + /** + * Format: double + * @description Chapter announced (if the source emits chapters). + */ + chapter?: number | null; + /** + * @description Language code (e.g. `"en"`); used by client-side notification + * preference filters. + */ + language: string; + /** Format: uuid */ + ledgerId: string; + /** + * @description Plugin name that owns the source (`release_sources.plugin_id`). + * Helps the frontend filter without an extra lookup. 
+ */ + pluginId: string; + /** Format: uuid */ + seriesId: string; + /** Format: uuid */ + sourceId: string; + /** @enum {string} */ + type: "release_announced"; + /** + * Format: int32 + * @description Volume announced (if the source emits volumes). + */ + volume?: number | null; }; /** * @description Type of entity that was changed diff --git a/web/src/types/index.ts b/web/src/types/index.ts index 93e1d708..bc20124c 100644 --- a/web/src/types/index.ts +++ b/web/src/types/index.ts @@ -244,6 +244,12 @@ export function isPluginEvent(event: EntityEvent): event is EntityEvent & { ); } +export function isReleaseAnnouncedEvent( + event: EntityEvent, +): event is EntityEvent & { type: "release_announced" } { + return event.type === "release_announced"; +} + // ============================================================================= // Re-export the raw generated types for advanced use cases // ============================================================================= From f6eca1198a370dee33bb6b6baef1439abbe94cea Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Sun, 3 May 2026 23:25:17 -0700 Subject: [PATCH 09/29] feat(release-tracking): add Nyaa uploader-feed release-source plugin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the release-nyaa plugin as the first acquisition-pointer source, completing the three-signal release-tracking model: upstream gap (Phase 5), translation feed (MangaUpdates), and now where-to-acquire (Nyaa). Replaces a brittle external n8n flow with idempotent, alias-matched announcements. The plugin polls Nyaa user RSS feeds (and optional `q:` search feeds for groups without an account) for an admin-configured uploader allowlist, parses titles into structured chapter/volume/format fields, and matches against tracked-series aliases via normalized exact match (0.95 confidence) or token+character bigram Sørensen-Dice fuzzy match floored at 0.85 ratio. 
Notable design choices: - One source row walks all uploader subscriptions (no admin endpoint exists yet for materializing one row per subscription); mirrors how release-mangaupdates polls all tracked series within one source row. - Normalization mirrors the host's normalize_alias Rust impl so exact matches between Nyaa titles and stored aliases are deterministic. - ETag is single-bucket on the source row; daily polls + small RSS bodies make per-subscription state slots a deferred optimization. - Title parser handles the user's mixed-format screenshot shapes (1r0n volume releases, v01-14 ranges, c126-142 ranges, decimal chapters, Digital/JXL hints). Also wires release-nyaa and release-mangaupdates (a Phase 6 wiring gap caught here) into docker-compose plugin dist mounts, the plugins-builder build/watch lists, and the GitHub Actions plugin matrices in both lint/test and per-plugin-binary build jobs. User docs covering uploader subscription syntax, alias-matching expectations, configuration reference, limitations, and risks land under docs/docs/plugins/release-nyaa.md. Plugin code ships with unit and end-to-end tests. 
--- .github/workflows/build.yml | 4 + .github/workflows/ci.yml | 2 + docker-compose.yml | 14 +- docs/docs/plugins/release-nyaa.md | 90 + plugins/release-nyaa/package-lock.json | 1971 ++++++++++++++++++++++ plugins/release-nyaa/package.json | 52 + plugins/release-nyaa/src/fetcher.test.ts | 165 ++ plugins/release-nyaa/src/fetcher.ts | 167 ++ plugins/release-nyaa/src/index.test.ts | 252 +++ plugins/release-nyaa/src/index.ts | 408 +++++ plugins/release-nyaa/src/manifest.ts | 79 + plugins/release-nyaa/src/matcher.test.ts | 130 ++ plugins/release-nyaa/src/matcher.ts | 216 +++ plugins/release-nyaa/src/parser.test.ts | 232 +++ plugins/release-nyaa/src/parser.ts | 350 ++++ plugins/release-nyaa/tsconfig.json | 24 + plugins/release-nyaa/vitest.config.ts | 7 + 17 files changed, 4161 insertions(+), 2 deletions(-) create mode 100644 docs/docs/plugins/release-nyaa.md create mode 100644 plugins/release-nyaa/package-lock.json create mode 100644 plugins/release-nyaa/package.json create mode 100644 plugins/release-nyaa/src/fetcher.test.ts create mode 100644 plugins/release-nyaa/src/fetcher.ts create mode 100644 plugins/release-nyaa/src/index.test.ts create mode 100644 plugins/release-nyaa/src/index.ts create mode 100644 plugins/release-nyaa/src/manifest.ts create mode 100644 plugins/release-nyaa/src/matcher.test.ts create mode 100644 plugins/release-nyaa/src/matcher.ts create mode 100644 plugins/release-nyaa/src/parser.test.ts create mode 100644 plugins/release-nyaa/src/parser.ts create mode 100644 plugins/release-nyaa/tsconfig.json create mode 100644 plugins/release-nyaa/vitest.config.ts diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4d3ac42e..f6b930b4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -152,6 +152,8 @@ jobs: - metadata-openlibrary - recommendations-anilist - sync-anilist + - release-mangaupdates + - release-nyaa steps: - uses: actions/checkout@v4 - name: Setup Node.js @@ -562,6 +564,8 @@ jobs: - 
metadata-openlibrary - recommendations-anilist - sync-anilist + - release-mangaupdates + - release-nyaa steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 39c82570..df4bfc1c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -134,6 +134,8 @@ jobs: - metadata-openlibrary - recommendations-anilist - sync-anilist + - release-mangaupdates + - release-nyaa steps: - uses: actions/checkout@v4 - name: Setup Node.js diff --git a/docker-compose.yml b/docker-compose.yml index 1d4908e6..7dff00b4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -94,6 +94,8 @@ services: - ./plugins/metadata-openlibrary/dist:/opt/codex/plugins/metadata-openlibrary/dist:ro - ./plugins/recommendations-anilist/dist:/opt/codex/plugins/recommendations-anilist/dist:ro - ./plugins/sync-anilist/dist:/opt/codex/plugins/sync-anilist/dist:ro + - ./plugins/release-mangaupdates/dist:/opt/codex/plugins/release-mangaupdates/dist:ro + - ./plugins/release-nyaa/dist:/opt/codex/plugins/release-nyaa/dist:ro environment: RUST_BACKTRACE: 1 # Email configuration for Mailhog @@ -156,6 +158,8 @@ services: - ./plugins/metadata-openlibrary/dist:/opt/codex/plugins/metadata-openlibrary/dist:ro - ./plugins/recommendations-anilist/dist:/opt/codex/plugins/recommendations-anilist/dist:ro - ./plugins/sync-anilist/dist:/opt/codex/plugins/sync-anilist/dist:ro + - ./plugins/release-mangaupdates/dist:/opt/codex/plugins/release-mangaupdates/dist:ro + - ./plugins/release-nyaa/dist:/opt/codex/plugins/release-nyaa/dist:ro command: [ "cargo", @@ -217,6 +221,8 @@ services: - /plugins/metadata-openlibrary/node_modules - /plugins/recommendations-anilist/node_modules - /plugins/sync-anilist/node_modules + - /plugins/release-mangaupdates/node_modules + - /plugins/release-nyaa/node_modules command: - sh - -c @@ -228,15 +234,19 @@ services: cd /plugins/metadata-openlibrary && npm install && npm run build && cd /plugins/recommendations-anilist && npm 
install && npm run build && cd /plugins/sync-anilist && npm install && npm run build && + cd /plugins/release-mangaupdates && npm install && npm run build && + cd /plugins/release-nyaa && npm install && npm run build && echo 'Initial build complete. Watching for changes...' && npm install -g concurrently && - concurrently --names 'sdk,echo,mangabaka,openlibrary,rec-anilist,sync-anilist' --prefix-colors 'blue,green,yellow,magenta,cyan,red' \ + concurrently --names 'sdk,echo,mangabaka,openlibrary,rec-anilist,sync-anilist,rel-mu,rel-nyaa' --prefix-colors 'blue,green,yellow,magenta,cyan,red,gray,white' \ "cd /plugins/sdk-typescript && npm run dev" \ "cd /plugins/metadata-echo && npm run dev" \ "cd /plugins/metadata-mangabaka && npm run dev" \ "cd /plugins/metadata-openlibrary && npm run dev" \ "cd /plugins/recommendations-anilist && npm run dev" \ - "cd /plugins/sync-anilist && npm run dev" + "cd /plugins/sync-anilist && npm run dev" \ + "cd /plugins/release-mangaupdates && npm run dev" \ + "cd /plugins/release-nyaa && npm run dev" networks: - codex-network profiles: diff --git a/docs/docs/plugins/release-nyaa.md b/docs/docs/plugins/release-nyaa.md new file mode 100644 index 00000000..8d63c9f1 --- /dev/null +++ b/docs/docs/plugins/release-nyaa.md @@ -0,0 +1,90 @@ +--- +--- + +# Nyaa Releases Plugin + +The Nyaa Releases plugin announces new chapter and volume torrents for tracked series by polling Nyaa.si user RSS feeds. Unlike the [MangaUpdates plugin](./release-mangaupdates.md), which tells you *what* has been released in your languages, the Nyaa plugin tells you *where to download* a release that exists. It is **notify-only**: Codex never downloads torrents. + +## What it's for + +Nyaa is an acquisition-pointer source. It complements (not replaces) the translation-feed plugins: + +- **MangaUpdates** answers: "Has chapter 143 been released in English?" +- **Nyaa** answers: "Is there a torrent for chapter 143 from a trusted uploader?" 
+ +Use Nyaa when you've already decided on a small allowlist of trusted uploaders (e.g. `1r0n`) and want a single feed of "new releases from these people" filtered down to your tracked series. + +## Features + +- Per-uploader (or per-search-query) RSS polling against Nyaa.si user feeds. +- Alias-based series matching: each parsed Nyaa title is normalized and compared to every tracked series' alias list. +- Confidence scoring: exact normalized match → 0.95; fuzzy near-match (Sørensen-Dice) → 0.7-0.85; anything below the 0.85 Dice floor is dropped before reaching the host. +- Format-hint extraction: `(Digital)`, `(JXL)`, `(Magazine)`, etc. surface on the candidate's `formatHints` for downstream filtering. +- Volume and chapter ranges are recognized: `[1r0n] Boruto v01-14 (Digital)` and `[Group] Dandadan c126-142 (Digital)` parse correctly and pass both ends to the host. +- Idempotent ledger writes (re-polling never re-announces an already-seen release). +- Daily default poll interval; conditional GETs (ETag + Last-Modified) keep bandwidth low. +- Per-host backoff is driven by the host on 429 / 503 responses. + +## How it works + +1. Codex schedules a poll for the source row (default: once per 24 hours). +2. The plugin reads the configured uploader subscription list. +3. The plugin asks the host for tracked series along with their aliases (`releases/list_tracked` with `requires_aliases: true`). +4. For each subscription, the plugin fetches the Nyaa feed: + - User feed: `https://nyaa.si/?page=rss&u=<username>` + - Search feed (for groups without a user account): `https://nyaa.si/?page=rss&q=<query>` +5. Each RSS item is parsed: a leading `[Group]` token, chapter / volume token (single or range), and parenthesized format hints are extracted; the remaining text is the *series guess*. +6. The series guess is normalized and matched against tracked-series aliases. Confidence 0.95 on exact normalized match; otherwise the matcher computes a token-level Dice ratio and rejects below 0.85. +7. 
Matching candidates are submitted to the host's release ledger via `releases/record`. The host applies its threshold (default 0.7) and dedups on `(source_id, external_release_id)` and on `info_hash` (Nyaa's `nyaa:infoHash` element). + +The plugin **never** downloads release files. The "Open" link on the inbox row sends you to the Nyaa view page or the `.torrent` URL; how you acquire the chapter is up to you. + +## Setup + +### Configure uploader subscriptions + +The plugin's `uploaders` admin field is a comma-separated list of trusted uploader handles or queries: + +``` +uploaders: "1r0n,TankobonBlur,q:LuminousScans" +``` + +- Plain identifier (`1r0n`) → user feed (`https://nyaa.si/?page=rss&u=1r0n`). +- `q:` or `query:` → search feed (`https://nyaa.si/?page=rss&q=<query>`). Use this for groups without a Nyaa account, or to scope by tag. + +Empty tokens are dropped; case-insensitive duplicates are silently deduplicated. The plugin walks subscriptions in declaration order on each poll. + +### Make sure tracked series have aliases + +Nyaa releases identify a series only by name in the title. The plugin matches titles to series via the `series_aliases` table: + +- The `BackfillTrackingFromMetadata` task (Phase 1) seeds aliases from each series' `series_metadata.title`, `title_sort`, and alternate titles. +- You can also add aliases manually via the Tracking panel on a series detail page. + +For best results, add aliases that mirror how your trusted uploaders name the release. Example: 1r0n names `Boruto: Two Blue Vortex` as `[1r0n] Boruto - Two Blue Vortex - Volume NN (Digital)`. The default normalization produces `boruto two blue vortex` from both forms, so an exact match is automatic — but if you track *Boruto* with only the alias `Boruto`, the matcher will see `boruto two blue vortex` and reject it as not similar enough to `boruto`. 
+ +### Source row + +A `release_sources` row with `plugin_id="release-nyaa"` and `kind="rss-uploader"` must exist before the scheduler will poll. (See [Release tracking architecture](../architecture/release-tracking.md) for the broader picture; admin UI to create and manage source rows is tracked as a follow-up.) + +## Configuration reference + +| Field | Scope | Default | Notes | +| ------------------ | ------------ | ---------------------- | -------------------------------------------------------------------------------------------------- | +| `uploaders` | admin | `""` | Comma-separated subscription list. Plain identifier = user feed; `q:` = search feed. | +| `requestTimeoutMs` | admin | `10000` | Hard timeout per Nyaa fetch. Clamped to `[1000, 60000]`. | +| `baseUrl` | admin | `https://nyaa.si` | Override base URL — useful for mirrors. Trailing slashes are trimmed. | + +## Limitations + +- **One source row, many uploaders.** The plan called for one source row per uploader subscription, but the host has no admin endpoint for creating `release_sources` rows yet. Until that ships, all uploader subscriptions ride a single source row's poll cadence and ETag bucket. With daily polls the difference is academic; if you're adding many uploaders or want per-uploader poll intervals, this will need revisiting. +- **ETag is single-bucket.** The source row stores one ETag — the plugin uses it on the *first* uploader fetched and walks subsequent uploaders unconditionally. Daily polls + small RSS bodies make this acceptable; per-subscription ETags would need per-(source, subscription) state. +- **Language is hardcoded to English.** Nyaa releases don't carry a language tag, and 99% of the uploaders this plugin targets release English-language scans. Admins who add non-English uploaders should configure tracked series' `languages` accordingly so the host's `latest_known_*` advance gate doesn't pollute the high-water mark with releases the user can't read. 
+- **Title parsing is best-effort.** The corpus covers the common 1r0n / TankobonBlur shapes plus generic `Volume NN` / `Chapter NNN` forms. Edge-case titles (e.g. unusual punctuation, missing separators) may parse with an empty `seriesGuess`; the matcher silently rejects those entries (no false positives). +- **No per-uploader confidence weighting in v1.** Every matched candidate gets the same confidence based on the alias match alone. Adding per-uploader trust scores (downgrade an uploader after N user dismissals) is on the roadmap but not load-bearing at v1's tracked-series scale. + +## Risks + +- **Rate limits.** Nyaa serves RSS publicly without API keys, but it's a small site and aggressive polling is unwelcome. The plugin uses a daily default cadence and per-host backoff (driven by the host) to back off on 429 / 5xx responses. Don't reduce the interval below the default unless you have a specific reason. +- **Title-parsing false positives.** Alias-only matching is fundamentally fuzzier than the external-ID match used by MangaUpdates. The matcher's 0.85 Dice floor + 0.95 exact-confidence give the host's threshold (default 0.7) enough headroom to drop bad matches, but watch the inbox for the first few days after enabling and dismiss anything mis-matched. Repeated dismissals tell you which series need additional aliases. +- **Quality varies by uploader.** This is *acquisition pointer* data. The plugin doesn't validate that the underlying torrent is what its title claims to be; that's why the user maintains the uploader allowlist. 
diff --git a/plugins/release-nyaa/package-lock.json b/plugins/release-nyaa/package-lock.json new file mode 100644 index 00000000..22f5f4b6 --- /dev/null +++ b/plugins/release-nyaa/package-lock.json @@ -0,0 +1,1971 @@ +{ + "name": "@ashdev/codex-plugin-release-nyaa", + "version": "1.18.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "@ashdev/codex-plugin-release-nyaa", + "version": "1.18.0", + "license": "MIT", + "dependencies": { + "@ashdev/codex-plugin-sdk": "file:../sdk-typescript" + }, + "bin": { + "codex-plugin-release-nyaa": "dist/index.js" + }, + "devDependencies": { + "@biomejs/biome": "^2.4.4", + "@types/node": "^22.0.0", + "esbuild": "^0.27.3", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=22.0.0" + } + }, + "../sdk-typescript": { + "name": "@ashdev/codex-plugin-sdk", + "version": "1.18.0", + "license": "MIT", + "devDependencies": { + "@biomejs/biome": "^2.4.4", + "@types/node": "^22.0.0", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + }, + "engines": { + "node": ">=22.0.0" + } + }, + "node_modules/@ashdev/codex-plugin-sdk": { + "resolved": "../sdk-typescript", + "link": true + }, + "node_modules/@biomejs/biome": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/biome/-/biome-2.4.14.tgz", + "integrity": "sha512-TmAvxOEgrpLypzVGJ8FulIZnlyA9TxrO1hyqYrCz9r+bwma9xXxuLA5IuYnj55XQneFx460KjRbx6SWGLkg3bQ==", + "dev": true, + "license": "MIT OR Apache-2.0", + "bin": { + "biome": "bin/biome" + }, + "engines": { + "node": ">=14.21.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/biome" + }, + "optionalDependencies": { + "@biomejs/cli-darwin-arm64": "2.4.14", + "@biomejs/cli-darwin-x64": "2.4.14", + "@biomejs/cli-linux-arm64": "2.4.14", + "@biomejs/cli-linux-arm64-musl": "2.4.14", + "@biomejs/cli-linux-x64": "2.4.14", + "@biomejs/cli-linux-x64-musl": "2.4.14", + "@biomejs/cli-win32-arm64": "2.4.14", + "@biomejs/cli-win32-x64": 
"2.4.14" + } + }, + "node_modules/@biomejs/cli-darwin-arm64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-arm64/-/cli-darwin-arm64-2.4.14.tgz", + "integrity": "sha512-XvgoE9XOawUOQPdmvs4J7wPhi/DLwSCGks3AlPJDmh34O0awRTqCED1HRcRDdpf1Zrp4us4MGOOdIxNpbqNF5Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-darwin-x64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-darwin-x64/-/cli-darwin-x64-2.4.14.tgz", + "integrity": "sha512-jE7hKBCFhOx3uUh+ZkWBfOHxAcILPfhFplNkuID/eZeSTLHzfZzoZxW8fbqY9xXRnPi7jGNAf1iPVR+0yWsM/Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64/-/cli-linux-arm64-2.4.14.tgz", + "integrity": "sha512-2TELhZnW5RSLL063l9rc5xLpA0ZIw0Ccwy/0q384rvNAgFw3yI76bd59547yxowdQr5MNPET/xDLrLuvgSeeWQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-arm64-musl": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-arm64-musl/-/cli-linux-arm64-musl-2.4.14.tgz", + "integrity": "sha512-/z+6gqAqqUQTHazwStxSXKHg9b8UvqBmDFRp+c4wYbq2KXhELQDon9EoC9RpmQ8JWkqQx/lIUy/cs+MhzDZp6A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64/-/cli-linux-x64-2.4.14.tgz", + "integrity": 
"sha512-zHrlQZDBDUz4OLAraYpWKcnLS6HOewBFWYOzY91d1ZjdqZwibOyb6BEu6WuWLugyo0P3riCmsbV9UqV1cSXwQg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-linux-x64-musl": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-linux-x64-musl/-/cli-linux-x64-musl-2.4.14.tgz", + "integrity": "sha512-R6BWgJdQOwW9ulJatuTVrQkjnODjqHZkKNOqb1sz++3Noe5LYd0i3PchnOBUCYAPHoPWHhjJqbdZlHEu0hpjdA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-arm64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-arm64/-/cli-win32-arm64-2.4.14.tgz", + "integrity": "sha512-M3EH5hqOI/F/FUA2u4xcLoUgmxd218mvuj/6JL7Hv2toQvr2/AdOvKSpGkoRuWFCtQPVa+ZqkEV3Q5xBA9+XSA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@biomejs/cli-win32-x64": { + "version": "2.4.14", + "resolved": "https://registry.npmjs.org/@biomejs/cli-win32-x64/-/cli-win32-x64-2.4.14.tgz", + "integrity": "sha512-WL0EG5qE+EAKomGXbf2g6VnSKJhTL3tXC0QRzWRwA5VpjxNYa6H4P7ZWfymbGE4IhZZQi1KXQ2R0YjwInmz2fA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT OR Apache-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=14.21.3" + } + }, + "node_modules/@emnapi/core": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.10.0.tgz", + "integrity": "sha512-yq6OkJ4p82CAfPl0u9mQebQHKPJkY7WrIuk205cTYnYe+k2Z8YBh11FrbRG/H6ihirqcacOgl2BIO8oyMQLeXw==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + 
"node_modules/@emnapi/runtime": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.10.0.tgz", + "integrity": "sha512-ewvYlk86xUoGI0zQRNq/mC+16R1QeDlKQy21Ki3oSYXNgLb45GV1P6A0M+/s6nyCuNDqe5VpaY84BzXGwVbwFA==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", + "integrity": "sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.7.tgz", + "integrity": "sha512-EKX3Qwmhz1eMdEJokhALr0YiD0lhQNwDqkPYyPhiSwKrh7/4KRjQc04sZ8db+5DVVnZ1LmbNDI1uAMPEUBnQPg==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.27.7.tgz", + "integrity": "sha512-jbPXvB4Yj2yBV7HUfE2KHe4GJX51QplCN1pGbYjvsyCZbQmies29EoJbkEc+vYuU5o45AfQn37vZlyXy4YJ8RQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.27.7.tgz", + "integrity": "sha512-62dPZHpIXzvChfvfLJow3q5dDtiNMkwiRzPylSCfriLvZeq0a1bWChrGx/BbUbPwOrsWKMn8idSllklzBy+dgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/android-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.27.7.tgz", + "integrity": "sha512-x5VpMODneVDb70PYV2VQOmIUUiBtY3D3mPBG8NxVk5CogneYhkR7MmM3yR/uMdITLrC1ml/NV1rj4bMJuy9MCg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.27.7.tgz", + "integrity": "sha512-5lckdqeuBPlKUwvoCXIgI2D9/ABmPq3Rdp7IfL70393YgaASt7tbju3Ac+ePVi3KDH6N2RqePfHnXkaDtY9fkw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.27.7.tgz", + "integrity": "sha512-rYnXrKcXuT7Z+WL5K980jVFdvVKhCHhUwid+dDYQpH+qu+TefcomiMAJpIiC2EM3Rjtq0sO3StMV/+3w3MyyqQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.7.tgz", + "integrity": "sha512-B48PqeCsEgOtzME2GbNM2roU29AMTuOIN91dsMO30t+Ydis3z/3Ngoj5hhnsOSSwNzS+6JppqWsuhTp6E82l2w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.27.7.tgz", + "integrity": "sha512-jOBDK5XEjA4m5IJK3bpAQF9/Lelu/Z9ZcdhTRLf4cajlB+8VEhFFRjWgfy3M1O4rO2GQ/b2dLwCUGpiF/eATNQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + 
"optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.27.7.tgz", + "integrity": "sha512-RkT/YXYBTSULo3+af8Ib0ykH8u2MBh57o7q/DAs3lTJlyVQkgQvlrPTnjIzzRPQyavxtPtfg0EopvDyIt0j1rA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.27.7.tgz", + "integrity": "sha512-RZPHBoxXuNnPQO9rvjh5jdkRmVizktkT7TCDkDmQ0W2SwHInKCAV95GRuvdSvA7w4VMwfCjUiPwDi0ZO6Nfe9A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.27.7.tgz", + "integrity": "sha512-GA48aKNkyQDbd3KtkplYWT102C5sn/EZTY4XROkxONgruHPU72l+gW+FfF8tf2cFjeHaRbWpOYa/uRBz/Xq1Pg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.27.7.tgz", + "integrity": "sha512-a4POruNM2oWsD4WKvBSEKGIiWQF8fZOAsycHOt6JBpZ+JN2n2JH9WAv56SOyu9X5IqAjqSIPTaJkqN8F7XOQ5Q==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.27.7.tgz", + "integrity": 
"sha512-KabT5I6StirGfIz0FMgl1I+R1H73Gp0ofL9A3nG3i/cYFJzKHhouBV5VWK1CSgKvVaG4q1RNpCTR2LuTVB3fIw==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.27.7.tgz", + "integrity": "sha512-gRsL4x6wsGHGRqhtI+ifpN/vpOFTQtnbsupUF5R5YTAg+y/lKelYR1hXbnBdzDjGbMYjVJLJTd2OFmMewAgwlQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.27.7.tgz", + "integrity": "sha512-hL25LbxO1QOngGzu2U5xeXtxXcW+/GvMN3ejANqXkxZ/opySAZMrc+9LY/WyjAan41unrR3YrmtTsUpwT66InQ==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.27.7.tgz", + "integrity": "sha512-2k8go8Ycu1Kb46vEelhu1vqEP+UeRVj2zY1pSuPdgvbd5ykAw82Lrro28vXUrRmzEsUV0NzCf54yARIK8r0fdw==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.7.tgz", + "integrity": "sha512-hzznmADPt+OmsYzw1EE33ccA+HPdIqiCRq7cQeL1Jlq2gb1+OyWBkMCrYGBJ+sxVzve2ZJEVeePbLM2iEIZSxA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.27.7", + "resolved": 
"https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.7.tgz", + "integrity": "sha512-b6pqtrQdigZBwZxAn1UpazEisvwaIDvdbMbmrly7cDTMFnw/+3lVxxCTGOrkPVnsYIosJJXAsILG9XcQS+Yu6w==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.27.7.tgz", + "integrity": "sha512-OfatkLojr6U+WN5EDYuoQhtM+1xco+/6FSzJJnuWiUw5eVcicbyK3dq5EeV/QHT1uy6GoDhGbFpprUiHUYggrw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.7.tgz", + "integrity": "sha512-AFuojMQTxAz75Fo8idVcqoQWEHIXFRbOc1TrVcFSgCZtQfSdc1RXgB3tjOn/krRHENUB4j00bfGjyl2mJrU37A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.27.7.tgz", + "integrity": "sha512-+A1NJmfM8WNDv5CLVQYJ5PshuRm/4cI6WMZRg1by1GwPIQPCTs1GLEUHwiiQGT5zDdyLiRM/l1G0Pv54gvtKIg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.7.tgz", + "integrity": "sha512-+KrvYb/C8zA9CU/g0sR6w2RBw7IGc5J2BPnc3dYc5VJxHCSF1yNMxTV5LQ7GuKteQXZtspjFbiuW5/dOj7H4Yw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": 
{ + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.27.7.tgz", + "integrity": "sha512-ikktIhFBzQNt/QDyOL580ti9+5mL/YZeUPKU2ivGtGjdTYoqz6jObj6nOMfhASpS4GU4Q/Clh1QtxWAvcYKamA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.27.7.tgz", + "integrity": "sha512-7yRhbHvPqSpRUV7Q20VuDwbjW5kIMwTHpptuUzV+AA46kiPze5Z7qgt6CLCK3pWFrHeNfDd1VKgyP4O+ng17CA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.27.7.tgz", + "integrity": "sha512-SmwKXe6VHIyZYbBLJrhOoCJRB/Z1tckzmgTLfFYOfpMAx63BJEaL9ExI8x7v0oAO3Zh6D/Oi1gVxEYr5oUCFhw==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.27.7.tgz", + "integrity": "sha512-56hiAJPhwQ1R4i+21FVF7V8kSD5zZTdHcVuRFMW0hn753vVfQN8xlx4uOPT4xoGH0Z/oVATuR82AiqSTDIpaHg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.4.tgz", + "integrity": "sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@tybys/wasm-util": "^0.10.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "peerDependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1" + } + }, + "node_modules/@oxc-project/types": { + "version": "0.127.0", + "resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.127.0.tgz", + "integrity": "sha512-aIYXQBo4lCbO4z0R3FHeucQHpF46l2LbMdxRvqvuRuW2OxdnSkcng5B8+K12spgLDj93rtN3+J2Vac/TIO+ciQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Boshen" + } + }, + "node_modules/@rolldown/binding-android-arm64": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.17.tgz", + "integrity": "sha512-s70pVGhw4zqGeFnXWvAzJDlvxhlRollagdCCKRgOsgUOH3N1l0LIxf83AtGzmb5SiVM4Hjl5HyarMRfdfj3DaQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-arm64": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-arm64/-/binding-darwin-arm64-1.0.0-rc.17.tgz", + "integrity": "sha512-4ksWc9n0mhlZpZ9PMZgTGjeOPRu8MB1Z3Tz0Mo02eWfWCHMW1zN82Qz/pL/rC+yQa+8ZnutMF0JjJe7PjwasYw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-x64": { + "version": "1.0.0-rc.17", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.17.tgz", + "integrity": "sha512-SUSDOI6WwUVNcWxd02QEBjLdY1VPHvlEkw6T/8nYG322iYWCTxRb1vzk4E+mWWYehTp7ERibq54LSJGjmouOsw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-freebsd-x64": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.17.tgz", + "integrity": "sha512-hwnz3nw9dbJ05EDO/PvcjaaewqqDy7Y1rn1UO81l8iIK1GjenME75dl16ajbvSSMfv66WXSRCYKIqfgq2KCfxw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm-gnueabihf": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.17.tgz", + "integrity": "sha512-IS+W7epTcwANmFSQFrS1SivEXHtl1JtuQA9wlxrZTcNi6mx+FDOYrakGevvvTwgj2JvWiK8B29/qD9BELZPyXQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-gnu": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.17.tgz", + "integrity": "sha512-e6usGaHKW5BMNZOymS1UcEYGowQMWcgZ71Z17Sl/h2+ZziNJ1a9n3Zvcz6LdRyIW5572wBCTH/Z+bKuZouGk9Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-musl": { + "version": "1.0.0-rc.17", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.17.tgz", + "integrity": "sha512-b/CgbwAJpmrRLp02RPfhbudf5tZnN9nsPWK82znefso832etkem8H7FSZwxrOI9djcdTP7U6YfNhbRnh7djErg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-ppc64-gnu": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.17.tgz", + "integrity": "sha512-4EII1iNGRUN5WwGbF/kOh/EIkoDN9HsupgLQoXfY+D1oyJm7/F4t5PYU5n8SWZgG0FEwakyM8pGgwcBYruGTlA==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-s390x-gnu": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.17.tgz", + "integrity": "sha512-AH8oq3XqQo4IibpVXvPeLDI5pzkpYn0WiZAfT05kFzoJ6tQNzwRdDYQ45M8I/gslbodRZwW8uxLhbSBbkv96rA==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-gnu": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.17.tgz", + "integrity": "sha512-cLnjV3xfo7KslbU41Z7z8BH/E1y5mzUYzAqih1d1MDaIGZRCMqTijqLv76/P7fyHuvUcfGsIpqCdddbxLLK9rA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-musl": { + "version": "1.0.0-rc.17", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.17.tgz", + "integrity": "sha512-0phclDw1spsL7dUB37sIARuis2tAgomCJXAHZlpt8PXZ4Ba0dRP1e+66lsRqrfhISeN9bEGNjQs+T/Fbd7oYGw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-openharmony-arm64": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.17.tgz", + "integrity": "sha512-0ag/hEgXOwgw4t8QyQvUCxvEg+V0KBcA6YuOx9g0r02MprutRF5dyljgm3EmR02O292UX7UeS6HzWHAl6KgyhA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-wasm32-wasi": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.17.tgz", + "integrity": "sha512-LEXei6vo0E5wTGwpkJ4KoT3OZJRnglwldt5ziLzOlc6qqb55z4tWNq2A+PFqCJuvWWdP53CVhG1Z9NtToDPJrA==", + "cpu": [ + "wasm32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "1.10.0", + "@emnapi/runtime": "1.10.0", + "@napi-rs/wasm-runtime": "^1.1.4" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-arm64-msvc": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.17.tgz", + "integrity": "sha512-gUmyzBl3SPMa6hrqFUth9sVfcLBlYsbMzBx5PlexMroZStgzGqlZ26pYG89rBb45Mnia+oil6YAIFeEWGWhoZA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-x64-msvc": { + "version": "1.0.0-rc.17", 
+ "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.17.tgz", + "integrity": "sha512-3hkiolcUAvPB9FLb3UZdfjVVNWherN1f/skkGWJP/fgSQhYUZpSIRr0/I8ZK9TkF3F7kxvJAk0+IcKvPHk9qQg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.17.tgz", + "integrity": "sha512-n8iosDOt6Ig1UhJ2AYqoIhHWh/isz0xpicHTzpKBeotdVsTEcxsSA/i3EVM7gQAj0rU27OLAxCjzlj15IWY7bg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.2.tgz", + "integrity": "sha512-RoBvJ2X0wuKlWFIjrwffGw1IqZHKQqzIchKaadZZfnNpsAYp2mM0h36JtPCjNDAHGgYez/15uMBpfGwchhiMgg==", + "dev": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": 
true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.19.17", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.17.tgz", + "integrity": "sha512-wGdMcf+vPYM6jikpS/qhg6WiqSV/OhG+jeeHT/KlVqxYfD40iYJf9/AE1uQxVWFvU7MipKRkRv8NSHiCGgPr8Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@vitest/expect": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.1.5.tgz", + "integrity": "sha512-PWBaRY5JoKuRnHlUHfpV/KohFylaDZTupcXN1H9vYryNLOnitSw60Mw9IAE2r67NbwwzBw/Cc/8q9BK3kIX8Kw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.1.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.1.5", + "@vitest/utils": "4.1.5", + "chai": "^6.2.2", + "tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.1.5.tgz", + "integrity": "sha512-/x2EmFC4mT4NNzqvC3fmesuV97w5FC903KPmey4gsnJiMQ3Be1IlDKVaDaG8iqaLFHqJ2FVEkxZk5VmeLjIItw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.1.5", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.1.5", + "resolved": 
"https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.1.5.tgz", + "integrity": "sha512-7I3q6l5qr03dVfMX2wCo9FxwSJbPdwKjy2uu/YPpU3wfHvIL4QHwVRp57OfGrDFeUJ8/8QdfBKIV12FTtLn00g==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.1.5.tgz", + "integrity": "sha512-2D+o7Pr82IEO46YPpoA/YU0neeyr6FTerQb5Ro7BUnBuv6NQtT/kmVnczngiMEBhzgqz2UZYl5gArejsyERDSQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.1.5", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.1.5.tgz", + "integrity": "sha512-zypXEt4KH/XgKGPUz4eC2AvErYx0My5hfL8oDb1HzGFpEk1P62bxSohdyOmvz+d9UJwanI68MKwr2EquOaOgMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.5", + "@vitest/utils": "4.1.5", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.1.5.tgz", + "integrity": "sha512-2lNOsh6+R2Idnf1TCZqSwYlKN2E/iDlD8sgU59kYVl+OMDmvldO1VDk39smRfpUNwYpNRVn3w4YfuC7KfbBnkQ==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.1.5.tgz", + "integrity": "sha512-76wdkrmfXfqGjueGgnb45ITPyUi1ycZ4IHgC2bhPDUfWHklY/q3MdLOAB+TF1e6xfl8NxNY0ZYaPCFNWSsw3Ug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.5", + "convert-source-map": "^2.0.0", + 
"tinyrainbow": "^3.1.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/es-module-lexer": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.1.0.tgz", + "integrity": "sha512-n27zTYMjYu1aj4MjCWzSP7G9r75utsaoc8m61weK+W8JMBGGQybd43GstCXZ3WNmSFtGT9wi59qQTW6mhTR5LQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/esbuild": { + "version": "0.27.7", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.27.7.tgz", + "integrity": "sha512-IxpibTjyVnmrIQo5aqNpCgoACA/dTKLTlhMHihVHhdkxKyPO1uBBthumT0rdHmcsk9uMonIWS0m4FljWzILh3w==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": 
"bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.27.7", + "@esbuild/android-arm": "0.27.7", + "@esbuild/android-arm64": "0.27.7", + "@esbuild/android-x64": "0.27.7", + "@esbuild/darwin-arm64": "0.27.7", + "@esbuild/darwin-x64": "0.27.7", + "@esbuild/freebsd-arm64": "0.27.7", + "@esbuild/freebsd-x64": "0.27.7", + "@esbuild/linux-arm": "0.27.7", + "@esbuild/linux-arm64": "0.27.7", + "@esbuild/linux-ia32": "0.27.7", + "@esbuild/linux-loong64": "0.27.7", + "@esbuild/linux-mips64el": "0.27.7", + "@esbuild/linux-ppc64": "0.27.7", + "@esbuild/linux-riscv64": "0.27.7", + "@esbuild/linux-s390x": "0.27.7", + "@esbuild/linux-x64": "0.27.7", + "@esbuild/netbsd-arm64": "0.27.7", + "@esbuild/netbsd-x64": "0.27.7", + "@esbuild/openbsd-arm64": "0.27.7", + "@esbuild/openbsd-x64": "0.27.7", + "@esbuild/openharmony-arm64": "0.27.7", + "@esbuild/sunos-x64": "0.27.7", + "@esbuild/win32-arm64": "0.27.7", + "@esbuild/win32-ia32": "0.27.7", + "@esbuild/win32-x64": "0.27.7" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/lightningcss": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz", + "integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==", + "dev": true, + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.32.0", + "lightningcss-darwin-arm64": "1.32.0", + "lightningcss-darwin-x64": "1.32.0", + "lightningcss-freebsd-x64": "1.32.0", + "lightningcss-linux-arm-gnueabihf": "1.32.0", + "lightningcss-linux-arm64-gnu": "1.32.0", + "lightningcss-linux-arm64-musl": "1.32.0", + "lightningcss-linux-x64-gnu": "1.32.0", + "lightningcss-linux-x64-musl": "1.32.0", + "lightningcss-win32-arm64-msvc": "1.32.0", + "lightningcss-win32-x64-msvc": "1.32.0" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz", + "integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 
12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz", + "integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz", + "integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz", + "integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz", + "integrity": 
"sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz", + "integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz", + "integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz", + "integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + 
"node_modules/lightningcss-linux-x64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz", + "integrity": "sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz", + "integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz", + "integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + 
}, + "node_modules/nanoid": { + "version": "3.3.12", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.12.tgz", + "integrity": "sha512-ZB9RH/39qpq5Vu6Y+NmUaFhQR6pp+M2Xt76XBnEwDaGcVAqhlvxrl3B2bKS5D3NH3QR76v3aSrKaF/Kiy7lEtQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.14", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.14.tgz", + "integrity": 
"sha512-SoSL4+OSEtR99LHFZQiJLkT59C5B1amGO1NzTwj7TT1qCUgUO6hxOvzkOYxD+vMrXBM3XJIKzokoERdqQq/Zmg==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/rolldown": { + "version": "1.0.0-rc.17", + "resolved": "https://registry.npmjs.org/rolldown/-/rolldown-1.0.0-rc.17.tgz", + "integrity": "sha512-ZrT53oAKrtA4+YtBWPQbtPOxIbVDbxT0orcYERKd63VJTF13zPcgXTvD4843L8pcsI7M6MErt8QtON6lrB9tyA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@oxc-project/types": "=0.127.0", + "@rolldown/pluginutils": "1.0.0-rc.17" + }, + "bin": { + "rolldown": "bin/cli.mjs" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "optionalDependencies": { + "@rolldown/binding-android-arm64": "1.0.0-rc.17", + "@rolldown/binding-darwin-arm64": "1.0.0-rc.17", + "@rolldown/binding-darwin-x64": "1.0.0-rc.17", + "@rolldown/binding-freebsd-x64": "1.0.0-rc.17", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.17", + "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.17", + "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.17", + "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.17", + "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.17", + "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.17", + "@rolldown/binding-linux-x64-musl": "1.0.0-rc.17", + "@rolldown/binding-openharmony-arm64": "1.0.0-rc.17", + "@rolldown/binding-wasm32-wasi": "1.0.0-rc.17", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.17", + "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.17" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + 
"integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-4.1.0.tgz", + "integrity": "sha512-Rq7ybcX2RuC55r9oaPVEW7/xu3tj8u4GeBYHBWCychFtzMIr86A7e3PPEBPT37sHStKX3+TiX/Fr/ACmJLVlLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.1.2.tgz", + "integrity": "sha512-dAqSqE/RabpBKI8+h26GfLq6Vb3JVXs30XYQjdMjaj/c2tS8IYYMbIzP599KtRj7c57/wYApb3QjgRgXmrCukA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + 
"engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyrainbow": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.1.0.tgz", + "integrity": "sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD", + "optional": true + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "8.0.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-8.0.10.tgz", + "integrity": "sha512-rZuUu9j6J5uotLDs+cAA4O5H4K1SfPliUlQwqa6YEwSrWDZzP4rhm00oJR5snMewjxF5V/K3D4kctsUTsIU9Mw==", + "dev": true, + "license": "MIT", + "dependencies": { + "lightningcss": "^1.32.0", + "picomatch": "^4.0.4", + "postcss": "^8.5.10", + "rolldown": "1.0.0-rc.17", + "tinyglobby": "^0.2.16" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": 
"https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "@vitejs/devtools": "^0.1.0", + "esbuild": "^0.27.0 || ^0.28.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "@vitejs/devtools": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitest": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.1.5.tgz", + "integrity": "sha512-9Xx1v3/ih3m9hN+SbfkUyy0JAs72ap3r7joc87XL6jwF0jGg6mFBvQ1SrwaX+h8BlkX6Hz9shdd1uo6AF+ZGpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.1.5", + "@vitest/mocker": "4.1.5", + "@vitest/pretty-format": "4.1.5", + "@vitest/runner": "4.1.5", + "@vitest/snapshot": "4.1.5", + "@vitest/spy": "4.1.5", + "@vitest/utils": "4.1.5", + "es-module-lexer": "^2.0.0", + "expect-type": "^1.3.0", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^4.0.0-rc.1", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.1.0", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": 
{ + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.1.5", + "@vitest/browser-preview": "4.1.5", + "@vitest/browser-webdriverio": "4.1.5", + "@vitest/coverage-istanbul": "4.1.5", + "@vitest/coverage-v8": "4.1.5", + "@vitest/ui": "4.1.5", + "happy-dom": "*", + "jsdom": "*", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/coverage-istanbul": { + "optional": true + }, + "@vitest/coverage-v8": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + }, + "vite": { + "optional": false + } + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + } + } +} diff --git a/plugins/release-nyaa/package.json b/plugins/release-nyaa/package.json new file mode 100644 index 00000000..b5c98456 --- /dev/null +++ b/plugins/release-nyaa/package.json @@ -0,0 +1,52 @@ +{ + "name": "@ashdev/codex-plugin-release-nyaa", + "version": "1.18.0", + "description": "Nyaa.si uploader-feed release-source plugin for Codex - announces torrent releases for tracked series, filtered by an admin allowlist of trusted uploaders", + "main": "dist/index.js", + "bin": 
"dist/index.js", + "type": "module", + "files": [ + "dist", + "README.md" + ], + "repository": { + "type": "git", + "url": "https://github.com/AshDevFr/codex.git", + "directory": "plugins/release-nyaa" + }, + "scripts": { + "build": "esbuild src/index.ts --bundle --platform=node --target=node22 --format=esm --outfile=dist/index.js --sourcemap --banner:js='#!/usr/bin/env node'", + "dev": "npm run build -- --watch", + "clean": "rm -rf dist", + "start": "node dist/index.js", + "lint": "biome check .", + "lint:fix": "biome check --write .", + "typecheck": "tsc --noEmit", + "test": "vitest run --passWithNoTests", + "test:watch": "vitest", + "prepublishOnly": "npm run lint && npm run build" + }, + "keywords": [ + "codex", + "plugin", + "nyaa", + "release-source", + "manga", + "torrent" + ], + "author": "Codex", + "license": "MIT", + "engines": { + "node": ">=22.0.0" + }, + "dependencies": { + "@ashdev/codex-plugin-sdk": "file:../sdk-typescript" + }, + "devDependencies": { + "@biomejs/biome": "^2.4.4", + "@types/node": "^22.0.0", + "esbuild": "^0.27.3", + "typescript": "^5.9.3", + "vitest": "^4.0.18" + } +} diff --git a/plugins/release-nyaa/src/fetcher.test.ts b/plugins/release-nyaa/src/fetcher.test.ts new file mode 100644 index 00000000..558ac30e --- /dev/null +++ b/plugins/release-nyaa/src/fetcher.test.ts @@ -0,0 +1,165 @@ +import { describe, expect, it, vi } from "vitest"; +import { + feedUrl, + fetchSubscriptionFeed, + parseSubscriptionList, + parseSubscriptionToken, +} from "./fetcher.js"; + +// ----------------------------------------------------------------------------- +// parseSubscriptionToken / parseSubscriptionList +// ----------------------------------------------------------------------------- + +describe("parseSubscriptionToken", () => { + it("returns null for empty / whitespace-only input", () => { + expect(parseSubscriptionToken("")).toBeNull(); + expect(parseSubscriptionToken(" ")).toBeNull(); + }); + + it("treats a bare identifier as a user feed", () => 
{ + expect(parseSubscriptionToken("1r0n")).toEqual({ kind: "user", identifier: "1r0n" }); + }); + + it("treats `q:` as a search query", () => { + expect(parseSubscriptionToken("q:LuminousScans")).toEqual({ + kind: "query", + identifier: "LuminousScans", + }); + }); + + it("treats `query:` (long form) as a search query", () => { + expect(parseSubscriptionToken("query:Manga Group")).toEqual({ + kind: "query", + identifier: "Manga Group", + }); + }); + + it("rejects an empty query body", () => { + expect(parseSubscriptionToken("q:")).toBeNull(); + expect(parseSubscriptionToken("query: ")).toBeNull(); + }); +}); + +describe("parseSubscriptionList", () => { + it("parses a comma-separated list and dedupes (case-insensitive)", () => { + const list = parseSubscriptionList("1r0n, TankobonBlur ,1r0n,q:LuminousScans"); + expect(list).toEqual([ + { kind: "user", identifier: "1r0n" }, + { kind: "user", identifier: "TankobonBlur" }, + { kind: "query", identifier: "LuminousScans" }, + ]); + }); + + it("returns an empty list for non-string input", () => { + expect(parseSubscriptionList(undefined)).toEqual([]); + expect(parseSubscriptionList(null)).toEqual([]); + expect(parseSubscriptionList(42)).toEqual([]); + }); + + it("drops empty tokens (trailing comma, double commas)", () => { + expect(parseSubscriptionList(",,,foo,,,bar,,")).toEqual([ + { kind: "user", identifier: "foo" }, + { kind: "user", identifier: "bar" }, + ]); + }); +}); + +// ----------------------------------------------------------------------------- +// feedUrl +// ----------------------------------------------------------------------------- + +describe("feedUrl", () => { + it("builds a user-feed URL", () => { + const url = feedUrl({ kind: "user", identifier: "1r0n" }); + expect(url).toBe("https://nyaa.si/?page=rss&u=1r0n"); + }); + + it("builds a search-feed URL with URL-encoded query", () => { + const url = feedUrl({ kind: "query", identifier: "Luminous Scans" }); + 
expect(url).toBe("https://nyaa.si/?page=rss&q=Luminous%20Scans"); + }); + + it("respects a custom base URL with trailing slash trimming", () => { + const url = feedUrl({ kind: "user", identifier: "x" }, "https://mirror.example/"); + expect(url).toBe("https://mirror.example/?page=rss&u=x"); + }); +}); + +// ----------------------------------------------------------------------------- +// fetchSubscriptionFeed +// ----------------------------------------------------------------------------- + +function stubResponse(status: number, body = "", headers: Record = {}): Response { + const h = new Headers(headers); + return { + status, + statusText: "", + headers: h, + text: async () => body, + } as unknown as Response; +} + +describe("fetchSubscriptionFeed", () => { + it("returns ok with body, etag, and last-modified on 200", async () => { + const fetchImpl = vi.fn().mockResolvedValue( + stubResponse(200, "", { + etag: '"v1"', + "last-modified": "Mon, 04 May 2026 02:31:00 GMT", + }), + ); + const r = await fetchSubscriptionFeed({ kind: "user", identifier: "1r0n" }, null, null, { + fetchImpl: fetchImpl as unknown as typeof fetch, + }); + expect(r.kind).toBe("ok"); + if (r.kind !== "ok") return; + expect(r.body).toBe(""); + expect(r.etag).toBe('"v1"'); + expect(r.lastModified).toBe("Mon, 04 May 2026 02:31:00 GMT"); + }); + + it("returns notModified on 304", async () => { + const fetchImpl = vi.fn().mockResolvedValue(stubResponse(304)); + const r = await fetchSubscriptionFeed({ kind: "user", identifier: "1r0n" }, '"v1"', null, { + fetchImpl: fetchImpl as unknown as typeof fetch, + }); + expect(r.kind).toBe("notModified"); + }); + + it("forwards 429 / 5xx as an error result with the upstream status", async () => { + const fetchImpl = vi.fn().mockResolvedValue(stubResponse(429)); + const r = await fetchSubscriptionFeed({ kind: "user", identifier: "1r0n" }, null, null, { + fetchImpl: fetchImpl as unknown as typeof fetch, + }); + expect(r.kind).toBe("error"); + if (r.kind !== 
"error") return; + expect(r.status).toBe(429); + }); + + it("returns status=0 on transport error / abort", async () => { + const fetchImpl = vi.fn().mockRejectedValue(new Error("network down")); + const r = await fetchSubscriptionFeed({ kind: "user", identifier: "1r0n" }, null, null, { + fetchImpl: fetchImpl as unknown as typeof fetch, + }); + expect(r.kind).toBe("error"); + if (r.kind !== "error") return; + expect(r.status).toBe(0); + expect(r.message).toContain("network down"); + }); + + it("attaches If-None-Match and If-Modified-Since headers when previous values are passed", async () => { + const fetchImpl = vi.fn().mockResolvedValue(stubResponse(200, "")); + await fetchSubscriptionFeed( + { kind: "user", identifier: "1r0n" }, + '"v1"', + "Sat, 01 May 2026 00:00:00 GMT", + { fetchImpl: fetchImpl as unknown as typeof fetch }, + ); + const callArgs = fetchImpl.mock.calls[0]; + expect(callArgs).toBeDefined(); + if (!callArgs) return; + const [, init] = callArgs as [string, RequestInit]; + const headers = init.headers as Record; + expect(headers["If-None-Match"]).toBe('"v1"'); + expect(headers["If-Modified-Since"]).toBe("Sat, 01 May 2026 00:00:00 GMT"); + }); +}); diff --git a/plugins/release-nyaa/src/fetcher.ts b/plugins/release-nyaa/src/fetcher.ts new file mode 100644 index 00000000..cef7a15d --- /dev/null +++ b/plugins/release-nyaa/src/fetcher.ts @@ -0,0 +1,167 @@ +/** + * Nyaa.si RSS fetcher. + * + * Wraps `fetch` with conditional GET (`If-None-Match` from a stored ETag, plus + * `If-Modified-Since` from a stored Last-Modified header) and a hard timeout. + * + * Nyaa exposes two feed shapes we care about: + * - User feed: `https://nyaa.si/?page=rss&u=` + * - Search feed: `https://nyaa.si/?page=rss&q=` (with optional + * filters; the plugin keeps it simple and lets aliases + * do the matching) + * + * Returns a discriminated result so the caller can: + * - act on `200`: parse the body, persist the new ETag. 
+ * - skip parse on `304`: nothing changed since last poll. + * - report `429` / `5xx` upstream-status codes back to the host so the + * per-host backoff layer can react. + * + * Network is the only side effect; nothing in here touches storage, the host, + * or process state. That keeps it trivially testable: pass a mocked `fetch` + * implementation and assert. + */ + +/** Discriminated fetch result. */ +export type FetchResult = + | { kind: "ok"; body: string; etag: string | null; lastModified: string | null; status: 200 } + | { kind: "notModified"; status: 304 } + | { kind: "error"; status: number; message: string }; + +export interface FetcherOptions { + /** Custom `fetch` impl (for testing). Defaults to global `fetch`. */ + fetchImpl?: typeof fetch; + /** Per-request timeout. Defaults to 10s. */ + timeoutMs?: number; + /** Override base URL (for tests / mirrors). Defaults to `https://nyaa.si`. */ + baseUrl?: string; +} + +/** Default Nyaa base URL. */ +export const NYAA_BASE_URL = "https://nyaa.si"; + +/** + * One uploader subscription entry. Either a Nyaa username (`kind: "user"`) or + * an arbitrary search query (`kind: "query"`) for groups without an account. + */ +export type UploaderSubscription = + | { kind: "user"; identifier: string } + | { kind: "query"; identifier: string }; + +/** + * Parse a single uploader subscription token. + * + * Tokens look like: + * - `1r0n` → user + * - `q:LuminousScans` → query + * - `query:Manga Group` → query (long form) + * + * Empty / whitespace-only tokens return null (caller should drop them). + */ +export function parseSubscriptionToken(raw: string): UploaderSubscription | null { + const trimmed = raw.trim(); + if (trimmed.length === 0) return null; + + // `q:` / `query:` prefix → arbitrary search query. We match the prefix + // separately from the body so an empty query (`q:`, `query: `) returns + // null rather than falling through to "user". 
+ const prefixMatch = trimmed.match(/^(q|query):(.*)$/i); + if (prefixMatch) { + const q = (prefixMatch[2] ?? "").trim(); + if (q.length === 0) return null; + return { kind: "query", identifier: q }; + } + + // Plain identifier → username feed. + return { kind: "user", identifier: trimmed }; +} + +/** + * Parse the admin `uploaders` CSV into a clean list of subscriptions. + * Skips empty tokens; preserves order; deduplicates. + */ +export function parseSubscriptionList(raw: unknown): UploaderSubscription[] { + if (typeof raw !== "string") return []; + const seen = new Set(); + const out: UploaderSubscription[] = []; + for (const token of raw.split(",")) { + const sub = parseSubscriptionToken(token); + if (sub === null) continue; + const key = `${sub.kind}:${sub.identifier.toLowerCase()}`; + if (seen.has(key)) continue; + seen.add(key); + out.push(sub); + } + return out; +} + +/** Build the per-subscription RSS URL. */ +export function feedUrl( + subscription: UploaderSubscription, + baseUrl: string = NYAA_BASE_URL, +): string { + const base = baseUrl.replace(/\/+$/, ""); + if (subscription.kind === "user") { + return `${base}/?page=rss&u=${encodeURIComponent(subscription.identifier)}`; + } + return `${base}/?page=rss&q=${encodeURIComponent(subscription.identifier)}`; +} + +/** + * Conditional GET against an uploader-subscription RSS feed. + * + * @param subscription - The uploader subscription to fetch. + * @param previousEtag - The ETag from the previous successful poll (if any). + * @param previousLastModified - Optional Last-Modified header from the previous + * poll. Nyaa often returns one but doesn't always honor `If-None-Match`; + * sending both maximizes 304 hit rate. + * @param opts - Fetcher options (custom fetch, timeout, base URL override). 
+ */ +export async function fetchSubscriptionFeed( + subscription: UploaderSubscription, + previousEtag: string | null, + previousLastModified: string | null, + opts: FetcherOptions = {}, +): Promise { + const fetchImpl = opts.fetchImpl ?? globalThis.fetch; + const timeoutMs = opts.timeoutMs ?? 10_000; + const baseUrl = opts.baseUrl ?? NYAA_BASE_URL; + + const url = feedUrl(subscription, baseUrl); + const headers: Record = { + Accept: "application/rss+xml, application/xml;q=0.9, */*;q=0.5", + "User-Agent": "Codex-ReleaseTracker/1.0 (+https://github.com/AshDevFr/codex)", + }; + if (previousEtag) { + headers["If-None-Match"] = previousEtag; + } + if (previousLastModified) { + headers["If-Modified-Since"] = previousLastModified; + } + + const signal = AbortSignal.timeout(timeoutMs); + + let resp: Response; + try { + resp = await fetchImpl(url, { method: "GET", headers, signal }); + } catch (err) { + const msg = err instanceof Error ? err.message : "Unknown fetch error"; + return { kind: "error", status: 0, message: msg }; + } + + if (resp.status === 304) { + return { kind: "notModified", status: 304 }; + } + + if (resp.status === 200) { + const body = await resp.text(); + const etag = resp.headers.get("etag"); + const lastModified = resp.headers.get("last-modified"); + return { kind: "ok", body, etag, lastModified, status: 200 }; + } + + return { + kind: "error", + status: resp.status, + message: `upstream returned ${resp.status} ${resp.statusText}`, + }; +} diff --git a/plugins/release-nyaa/src/index.test.ts b/plugins/release-nyaa/src/index.test.ts new file mode 100644 index 00000000..be4a69a3 --- /dev/null +++ b/plugins/release-nyaa/src/index.test.ts @@ -0,0 +1,252 @@ +import { HostRpcClient } from "@ashdev/codex-plugin-sdk"; +import { describe, expect, it, vi } from "vitest"; +import { pollSubscription } from "./index.js"; +import type { AliasCandidate } from "./matcher.js"; + +// ----------------------------------------------------------------------------- +// 
Helpers — mirrors the makeMockRpc shape used by release-mangaupdates so the +// two suites stay readable side-by-side. +// ----------------------------------------------------------------------------- + +interface CapturedCall { + method: string; + params: unknown; +} + +function makeMockRpc(respond: (method: string, params: unknown) => unknown): { + rpc: HostRpcClient; + calls: CapturedCall[]; +} { + const calls: CapturedCall[] = []; + // eslint-disable-next-line prefer-const + let rpc: HostRpcClient; + const writeFn = (line: string) => { + const req = JSON.parse(line.trim()) as { + id: number; + method: string; + params: unknown; + }; + calls.push({ method: req.method, params: req.params }); + let result: unknown; + let error: { code: number; message: string } | null = null; + try { + result = respond(req.method, req.params); + } catch (err) { + error = { + code: -32_000, + message: err instanceof Error ? err.message : "synthetic error", + }; + } + setImmediate(() => { + const payload = error + ? { jsonrpc: "2.0", id: req.id, error } + : { jsonrpc: "2.0", id: req.id, result }; + rpc.handleResponse(JSON.stringify(payload)); + }); + }; + rpc = new HostRpcClient(writeFn); + return { rpc, calls }; +} + +function mockFetchOk(body: string, etag?: string): typeof fetch { + return vi.fn().mockResolvedValue( + new Response(body, { + status: 200, + headers: etag ? { etag } : {}, + }), + ) as unknown as typeof fetch; +} + +function stubResponse(status: number, body = "", headers: Record = {}): Response { + const h = new Headers(headers); + return { + status, + statusText: "", + headers: h, + text: async () => body, + } as unknown as Response; +} + +// ----------------------------------------------------------------------------- +// Fixtures — uses the user's 1r0n example shapes. 
+// ----------------------------------------------------------------------------- + +const uploaderFeedXml = ` + + + + <![CDATA[[1r0n] Boruto - Two Blue Vortex - Volume 02 (Digital) (1r0n)]]> + https://nyaa.si/download/1.torrent + https://nyaa.si/view/1 + Mon, 04 May 2026 02:31:00 GMT + aaa + + + <![CDATA[[1r0n] Dandadan c126-142 (Digital)]]> + https://nyaa.si/download/2.torrent + https://nyaa.si/view/2 + Sun, 03 May 2026 12:00:00 GMT + bbb + + + <![CDATA[[1r0n] Some Untracked Series v1 (Digital)]]> + https://nyaa.si/download/3.torrent + https://nyaa.si/view/3 + Sat, 02 May 2026 22:00:00 GMT + ccc + + +`; + +const trackedCandidates: AliasCandidate[] = [ + { seriesId: "s-boruto", aliases: ["Boruto: Two Blue Vortex", "Boruto Two Blue Vortex"] }, + { seriesId: "s-dandadan", aliases: ["Dandadan", "ダンダダン"] }, +]; + +// ----------------------------------------------------------------------------- +// pollSubscription +// ----------------------------------------------------------------------------- + +describe("pollSubscription", () => { + it("matches and records candidates for tracked series, skipping untracked", async () => { + const { rpc, calls } = makeMockRpc(() => ({ ledgerId: "ld", deduped: false })); + const out = await pollSubscription( + rpc, + "src-1", + { kind: "user", identifier: "1r0n" }, + trackedCandidates, + { + previousEtag: null, + timeoutMs: 1000, + minConfidence: 0.7, + fetchImpl: mockFetchOk(uploaderFeedXml, '"new-etag"'), + }, + ); + expect(out.fetched).toBe(true); + expect(out.notModified).toBe(false); + expect(out.parsed).toBe(3); + // Boruto + Dandadan match; "Some Untracked Series" doesn't. 
+ expect(out.matched).toBe(2); + expect(out.recorded).toBe(2); + expect(out.etag).toBe('"new-etag"'); + + const recordCalls = calls.filter((c) => c.method === "releases/record"); + expect(recordCalls).toHaveLength(2); + const matched = recordCalls.map((c) => { + const p = c.params as { candidate: { seriesMatch: { codexSeriesId: string } } }; + return p.candidate.seriesMatch.codexSeriesId; + }); + expect(matched.sort()).toEqual(["s-boruto", "s-dandadan"]); + }); + + it("returns notModified when upstream replies 304", async () => { + const { rpc, calls } = makeMockRpc(() => ({ ledgerId: "x", deduped: false })); + const fetchImpl = vi.fn().mockResolvedValue(stubResponse(304)); + const out = await pollSubscription( + rpc, + "src-1", + { kind: "user", identifier: "1r0n" }, + trackedCandidates, + { + previousEtag: '"v1"', + timeoutMs: 1000, + minConfidence: 0.7, + fetchImpl: fetchImpl as unknown as typeof fetch, + }, + ); + expect(out.notModified).toBe(true); + expect(out.parsed).toBe(0); + expect(out.matched).toBe(0); + expect(out.upstreamStatus).toBe(304); + expect(calls.filter((c) => c.method === "releases/record")).toHaveLength(0); + }); + + it("propagates upstream 429 status without recording", async () => { + const { rpc, calls } = makeMockRpc(() => ({ ledgerId: "x", deduped: false })); + const fetchImpl = vi.fn().mockResolvedValue(stubResponse(429)); + const out = await pollSubscription( + rpc, + "src-1", + { kind: "user", identifier: "1r0n" }, + trackedCandidates, + { + previousEtag: null, + timeoutMs: 1000, + minConfidence: 0.7, + fetchImpl: fetchImpl as unknown as typeof fetch, + }, + ); + expect(out.fetched).toBe(false); + expect(out.upstreamStatus).toBe(429); + expect(calls.filter((c) => c.method === "releases/record")).toHaveLength(0); + }); + + it("attaches infoHash and format hints to the candidate payload", async () => { + const { rpc, calls } = makeMockRpc(() => ({ ledgerId: "ld", deduped: false })); + const fetchImpl = mockFetchOk(uploaderFeedXml); + 
await pollSubscription(rpc, "src-1", { kind: "user", identifier: "1r0n" }, trackedCandidates, {
+      previousEtag: null,
+      timeoutMs: 1000,
+      minConfidence: 0.7,
+      fetchImpl,
+    });
+    const recordCalls = calls.filter((c) => c.method === "releases/record");
+    const boruto = recordCalls.find((c) => {
+      const p = c.params as { candidate: { seriesMatch: { codexSeriesId: string } } };
+      return p.candidate.seriesMatch.codexSeriesId === "s-boruto";
+    });
+    expect(boruto).toBeDefined();
+    if (!boruto) return;
+    const params = boruto.params as {
+      candidate: {
+        infoHash: string | null;
+        formatHints: Record<string, unknown>;
+        volume: number | null;
+      };
+    };
+    expect(params.candidate.infoHash).toBe("aaa");
+    expect(params.candidate.formatHints.digital).toBe(true);
+    expect(params.candidate.formatHints.subscription).toBe("user:1r0n");
+    expect(params.candidate.volume).toBe(2);
+  });
+
+  it("counts deduped records as not-newly-recorded", async () => {
+    const { rpc } = makeMockRpc(() => ({ ledgerId: "ld", deduped: true }));
+    const fetchImpl = mockFetchOk(uploaderFeedXml);
+    const out = await pollSubscription(
+      rpc,
+      "src-1",
+      { kind: "user", identifier: "1r0n" },
+      trackedCandidates,
+      {
+        previousEtag: null,
+        timeoutMs: 1000,
+        minConfidence: 0.7,
+        fetchImpl,
+      },
+    );
+    expect(out.matched).toBe(2);
+    expect(out.recorded).toBe(0);
+  });
+
+  it("skips items with no alias match without recording", async () => {
+    const { rpc, calls } = makeMockRpc(() => ({ ledgerId: "x", deduped: false }));
+    const fetchImpl = mockFetchOk(uploaderFeedXml);
+    const out = await pollSubscription(
+      rpc,
+      "src-1",
+      { kind: "user", identifier: "1r0n" },
+      [{ seriesId: "s-other", aliases: ["Completely Unrelated Manga"] }],
+      {
+        previousEtag: null,
+        timeoutMs: 1000,
+        minConfidence: 0.7,
+        fetchImpl,
+      },
+    );
+    expect(out.parsed).toBe(3);
+    expect(out.matched).toBe(0);
+    expect(out.recorded).toBe(0);
+    expect(calls.filter((c) => c.method === "releases/record")).toHaveLength(0);
+  });
+});
diff --git
a/plugins/release-nyaa/src/index.ts b/plugins/release-nyaa/src/index.ts new file mode 100644 index 00000000..5784415e --- /dev/null +++ b/plugins/release-nyaa/src/index.ts @@ -0,0 +1,408 @@ +/** + * Nyaa.si Release-Source Plugin for Codex. + * + * Polls Nyaa user / search RSS feeds for an admin-configured uploader + * allowlist and announces new releases for tracked series. Matching is + * alias-based: each parsed Nyaa title is normalized and compared to every + * tracked series' alias list. Confidence is 0.95 on exact normalized match, + * dropping to a fuzzy floor of 0.7 for near-matches; below that, the + * candidate is silently dropped (the host's threshold would reject it + * anyway). + * + * Flow per `releases/poll`: + * 1. Read uploader subscriptions from admin config. + * 2. Pull tracked-series + aliases from the host + * (`releases/list_tracked`). + * 3. For each subscription, conditional GET the RSS feed (ETag stored on + * the source row; we don't have per-subscription state slots). + * 4. Parse each item; match against tracked aliases; emit a candidate via + * `releases/record`. + * 5. Aggregate the worst upstream status across all subscriptions for the + * host's per-host backoff layer. + * + * Design notes: + * - **One source row, many uploaders.** The plan calls for "one source + * row per uploader", but the host has no admin endpoint for creating + * `release_sources` rows; admins create one row when enabling the + * plugin and the plugin walks all subscriptions during a single poll. + * Mirrors how MangaUpdates polls all tracked series within one source + * row's `poll(sourceId)` call. + * - **ETag is a single bucket.** The source row stores one ETag — we use + * it on the *first* uploader fetched and rotate fresh ETags out of the + * response on subsequent polls. Daily polls + small RSS bodies make + * this acceptable; per-subscription ETags would need per-(source, + * subscription) state, deferred. 
+ */ + +import { + createLogger, + createReleaseSourcePlugin, + type HostRpcClient, + HostRpcError, + type InitializeParams, + RELEASES_METHODS, + type ReleaseCandidate, + type ReleasePollRequest, + type ReleasePollResponse, + type TrackedSeriesEntry, +} from "@ashdev/codex-plugin-sdk"; +import { + fetchSubscriptionFeed, + parseSubscriptionList, + type UploaderSubscription, +} from "./fetcher.js"; +import { + DEFAULT_MIN_CONFIDENCE, + DEFAULT_POLL_INTERVAL_S, + DEFAULT_REQUEST_TIMEOUT_MS, + manifest, +} from "./manifest.js"; +import { type AliasCandidate, type AliasMatch, matchSeries } from "./matcher.js"; +import { type ParsedRssItem, parseFeed } from "./parser.js"; + +const logger = createLogger({ name: manifest.name, level: "info" }); + +// ============================================================================= +// Plugin-level state (set during initialize) +// ============================================================================= + +interface PluginState { + hostRpc: HostRpcClient | null; + /** Parsed admin uploader subscription list. */ + subscriptions: UploaderSubscription[]; + /** Hard timeout for upstream fetches. */ + requestTimeoutMs: number; + /** Minimum confidence floor — passed to the matcher's `fuzzyFloor`. */ + minConfidence: number; + /** Override base URL (for tests / mirrors). */ + baseUrl: string | null; +} + +const state: PluginState = { + hostRpc: null, + subscriptions: [], + requestTimeoutMs: DEFAULT_REQUEST_TIMEOUT_MS, + minConfidence: DEFAULT_MIN_CONFIDENCE, + baseUrl: null, +}; + +/** Reset state. Exported for tests; not part of the plugin contract. 
*/ +export function _resetState(): void { + state.hostRpc = null; + state.subscriptions = []; + state.requestTimeoutMs = DEFAULT_REQUEST_TIMEOUT_MS; + state.minConfidence = DEFAULT_MIN_CONFIDENCE; + state.baseUrl = null; +} + +// ============================================================================= +// Reverse-RPC wrappers +// ============================================================================= + +interface ListTrackedResponse { + tracked: TrackedSeriesEntry[]; + nextOffset?: number; +} + +interface RecordResponse { + ledgerId: string; + deduped: boolean; +} + +async function listTracked( + rpc: HostRpcClient, + sourceId: string, + offset: number, + limit: number, +): Promise { + return rpc.call(RELEASES_METHODS.LIST_TRACKED, { + sourceId, + offset, + limit, + }); +} + +async function recordCandidate( + rpc: HostRpcClient, + sourceId: string, + candidate: ReleaseCandidate, +): Promise { + try { + return await rpc.call(RELEASES_METHODS.RECORD, { + sourceId, + candidate, + }); + } catch (err) { + if (err instanceof HostRpcError) { + logger.warn( + `record failed for ${candidate.externalReleaseId}: ${err.message} (code ${err.code})`, + ); + } else { + const msg = err instanceof Error ? err.message : "unknown error"; + logger.warn(`record failed for ${candidate.externalReleaseId}: ${msg}`); + } + return null; + } +} + +// ============================================================================= +// Iteration helpers +// ============================================================================= + +/** + * Pull every tracked-series page from the host. We can't stream + * subscription-by-subscription because each Nyaa item has to be matched + * against the *full* alias set; partial pages would leak misses. 
+ */ +export async function fetchAllTracked( + rpc: HostRpcClient, + sourceId: string, +): Promise { + const out: AliasCandidate[] = []; + const pageSize = 200; + let offset = 0; + while (true) { + const page = await listTracked(rpc, sourceId, offset, pageSize); + for (const entry of page.tracked) { + const aliases = entry.aliases ?? []; + // Drop entries with no aliases — Nyaa matching is alias-only. + if (aliases.length === 0) continue; + out.push({ seriesId: entry.seriesId, aliases }); + } + if (page.nextOffset === undefined || page.tracked.length === 0) return out; + offset = page.nextOffset; + } +} + +// ============================================================================= +// Per-subscription poll +// ============================================================================= + +/** Outcome of a single per-subscription fetch+parse cycle. */ +export interface SubscriptionPollOutcome { + subscription: UploaderSubscription; + fetched: boolean; + notModified: boolean; + parsed: number; + matched: number; + recorded: number; + upstreamStatus: number; + /** New ETag returned by upstream (only set when fetched=true). */ + etag: string | null; + error: string; +} + +/** + * Build a `ReleaseCandidate` from a parsed RSS item + the matcher's verdict. + * + * Language is hardcoded to `"en"` — Nyaa releases don't carry a language tag + * in the title or RSS metadata. English-only is the right default for the + * uploader allowlist this plugin is designed around (`1r0n`, etc.); admins + * who add non-English uploaders should configure tracked series' languages + * accordingly. The host's `latest_known_*` advance gate enforces the + * per-series language list. 
+ */ +function toCandidate( + match: AliasMatch, + item: ParsedRssItem, + subscription: UploaderSubscription, +): ReleaseCandidate { + const formatHints: Record = { ...item.formatHints }; + if (item.chapterRangeEnd !== null) { + formatHints.chapterRangeEnd = item.chapterRangeEnd; + } + if (item.volumeRangeEnd !== null) { + formatHints.volumeRangeEnd = item.volumeRangeEnd; + } + formatHints.subscription = `${subscription.kind}:${subscription.identifier}`; + + return { + seriesMatch: { + codexSeriesId: match.seriesId, + confidence: match.confidence, + reason: match.reason, + }, + externalReleaseId: item.externalReleaseId, + chapter: item.chapter, + volume: item.volume, + language: "en", + groupOrUploader: item.group ?? (subscription.kind === "user" ? subscription.identifier : null), + payloadUrl: item.link.length > 0 ? item.link : `urn:nyaa:${item.externalReleaseId}`, + infoHash: item.infoHash, + formatHints, + observedAt: item.observedAt, + }; +} + +/** + * Poll a single uploader subscription. Internal — exposed for testing. + */ +export async function pollSubscription( + rpc: HostRpcClient, + sourceId: string, + subscription: UploaderSubscription, + candidates: AliasCandidate[], + options: { + previousEtag: string | null; + timeoutMs: number; + minConfidence: number; + baseUrl?: string | null; + fetchImpl?: typeof fetch; + }, +): Promise { + const result = await fetchSubscriptionFeed(subscription, options.previousEtag, null, { + fetchImpl: options.fetchImpl, + timeoutMs: options.timeoutMs, + ...(options.baseUrl ? 
{ baseUrl: options.baseUrl } : {}), + }); + + if (result.kind === "notModified") { + return { + subscription, + fetched: true, + notModified: true, + parsed: 0, + matched: 0, + recorded: 0, + upstreamStatus: 304, + etag: null, + error: "", + }; + } + + if (result.kind === "error") { + return { + subscription, + fetched: false, + notModified: false, + parsed: 0, + matched: 0, + recorded: 0, + upstreamStatus: result.status, + etag: null, + error: result.message, + }; + } + + // result.kind === "ok" + const items = parseFeed(result.body); + let matched = 0; + let recorded = 0; + for (const item of items) { + const m = matchSeries(item.seriesGuess, candidates, { + fuzzyFloor: options.minConfidence, + }); + if (m === null) continue; + matched++; + const candidate = toCandidate(m, item, subscription); + const outcome = await recordCandidate(rpc, sourceId, candidate); + if (outcome && !outcome.deduped) recorded++; + } + return { + subscription, + fetched: true, + notModified: false, + parsed: items.length, + matched, + recorded, + upstreamStatus: 200, + etag: result.etag, + error: "", + }; +} + +// ============================================================================= +// Top-level poll handler +// ============================================================================= + +async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise { + const sourceId = params.sourceId; + + if (state.subscriptions.length === 0) { + logger.warn("no uploader subscriptions configured; nothing to poll"); + return { notModified: false, upstreamStatus: 200 }; + } + + // 1. Pull tracked-series + aliases. + const tracked = await fetchAllTracked(rpc, sourceId); + if (tracked.length === 0) { + logger.info(`no tracked series with aliases for source=${sourceId}`); + return { notModified: false, upstreamStatus: 200 }; + } + + let parsed = 0; + let matched = 0; + let recorded = 0; + let worstStatus = 200; + let lastEtag: string | null = null; + + // 2. 
Walk subscriptions in declaration order. We use the ETag stored on + // the source row (passed as `params.etag`) for the *first* fetch; + // subsequent fetches start fresh because the ETag belongs to whichever + // subscription was polled last, not this one. + let firstFetch = true; + for (const sub of state.subscriptions) { + const outcome = await pollSubscription(rpc, sourceId, sub, tracked, { + previousEtag: firstFetch ? (params.etag ?? null) : null, + timeoutMs: state.requestTimeoutMs, + minConfidence: state.minConfidence, + ...(state.baseUrl ? { baseUrl: state.baseUrl } : {}), + }); + firstFetch = false; + parsed += outcome.parsed; + matched += outcome.matched; + recorded += outcome.recorded; + if (outcome.upstreamStatus > worstStatus) worstStatus = outcome.upstreamStatus; + if (outcome.etag) lastEtag = outcome.etag; + if (outcome.error) { + logger.warn( + `subscription ${sub.kind}:${sub.identifier}: ${outcome.error} (status ${outcome.upstreamStatus})`, + ); + } + } + + logger.info( + `poll complete: source=${sourceId} subscriptions=${state.subscriptions.length} tracked=${tracked.length} parsed=${parsed} matched=${matched} recorded=${recorded} worst_status=${worstStatus}`, + ); + + return { + notModified: false, + upstreamStatus: worstStatus, + ...(lastEtag !== null ? { etag: lastEtag } : {}), + }; +} + +// ============================================================================= +// Plugin Initialization +// ============================================================================= + +createReleaseSourcePlugin({ + manifest, + provider: { + async poll(params: ReleasePollRequest): Promise { + if (!state.hostRpc) { + throw new Error("Plugin not initialized: hostRpc client missing"); + } + return poll(params, state.hostRpc); + }, + }, + logLevel: "info", + onInitialize(params: InitializeParams) { + state.hostRpc = params.hostRpc; + const ac = params.adminConfig ?? 
{}; + if (typeof ac.uploaders === "string") { + state.subscriptions = parseSubscriptionList(ac.uploaders); + } + if (typeof ac.requestTimeoutMs === "number" && Number.isFinite(ac.requestTimeoutMs)) { + state.requestTimeoutMs = Math.max(1_000, Math.min(ac.requestTimeoutMs, 60_000)); + } + if (typeof ac.baseUrl === "string" && ac.baseUrl.trim().length > 0) { + state.baseUrl = ac.baseUrl.trim(); + } + logger.info( + `initialized: subscriptions=${state.subscriptions.length} timeoutMs=${state.requestTimeoutMs} minConfidence=${state.minConfidence} defaultPoll=${DEFAULT_POLL_INTERVAL_S}s`, + ); + }, +}); + +logger.info("Nyaa release-source plugin started"); diff --git a/plugins/release-nyaa/src/manifest.ts b/plugins/release-nyaa/src/manifest.ts new file mode 100644 index 00000000..16dceb43 --- /dev/null +++ b/plugins/release-nyaa/src/manifest.ts @@ -0,0 +1,79 @@ +import type { PluginManifest } from "@ashdev/codex-plugin-sdk"; +import packageJson from "../package.json" with { type: "json" }; + +/** Default poll interval: 24 hours. Daily polls keep the per-uploader fan-out + * gentle and respect Nyaa's preference for low-frequency clients. */ +export const DEFAULT_POLL_INTERVAL_S = 86_400; + +/** Default per-fetch HTTP timeout. Nyaa is usually fast; 10s is generous. */ +export const DEFAULT_REQUEST_TIMEOUT_MS = 10_000; + +/** + * Default minimum confidence threshold for emitted candidates. Nyaa matches + * series via title parsing + alias comparison, which is fuzzier than the + * external-ID match used by MangaUpdates. The host's threshold (default 0.7) + * still filters at record time; this is the plugin-side floor below which we + * don't even bother calling `releases/record`. + */ +export const DEFAULT_MIN_CONFIDENCE = 0.7; + +export const manifest = { + name: "release-nyaa", + displayName: "Nyaa Releases", + version: packageJson.version, + description: + "Announces new chapter / volume torrents for tracked series via Nyaa.si uploader RSS feeds. 
Limited to an admin-configured uploader allowlist; matches via title aliases.", + author: "Codex", + homepage: "https://github.com/AshDevFr/codex", + protocolVersion: "1.1", + capabilities: { + releaseSource: { + kinds: ["rss-uploader"], + requiresAliases: true, + canAnnounceChapters: true, + canAnnounceVolumes: true, + defaultPollIntervalS: DEFAULT_POLL_INTERVAL_S, + }, + }, + configSchema: { + description: + "Nyaa plugin configuration. The plugin polls the listed uploaders' RSS feeds (or, for groups without a Nyaa account, a fallback search query) and emits release candidates only for tracked series whose aliases match the parsed title. Notification-only: Codex never downloads torrents.", + fields: [ + { + key: "uploaders", + label: "Uploader Subscriptions", + description: + "Comma-separated list of trusted uploader handles or queries. Each entry is either `username` (a Nyaa user feed) or `q:` (a fallback site-wide search filter, useful for groups without a dedicated account). Confidence stays above the rejection threshold only for entries that match a tracked series alias.", + type: "string" as const, + required: false, + default: "", + example: "1r0n,TankobonBlur,q:LuminousScans", + }, + { + key: "requestTimeoutMs", + label: "Request Timeout (ms)", + description: + "How long to wait for a single Nyaa RSS fetch before giving up. Defaults to 10000 (10 seconds).", + type: "number" as const, + required: false, + default: DEFAULT_REQUEST_TIMEOUT_MS, + }, + { + key: "baseUrl", + label: "Nyaa Base URL", + description: + "Override the Nyaa base URL. Useful for mirrors or for tests. Defaults to https://nyaa.si.", + type: "string" as const, + required: false, + default: "https://nyaa.si", + example: "https://nyaa.si", + }, + ], + }, + userDescription: + "Watches Nyaa.si uploader feeds for new releases of tracked series. 
Matches by title alias — make sure your series' aliases (auto-populated from metadata or added manually in the Tracking panel) cover the way the uploader names them. Notification-only — Codex never downloads anything.", + adminSetupInstructions: + "1. Configure the Uploader Subscriptions field with a comma-separated list of trusted uploader handles (e.g. `1r0n,TankobonBlur`). Use `q:` for groups without a Nyaa account. 2. Make sure tracked series have aliases that match how the uploader names releases (e.g. include alternate spellings, romanizations, the volume-ranges tag uploaders use). 3. The plugin polls the uploader feeds at the configured interval; any release whose title matches a tracked alias is recorded as a candidate. Filtering by formats / `(Digital)` tag happens at parse time and is logged but doesn't reject candidates by default.", +} as const satisfies PluginManifest & { + capabilities: { releaseSource: { kinds: ["rss-uploader"] } }; +}; diff --git a/plugins/release-nyaa/src/matcher.test.ts b/plugins/release-nyaa/src/matcher.test.ts new file mode 100644 index 00000000..89eca26b --- /dev/null +++ b/plugins/release-nyaa/src/matcher.test.ts @@ -0,0 +1,130 @@ +import { describe, expect, it } from "vitest"; +import { + CONFIDENCE_EXACT, + DEFAULT_FUZZY_FLOOR, + diceRatio, + matchSeries, + normalizeAlias, +} from "./matcher.js"; + +// ----------------------------------------------------------------------------- +// normalizeAlias — must match the Rust `normalize_alias` impl +// ----------------------------------------------------------------------------- + +describe("normalizeAlias", () => { + it("lowercases and strips punctuation", () => { + expect(normalizeAlias("My Hero Academia!")).toBe("my hero academia"); + }); + + it("collapses multiple spaces, drops leading/trailing space", () => { + expect(normalizeAlias(" Berserk - Vol ")).toBe("berserk vol"); + }); + + it("strips colons and other ASCII punctuation (matches Rust impl)", () => { + 
expect(normalizeAlias("Re:Zero - Starting Life in Another World")).toBe( + "rezero starting life in another world", + ); + }); + + it("returns empty string for input with only punctuation", () => { + expect(normalizeAlias("!!! - ?!")).toBe(""); + }); + + it("preserves Unicode alphanumerics", () => { + expect(normalizeAlias("僕のヒーロー")).toBe("僕のヒーロー"); + }); +}); + +// ----------------------------------------------------------------------------- +// diceRatio — sanity checks +// ----------------------------------------------------------------------------- + +describe("diceRatio", () => { + it("returns 1.0 for identical strings", () => { + expect(diceRatio("boruto two blue vortex", "boruto two blue vortex")).toBe(1); + }); + + it("returns 0 for empty inputs", () => { + expect(diceRatio("", "x")).toBe(0); + expect(diceRatio("x", "")).toBe(0); + }); + + it("scores high for word-rearranged near-matches", () => { + const r = diceRatio("boruto two blue vortex", "boruto - two blue vortex"); + expect(r).toBeGreaterThan(0.85); + }); + + it("scores low for unrelated series", () => { + const r = diceRatio("naruto", "boruto two blue vortex"); + expect(r).toBeLessThan(0.5); + }); +}); + +// ----------------------------------------------------------------------------- +// matchSeries +// ----------------------------------------------------------------------------- + +describe("matchSeries", () => { + const candidates = [ + { seriesId: "s-boruto", aliases: ["Boruto: Two Blue Vortex", "Boruto - Two Blue Vortex"] }, + { seriesId: "s-onepiece", aliases: ["One Piece"] }, + { seriesId: "s-dandadan", aliases: ["Dandadan", "ダンダダン"] }, + ]; + + it("returns null for empty seriesGuess", () => { + expect(matchSeries("", candidates)).toBeNull(); + expect(matchSeries(" ", candidates)).toBeNull(); + }); + + it("returns null when there are no candidates", () => { + expect(matchSeries("Boruto", [])).toBeNull(); + }); + + it("emits an alias-exact match at CONFIDENCE_EXACT", () => { + const m = 
matchSeries("Boruto Two Blue Vortex", candidates); + expect(m).not.toBeNull(); + if (m === null) return; + expect(m.seriesId).toBe("s-boruto"); + expect(m.confidence).toBe(CONFIDENCE_EXACT); + expect(m.reason).toBe("alias-exact"); + expect(m.matchedAlias).toBe("Boruto: Two Blue Vortex"); + }); + + it("emits an alias-fuzzy match for a near-miss above the floor", () => { + // Add a slightly different aliasing form. + const c = [{ seriesId: "s-frieren", aliases: ["Sousou no Frieren"] }]; + const m = matchSeries("Sousou Frieren", c, { fuzzyFloor: DEFAULT_FUZZY_FLOOR }); + if (m === null) { + // Below floor is also fine for this test — exercise the explicit + // match-or-skip semantics rather than asserting a confidence value. + expect(m).toBeNull(); + return; + } + expect(m.seriesId).toBe("s-frieren"); + expect(m.reason).toBe("alias-fuzzy"); + expect(m.confidence).toBeGreaterThanOrEqual(DEFAULT_FUZZY_FLOOR); + expect(m.confidence).toBeLessThan(CONFIDENCE_EXACT); + }); + + it("rejects unrelated names below the dice floor", () => { + const m = matchSeries("Berserk", candidates); + expect(m).toBeNull(); + }); + + it("rejects matches whose Dice ratio is below MIN_DICE_RATIO even with a low floor", () => { + const c = [{ seriesId: "s-x", aliases: ["Berserk"] }]; + // Even with a permissive floor, the matcher still requires Dice ≥ 0.85. 
+ const m = matchSeries("Naruto", c, { fuzzyFloor: 0.5 }); + expect(m).toBeNull(); + }); + + it("picks the best candidate when multiple are above the floor", () => { + const c = [ + { seriesId: "s-bad", aliases: ["Boruto Two Vortex"] }, // worse Dice + { seriesId: "s-good", aliases: ["Boruto Two Blue Vortex"] }, // exact match + ]; + const m = matchSeries("Boruto Two Blue Vortex", c); + expect(m?.seriesId).toBe("s-good"); + expect(m?.reason).toBe("alias-exact"); + }); +}); diff --git a/plugins/release-nyaa/src/matcher.ts b/plugins/release-nyaa/src/matcher.ts new file mode 100644 index 00000000..4aa87240 --- /dev/null +++ b/plugins/release-nyaa/src/matcher.ts @@ -0,0 +1,216 @@ +/** + * Alias matcher for Nyaa releases. + * + * Nyaa identifies series only by name in the torrent title — there's no + * `nyaa_id` or other stable external ID that ties a release to a specific + * series in our DB. So matching is a two-step pipeline: + * + * 1. Normalize the parsed `seriesGuess` and every alias the host returned + * to a common shape (lowercase, alphanumeric + spaces only). This + * mirrors the `normalize_alias` function on the host + * ([src/db/entities/series_aliases.rs](src/db/entities/series_aliases.rs)) + * so a release whose normalized title exactly matches one of a series' + * stored aliases lands at confidence 0.95. + * 2. If no exact match, compute a token-level Sørensen-Dice similarity + * against every candidate alias. The highest ratio wins, scaled into a + * 0.7..0.85 confidence band; below the configured threshold we skip. + * + * The Dice ratio is more forgiving than edit distance for word-rearranged + * titles (`"Boruto Two Blue Vortex"` vs. `"Boruto - Two Blue Vortex"`) while + * still rejecting unrelated series at the threshold. We deliberately don't + * wire a heavy fuzzy-match library; the surface area is small. + */ + +/** A tracked-series candidate with its raw aliases. */ +export interface AliasCandidate { + /** Codex series UUID. 
*/ + seriesId: string; + /** Raw aliases from `releases/list_tracked`. */ + aliases: string[]; +} + +/** A successful match. */ +export interface AliasMatch { + seriesId: string; + confidence: number; + /** Reason string surfaced in the SeriesMatch — "alias-exact" or "alias-fuzzy". */ + reason: string; + /** The matched alias (raw form, for logging). */ + matchedAlias: string; +} + +/** + * Confidence assigned on an exact normalized match. + * + * Below 1.0 because we still don't have an external ID — a release titled + * `"X"` could legitimately match multiple series with that alias. The host's + * threshold treats this as a strong-but-not-certain signal. + */ +export const CONFIDENCE_EXACT = 0.95; + +/** + * Floor below which fuzzy matches don't get emitted. The host's default + * threshold is 0.7; we share that floor so plugin-side filtering doesn't + * silently second-guess host config. + */ +export const DEFAULT_FUZZY_FLOOR = 0.7; + +/** + * Anything below this Dice-coefficient is rejected outright (even before the + * confidence floor kicks in). 0.85 lets through "two-blue-vortex" vs. "two + * blue vortex" but kills "naruto" vs. "boruto two blue vortex". + */ +export const MIN_DICE_RATIO = 0.85; + +// --------------------------------------------------------------------------- +// Normalization +// --------------------------------------------------------------------------- + +/** + * Normalize an alias to the same shape the host stores in + * `series_aliases.normalized`. Mirrors the Rust `normalize_alias` impl — keep + * these in lockstep. + */ +export function normalizeAlias(input: string): string { + let out = ""; + let lastWasSpace = false; + for (const ch of input) { + // Match Rust's `is_alphanumeric()` (Unicode-aware). 
+ if (/[\p{L}\p{N}]/u.test(ch)) { + out += ch.toLowerCase(); + lastWasSpace = false; + } else if (/\s/.test(ch) && out.length > 0 && !lastWasSpace) { + out += " "; + lastWasSpace = true; + } + // Anything else (punctuation, control, symbols) is dropped. + } + return out.endsWith(" ") ? out.slice(0, -1) : out; +} + +// --------------------------------------------------------------------------- +// Dice coefficient (token-level, character-bigram fallback) +// --------------------------------------------------------------------------- + +/** + * Sørensen-Dice coefficient on word-bigrams of the input strings (with a + * character-bigram fallback for short / single-word strings). + * + * Range: 0..1, where 1.0 means identical bigram sets. + */ +export function diceRatio(a: string, b: string): number { + if (a.length === 0 || b.length === 0) return 0; + if (a === b) return 1; + + const bigramsA = bigrams(a); + const bigramsB = bigrams(b); + if (bigramsA.size === 0 || bigramsB.size === 0) return 0; + + let intersection = 0; + for (const bg of bigramsA) { + if (bigramsB.has(bg)) intersection++; + } + return (2 * intersection) / (bigramsA.size + bigramsB.size); +} + +function bigrams(s: string): Set { + const out = new Set(); + // Word bigrams first. + const words = s.split(/\s+/).filter((w) => w.length > 0); + if (words.length >= 2) { + for (let i = 0; i < words.length - 1; i++) { + out.add(`${words[i]} ${words[i + 1]}`); + } + } + // Plus character bigrams to handle word-rearrangement and short strings. 
+ const flat = s.replace(/\s+/g, ""); + if (flat.length >= 2) { + for (let i = 0; i < flat.length - 1; i++) { + out.add(`#${flat.slice(i, i + 2)}`); + } + } else if (flat.length === 1) { + out.add(`#${flat}`); + } + return out; +} + +// --------------------------------------------------------------------------- +// Public matching entry point +// --------------------------------------------------------------------------- + +export interface MatchOptions { + /** + * Minimum confidence for a fuzzy match to be returned. Defaults to + * `DEFAULT_FUZZY_FLOOR` (0.7). Below this, the matcher returns null. + */ + fuzzyFloor?: number; +} + +/** + * Match a parsed series-guess against a list of tracked-series candidates and + * their aliases. Returns the best match or null if nothing clears the floor. + * + * On an exact normalized match against any alias of a candidate, confidence + * is `CONFIDENCE_EXACT` (0.95). If multiple candidates have aliases that + * normalize to the same form, the first one wins — that's a data-quality + * issue the host surfaces via the `latest_known_*` advance gate, not + * something the matcher can untangle alone. + * + * On no exact match, the matcher computes Dice ratios across the cartesian + * product (candidates × aliases), finds the maximum, scales it from + * `[MIN_DICE_RATIO, 1.0]` into `[fuzzyFloor, 0.85]`, and returns a fuzzy + * match if the result is at or above the floor. + */ +export function matchSeries( + seriesGuess: string, + candidates: AliasCandidate[], + opts: MatchOptions = {}, +): AliasMatch | null { + const floor = opts.fuzzyFloor ?? DEFAULT_FUZZY_FLOOR; + const target = normalizeAlias(seriesGuess); + if (target.length === 0 || candidates.length === 0) return null; + + // Pass 1 — exact normalized match. 
+ for (const c of candidates) { + for (const alias of c.aliases) { + if (normalizeAlias(alias) === target) { + return { + seriesId: c.seriesId, + confidence: CONFIDENCE_EXACT, + reason: "alias-exact", + matchedAlias: alias, + }; + } + } + } + + // Pass 2 — best fuzzy match. + let best: AliasMatch | null = null; + let bestRatio = 0; + for (const c of candidates) { + for (const alias of c.aliases) { + const ratio = diceRatio(target, normalizeAlias(alias)); + if (ratio > bestRatio) { + bestRatio = ratio; + best = { + seriesId: c.seriesId, + confidence: 0, + reason: "alias-fuzzy", + matchedAlias: alias, + }; + } + } + } + if (best === null || bestRatio < MIN_DICE_RATIO) return null; + + // Linearly scale [MIN_DICE_RATIO..1.0] → [fuzzyFloor..0.85]. + // (We cap the fuzzy ceiling below CONFIDENCE_EXACT so an alias-exact match + // is always strictly stronger than the best alias-fuzzy match.) + const ceiling = 0.85; + const span = 1 - MIN_DICE_RATIO; + const t = (bestRatio - MIN_DICE_RATIO) / span; // 0..1 inside the band + const confidence = floor + t * (ceiling - floor); + if (confidence < floor) return null; + best.confidence = Number(confidence.toFixed(4)); + return best; +} diff --git a/plugins/release-nyaa/src/parser.test.ts b/plugins/release-nyaa/src/parser.test.ts new file mode 100644 index 00000000..6332c694 --- /dev/null +++ b/plugins/release-nyaa/src/parser.test.ts @@ -0,0 +1,232 @@ +import { describe, expect, it } from "vitest"; +import { parseFeed, parseItem, parseTitle } from "./parser.js"; + +// ----------------------------------------------------------------------------- +// parseTitle — corpus mirroring real-world Nyaa titles, including the user's +// 1r0n / mixed-format examples that motivated this phase. 
+// ----------------------------------------------------------------------------- + +describe("parseTitle", () => { + it("parses a 1r0n volume release with leading group token and trailing tags", () => { + const t = parseTitle("[1r0n] Boruto - Two Blue Vortex - Volume 02 (Digital) (1r0n)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.group).toBe("1r0n"); + expect(t.volume).toBe(2); + expect(t.chapter).toBeNull(); + expect(t.formatHints.digital).toBe(true); + // Series guess strips group, volume token, and parenthesized tags. + expect(t.seriesGuess).toBe("Boruto Two Blue Vortex"); + }); + + it("parses a v107 short-form volume release", () => { + const t = parseTitle("[1r0n] One Piece v107 (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.volume).toBe(107); + expect(t.chapter).toBeNull(); + expect(t.formatHints.digital).toBe(true); + expect(t.seriesGuess).toBe("One Piece"); + }); + + it("parses a single chapter release with `Chapter NNN` long form", () => { + const t = parseTitle("[1r0n] Chainsaw Man - Chapter 142 (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.chapter).toBe(142); + expect(t.volume).toBeNull(); + expect(t.seriesGuess).toBe("Chainsaw Man"); + }); + + it("parses a chapter range (the screenshot's loose-chapter shape)", () => { + const t = parseTitle("[Group] Dandadan c126-142 (2024) (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.chapter).toBe(126); + expect(t.chapterRangeEnd).toBe(142); + expect(t.volume).toBeNull(); + expect(t.formatHints.digital).toBe(true); + expect(t.seriesGuess).toBe("Dandadan"); + }); + + it("parses a volume range (`v01-14` from the user's mixed-format screenshot)", () => { + const t = parseTitle("[1r0n] Boruto v01-14 (Digital) (1r0n)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.volume).toBe(1); + expect(t.volumeRangeEnd).toBe(14); + expect(t.seriesGuess).toBe("Boruto"); + }); + + it("parses a 
Tankobon-Blur Vol. NN release", () => { + const t = parseTitle("[Tankobon Blur] Solo Leveling Vol. 13 (2024) (Digital) (Tankobon Blur)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.group).toBe("Tankobon Blur"); + expect(t.volume).toBe(13); + expect(t.formatHints.digital).toBe(true); + expect(t.seriesGuess).toBe("Solo Leveling"); + }); + + it("parses a plain release without leading group token", () => { + const t = parseTitle("Berserk Volume 42 (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.group).toBeNull(); + expect(t.volume).toBe(42); + expect(t.formatHints.digital).toBe(true); + expect(t.seriesGuess).toBe("Berserk"); + }); + + it("preserves decimal chapters", () => { + const t = parseTitle("[Group] Some Series c47.5 (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.chapter).toBe(47.5); + expect(t.seriesGuess).toBe("Some Series"); + }); + + it("captures JXL format hint", () => { + const t = parseTitle("[1r0n] One Piece v107 (Digital) (JXL)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.formatHints.digital).toBe(true); + expect(t.formatHints.jxl).toBe(true); + }); + + it("returns null for an empty title", () => { + expect(parseTitle("")).toBeNull(); + expect(parseTitle(" ")).toBeNull(); + }); + + it("falls back to the raw title (no axis info) when no chapter/volume tokens are present", () => { + const t = parseTitle("Just Some Manga Tanks (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.chapter).toBeNull(); + expect(t.volume).toBeNull(); + expect(t.seriesGuess).toBe("Just Some Manga Tanks"); + expect(t.formatHints.digital).toBe(true); + }); + + it("handles the 'ch.' 
prefix variant alongside the c.NNN form", () => { + const t = parseTitle("[Group] My Series ch.143 (Digital)"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.chapter).toBe(143); + expect(t.seriesGuess).toBe("My Series"); + }); + + it("ignores leading bracketed token when not followed by content", () => { + const t = parseTitle("[Group]"); + expect(t).not.toBeNull(); + if (t === null) return; + expect(t.group).toBe("Group"); + expect(t.seriesGuess).toBe(""); + }); +}); + +// ----------------------------------------------------------------------------- +// parseItem +// ----------------------------------------------------------------------------- + +const sampleItem = ` + + <![CDATA[[1r0n] Chainsaw Man - Chapter 142 (Digital)]]> + https://nyaa.si/download/12345.torrent + https://nyaa.si/view/12345 + Mon, 04 May 2026 02:31:00 GMT + ABC123def456 + +`; + +describe("parseItem", () => { + it("extracts title, link, guid, infoHash, and pubDate", () => { + const item = parseItem(sampleItem); + expect(item).not.toBeNull(); + if (item === null) return; + expect(item.title).toBe("[1r0n] Chainsaw Man - Chapter 142 (Digital)"); + expect(item.link).toBe("https://nyaa.si/download/12345.torrent"); + expect(item.externalReleaseId).toBe("https://nyaa.si/view/12345"); // guid wins + expect(item.infoHash).toBe("abc123def456"); // lowercased + expect(item.chapter).toBe(142); + expect(item.seriesGuess).toBe("Chainsaw Man"); + expect(new Date(item.observedAt).toISOString()).toBe("2026-05-04T02:31:00.000Z"); + }); + + it("returns null when title is missing", () => { + expect(parseItem("x")).toBeNull(); + }); + + it("derives a deterministic externalReleaseId from infoHash when guid+link missing", () => { + const xml = ` + <![CDATA[[1r0n] Foo c.1 (Digital)]]> + DEADBEEF + `; + const item = parseItem(xml); + expect(item).not.toBeNull(); + if (item === null) return; + expect(item.externalReleaseId).toBe("urn:btih:deadbeef"); + }); + + it("uses a hashed fallback when guid, 
link, and infoHash are all missing", () => { + const xml = ` + <![CDATA[Foo c.1 (Digital)]]> + Mon, 04 May 2026 02:31:00 GMT + `; + const item = parseItem(xml); + expect(item).not.toBeNull(); + if (item === null) return; + expect(item.externalReleaseId).toMatch(/^t:[a-z0-9]+$/); + }); +}); + +// ----------------------------------------------------------------------------- +// parseFeed — full RSS body +// ----------------------------------------------------------------------------- + +const fullFeedXml = ` + + + Nyaa - 1r0n's torrents + + <![CDATA[[1r0n] Boruto - Two Blue Vortex - Volume 02 (Digital) (1r0n)]]> + https://nyaa.si/download/1.torrent + https://nyaa.si/view/1 + Mon, 04 May 2026 02:31:00 GMT + aaa + + + <![CDATA[[1r0n] Boruto v01-14 (Digital) (1r0n)]]> + https://nyaa.si/download/2.torrent + https://nyaa.si/view/2 + Sun, 03 May 2026 12:00:00 GMT + bbb + + + <![CDATA[[1r0n] Dandadan c126-142 (2024) (Digital)]]> + https://nyaa.si/download/3.torrent + https://nyaa.si/view/3 + Sat, 02 May 2026 22:00:00 GMT + ccc + + + + + +`; + +describe("parseFeed", () => { + it("parses every well-formed item and silently drops malformed ones", () => { + const items = parseFeed(fullFeedXml); + expect(items).toHaveLength(3); // empty-title item dropped + expect(items.map((i) => i.seriesGuess)).toEqual([ + "Boruto Two Blue Vortex", + "Boruto", + "Dandadan", + ]); + expect(items[0]?.volume).toBe(2); + expect(items[1]?.volumeRangeEnd).toBe(14); + expect(items[2]?.chapterRangeEnd).toBe(142); + }); +}); diff --git a/plugins/release-nyaa/src/parser.ts b/plugins/release-nyaa/src/parser.ts new file mode 100644 index 00000000..635fc843 --- /dev/null +++ b/plugins/release-nyaa/src/parser.ts @@ -0,0 +1,350 @@ +/** + * RSS parser for Nyaa.si feeds. + * + * Nyaa's RSS namespace exposes one extra element per item that we care about + * (``), plus the standard ``, `<link>`, `<guid>`, + * `<pubDate>`, and `<description>` fields. 
We pull all of them with the same + * lightweight regex pipeline used for MangaUpdates — no heavy XML dep. + * + * Parsing the title is where most of the work is. Real-world examples + * (sourced from production Nyaa feeds and the user's screenshot of 1r0n's + * subscription): + * + * "[1r0n] Boruto - Two Blue Vortex - Volume 02 (Digital) (1r0n)" + * "[1r0n] One Piece v107 (Digital)" + * "[1r0n] Chainsaw Man - Chapter 142 (Digital)" + * "[Group] Dandadan c126-142 (2024) (Digital)" + * "[Tankobon Blur] Solo Leveling Vol. 13 (2024) (Digital) (Tankobon Blur)" + * "Berserk Volume 42 (Digital)" + * + * The shape we want out of each item: + * - parsed series guess (alias-free string used for matching) + * - chapter / volume axes (decimals supported on chapter) + * - format hints (Digital / JXL / etc.) + * - uploader-tagged group (if encoded as a leading `[Group]` token) + * + * Nyaa titles are noisy; we keep parsing best-effort and surface confidence + * downstream from the alias matcher rather than failing here. + */ + +/** Parsed item, pre-`ReleaseCandidate`. */ +export interface ParsedRssItem { + /** Stable per-source ID. Derived from the link or guid. */ + externalReleaseId: string; + /** Original title. Useful for debugging / fallback. */ + title: string; + /** Series-name guess after stripping volume/chapter/group/format tokens. */ + seriesGuess: string; + /** Chapter number (decimals supported). Null if untyped. */ + chapter: number | null; + /** Trailing chapter of a chapter range (e.g. `c126-142` → 126..142). */ + chapterRangeEnd: number | null; + /** Volume number. Null if untyped. */ + volume: number | null; + /** Trailing volume of a volume range (e.g. `v01-14` → 1..14). */ + volumeRangeEnd: number | null; + /** Leading `[Group]` token, if any. */ + group: string | null; + /** Format hints as a small dictionary (digital, jxl, ...). */ + formatHints: Record<string, boolean>; + /** Magnet/torrent link or release page URL. 
*/ + link: string; + /** `nyaa:infoHash` value, lowercased; null if missing. */ + infoHash: string | null; + /** ISO-8601 timestamp. Falls back to "now" if pubDate is missing/invalid. */ + observedAt: string; +} + +// ----------------------------------------------------------------------------- +// XML helpers (mirror release-mangaupdates conventions) +// ----------------------------------------------------------------------------- + +function decodeXmlText(raw: string): string { + let s = raw.trim(); + const cdataMatch = s.match(/^<!\[CDATA\[([\s\S]*?)]]>$/); + if (cdataMatch?.[1] !== undefined) { + s = cdataMatch[1]; + } + return s + .replace(/&/g, "&") + .replace(/</g, "<") + .replace(/>/g, ">") + .replace(/"/g, '"') + .replace(/'/g, "'") + .replace(/'/g, "'"); +} + +/** Pull the first `<tag>` text content from an XML fragment, or null. */ +function extractTagText(xml: string, tag: string): string | null { + // Escape `:` for namespaced tags (e.g. `nyaa:infoHash`). + const safeTag = tag.replace(/:/g, "\\:"); + const re = new RegExp(`<${safeTag}[^>]*>([\\s\\S]*?)</${safeTag}>`, "i"); + const m = xml.match(re); + if (!m?.[1]) return null; + return decodeXmlText(m[1]); +} + +function splitItems(xml: string): string[] { + const out: string[] = []; + const re = /<item\b[^>]*>([\s\S]*?)<\/item>/gi; + for (;;) { + const match = re.exec(xml); + if (match === null) break; + if (match[1] !== undefined) out.push(match[1]); + } + return out; +} + +// ----------------------------------------------------------------------------- +// Title parsing +// ----------------------------------------------------------------------------- + +/** + * Strip a leading `[Group]` token off the title and return both pieces. + * If the title has no leading bracketed token, returns `{ rest: title, + * group: null }`. 
+ */ +function extractLeadingGroup(title: string): { rest: string; group: string | null } { + const m = title.match(/^\s*\[([^\]]+)\]\s*(.*)$/); + if (!m?.[1]) return { rest: title, group: null }; + const group = m[1].trim(); + const rest = m[2] ?? ""; + return { rest, group: group.length > 0 ? group : null }; +} + +/** + * Pull a chapter / chapter-range out of the noise. + * + * Accepts: + * - `c.143`, `ch.143`, `Chapter 143`, `chapter 143` + * - `c143`, `ch143` (no separator) + * - `c126-142` (range — we keep both ends) + * - decimals (`c.47.5`) + */ +function extractChapter(s: string): { chapter: number | null; chapterRangeEnd: number | null } { + // Range: `c126-142` (also `ch.126-142`, `Chapter 126-142`) + const rangeRe = /\b(?:c|ch|chapter)\.?\s*([0-9]+(?:\.[0-9]+)?)\s*[-–]\s*([0-9]+(?:\.[0-9]+)?)\b/i; + const range = s.match(rangeRe); + if (range?.[1] && range[2]) { + const start = Number.parseFloat(range[1]); + const end = Number.parseFloat(range[2]); + if (Number.isFinite(start) && Number.isFinite(end)) { + return { chapter: start, chapterRangeEnd: end }; + } + } + // Single: `c.143`, `c143`, `Chapter 143`. Exclude things like `c8000` that + // look like a resolution/codec by capping at 5 digits — Nyaa chapters + // seldom go above 9999. + const singleRe = /\b(?:c|ch|chapter)\.?\s*([0-9]{1,4}(?:\.[0-9]+)?)\b/i; + const single = s.match(singleRe); + if (single?.[1]) { + const n = Number.parseFloat(single[1]); + if (Number.isFinite(n)) return { chapter: n, chapterRangeEnd: null }; + } + return { chapter: null, chapterRangeEnd: null }; +} + +/** + * Pull a volume / volume-range out of the noise. + * + * Accepts: + * - `v01`, `v1`, `vol.1`, `vol 1`, `Volume 1`, `Vol. 1` + * - ranges: `v01-14`, `Vol. 1-14` + */ +function extractVolume(s: string): { volume: number | null; volumeRangeEnd: number | null } { + // Range first. 
+ const rangeRe = /\b(?:v|vol|volume)\.?\s*([0-9]+)\s*[-–]\s*([0-9]+)\b/i; + const range = s.match(rangeRe); + if (range?.[1] && range[2]) { + const start = Number.parseInt(range[1], 10); + const end = Number.parseInt(range[2], 10); + if (Number.isFinite(start) && Number.isFinite(end)) { + return { volume: start, volumeRangeEnd: end }; + } + } + const singleRe = /\b(?:v|vol|volume)\.?\s*([0-9]{1,4})\b/i; + const single = s.match(singleRe); + if (single?.[1]) { + const n = Number.parseInt(single[1], 10); + if (Number.isFinite(n)) return { volume: n, volumeRangeEnd: null }; + } + return { volume: null, volumeRangeEnd: null }; +} + +/** + * Walk the parenthesized tags in the title and extract format hints. + * + * Common Nyaa hints we care about: + * - `(Digital)` → `digital` + * - `(JXL)` → `jxl` + * - `(Mag-Z)` / `(Magazine)` → `magazine` + * - `(2024)` is a year, ignored (we'd need it for naming dedup but not for filtering) + */ +function extractFormatHints(s: string): Record<string, boolean> { + const hints: Record<string, boolean> = {}; + const tagRe = /\(([^)]+)\)/g; + for (;;) { + const match = tagRe.exec(s); + if (match === null) break; + const tag = (match[1] ?? "").trim().toLowerCase(); + if (tag.length === 0) continue; + if (tag === "digital") hints.digital = true; + else if (tag === "jxl") hints.jxl = true; + else if (tag === "magazine" || tag === "mag-z") hints.magazine = true; + else if (tag === "webtoon") hints.webtoon = true; + else if (tag === "bw" || tag === "b&w") hints.bw = true; + else if (tag === "color") hints.color = true; + } + return hints; +} + +/** + * Heuristic: strip everything that looks like a chapter/volume/format token, + * a parenthesized tag, or a leading `[Group]` to expose a clean series-name + * guess. The remaining string is alias-normalized downstream by the matcher. + */ +function extractSeriesGuess(input: string, group: string | null): string { + let s = input; + + // Drop everything in (...) 
— format hints, year, group repeated. + s = s.replace(/\([^)]*\)/g, " "); + + // Drop chapter/volume tokens (single or range). + s = s.replace( + /\b(?:c|ch|chapter)\.?\s*[0-9]+(?:\.[0-9]+)?(?:\s*[-–]\s*[0-9]+(?:\.[0-9]+)?)?\b/gi, + " ", + ); + s = s.replace(/\b(?:v|vol|volume)\.?\s*[0-9]+(?:\s*[-–]\s*[0-9]+)?\b/gi, " "); + + // Drop trailing/leading separator dashes used as titling glue (e.g. + // `Boruto - Two Blue Vortex - Volume 02` → `Boruto Two Blue Vortex`). + s = s.replace(/\s+[-–—]\s+/g, " "); + + // If the leading group token survived, drop it. + if (group) { + const groupRe = new RegExp(`\\[\\s*${escapeRegex(group)}\\s*\\]`, "gi"); + s = s.replace(groupRe, " "); + } + + // Drop misc dotted-extension tokens (filenames sometimes leak through). + s = s.replace(/\b\w+\.(?:cbz|cbr|epub|pdf|mobi|7z|zip)\b/gi, " "); + + // Collapse whitespace. + return s.replace(/\s+/g, " ").trim(); +} + +function escapeRegex(s: string): string { + return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); +} + +/** + * Public entry point — extract the structured fields from a single Nyaa + * release title. + * + * Returns null only if the title is empty after trimming. Otherwise returns a + * best-effort parse where the series guess may still be empty (e.g. for + * meta-bundles without a leading series name); the matcher then drops those. 
+ */ +export function parseTitle(title: string): { + seriesGuess: string; + chapter: number | null; + chapterRangeEnd: number | null; + volume: number | null; + volumeRangeEnd: number | null; + group: string | null; + formatHints: Record<string, boolean>; +} | null { + const trimmed = title.trim(); + if (trimmed.length === 0) return null; + + const { rest, group } = extractLeadingGroup(trimmed); + const { chapter, chapterRangeEnd } = extractChapter(rest); + const { volume, volumeRangeEnd } = extractVolume(rest); + const formatHints = extractFormatHints(rest); + const seriesGuess = extractSeriesGuess(rest, group); + + return { + seriesGuess, + chapter, + chapterRangeEnd, + volume, + volumeRangeEnd, + group, + formatHints, + }; +} + +// ----------------------------------------------------------------------------- +// Item parsing +// ----------------------------------------------------------------------------- + +function pubDateToIso(raw: string | null): string { + if (raw) { + const d = new Date(raw); + if (!Number.isNaN(d.getTime())) return d.toISOString(); + } + return new Date().toISOString(); +} + +function deriveExternalReleaseId( + guid: string | null, + link: string | null, + infoHash: string | null, + title: string, + pubDate: string | null, +): string { + if (guid && guid.trim().length > 0) return guid.trim(); + if (link && link.trim().length > 0) return link.trim(); + if (infoHash && infoHash.length > 0) return `urn:btih:${infoHash}`; + // Deterministic fallback: djb2-ish hash. Same algorithm MangaUpdates uses. + const fallback = `${title}|${pubDate ?? ""}`; + let h = 5381; + for (let i = 0; i < fallback.length; i++) { + h = ((h << 5) + h + fallback.charCodeAt(i)) | 0; + } + return `t:${(h >>> 0).toString(36)}`; +} + +/** + * Parse a single Nyaa `<item>` block. Returns null when the title is missing + * (truly malformed entry). 
+ */ +export function parseItem(itemXml: string): ParsedRssItem | null { + const title = extractTagText(itemXml, "title"); + if (!title) return null; + + const link = extractTagText(itemXml, "link"); + const guid = extractTagText(itemXml, "guid"); + const pubDate = extractTagText(itemXml, "pubDate"); + const infoHashRaw = extractTagText(itemXml, "nyaa:infoHash"); + const infoHash = infoHashRaw ? infoHashRaw.toLowerCase().trim() : null; + + const parsedTitle = parseTitle(title); + if (parsedTitle === null) return null; + + return { + externalReleaseId: deriveExternalReleaseId(guid, link, infoHash, title, pubDate), + title, + seriesGuess: parsedTitle.seriesGuess, + chapter: parsedTitle.chapter, + chapterRangeEnd: parsedTitle.chapterRangeEnd, + volume: parsedTitle.volume, + volumeRangeEnd: parsedTitle.volumeRangeEnd, + group: parsedTitle.group, + formatHints: parsedTitle.formatHints, + link: link ?? "", + infoHash, + observedAt: pubDateToIso(pubDate), + }; +} + +/** + * Parse a full Nyaa RSS feed body into structured items. Bad items (missing + * title) are dropped silently — Nyaa feeds occasionally include broken entries + * and we'd rather keep going than poison the whole poll. 
+ */ +export function parseFeed(xml: string): ParsedRssItem[] { + return splitItems(xml) + .map(parseItem) + .filter((i): i is ParsedRssItem => i !== null); +} diff --git a/plugins/release-nyaa/tsconfig.json b/plugins/release-nyaa/tsconfig.json new file mode 100644 index 00000000..ef1ca5f9 --- /dev/null +++ b/plugins/release-nyaa/tsconfig.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "NodeNext", + "moduleResolution": "NodeNext", + "lib": ["ES2022"], + "outDir": "./dist", + "rootDir": "./src", + "declaration": true, + "sourceMap": true, + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noImplicitReturns": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist", "**/*.test.ts"] +} diff --git a/plugins/release-nyaa/vitest.config.ts b/plugins/release-nyaa/vitest.config.ts new file mode 100644 index 00000000..ae847ff6 --- /dev/null +++ b/plugins/release-nyaa/vitest.config.ts @@ -0,0 +1,7 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + include: ["src/**/*.test.ts"], + }, +}); From ec17cfe16556eded02351520b50a6737b3d93565 Mon Sep 17 00:00:00 2001 From: Sylvain Cau <ashdevfr@gmail.com> Date: Mon, 4 May 2026 16:07:22 -0700 Subject: [PATCH 10/29] feat(release-tracking): persist notification preferences and clean up plugin config UI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Notification preferences for the Releases inbox lived only in an in-memory Zustand store, so language allowlists, plugin allowlists, and per-series mutes were lost on page reload. Move them to durable storage and clean up several adjacent rough edges in the plugin configuration UI. 
Persistence: - `release_tracking.notify_languages` and `release_tracking.notify_plugins` are now seeded as server-wide settings (Array, default `[]`) under the "Release Tracking" category. Admins manage them on the dedicated /settings/release-tracking page; the generic ServerSettings page hides the category to prevent two surfaces editing the same keys. - Per-series mute moves to `user_preferences.release_tracking.muted_series_ids` via the existing typed-preferences store (localStorage cache + debounced server sync). The series detail "Releases" panel gains a per-series mute toggle; the settings page shows the count and a "Clear all mutes" action. - The SSE handler in useEntityEvents replaces its store-based shouldNotify with a pure shouldNotifyRelease predicate that snapshots the latest settings from React Query and the latest mute list from the user-prefs store synchronously inside the callback. Bad JSON falls back to "no filter" so corrupted values never silently bypass filtering. - The release-announcements Zustand store is reduced to the unseen-count badge counter; everything else is durable now. Plugin configuration UI: - Surface the `releaseSource` capability flag on PluginCapabilitiesDto so the frontend can detect release-source plugins without parsing the manifest JSON. - Hide the Permissions / Scopes / Library Filter selectors on the Plugin Config modal for plugins whose only capability is releaseSource, userRecommendationProvider, or userReadSync. None of those flow through the row-level RBAC gate, the scoped action UI, or the library filter, so the empty selectors are misleading. Show an explanatory note instead. - Drop the dead sync branch from getScopeData / getPermissionData and the SYNC_SCOPES set; sync providers are gated by manifest capability only. - Plugins without a manifest still see the "test connection to discover capabilities" warning so first-time setup behavior is preserved. 
Plugin marketplace + seed: - Add release-mangaupdates and release-nyaa to the Official Plugins carousel under a new "Releases" type (orange badge). - Update seed-config.sample.yaml to drop the cargo-cult `metadata:read` permission from non-metadata plugins (recommendations, sync, release-*); it was decorative — those plugins are gated by manifest capability, not RBAC. Comments inline explain why. Adjacent cleanups: - Replace the "Plugin sources" TagsInput on the Release Tracking settings page with a MultiSelect populated from the registered-plugins list, filtered to release-source plugins. Stale entries (in the allowlist but not currently installed) render with a "(not installed)" suffix so admins can see + remove them. Tests cover the new shouldNotifyRelease predicate (mute, language allowlist case-insensitivity, plugin allowlist, bad-JSON fallback), the no-permissions branch on the Plugin Config modal for release/recommendation/sync plugins, and the new dropdown behavior on the Release Tracking settings page. 
--- config/seed-config.sample.yaml | 32 ++- docs/api/openapi.json | 4 + migration/src/lib.rs | 4 + ...76_seed_release_tracking_notify_filters.rs | 127 ++++++++++++ src/api/routes/v1/dto/plugins.rs | 6 + web/openapi.json | 4 + .../forms/PluginConfigModal.test.tsx | 85 ++++++++ .../forms/plugin-config/PermissionsTab.tsx | 34 ++++ .../components/forms/plugin-config/types.ts | 47 +++-- .../components/series/SeriesReleasesPanel.tsx | 60 +++++- web/src/hooks/useEntityEvents.test.ts | 73 ++++++- web/src/hooks/useEntityEvents.ts | 81 +++++++- .../pages/settings/PluginsSettings.test.tsx | 2 + .../settings/ReleaseTrackingSettings.test.tsx | 63 ++++++ .../settings/ReleaseTrackingSettings.tsx | 187 ++++++++++++++++-- web/src/pages/settings/ServerSettings.tsx | 9 +- .../settings/plugins/OfficialPlugins.tsx | 35 +++- .../store/releaseAnnouncementsStore.test.ts | 77 +------- web/src/store/releaseAnnouncementsStore.ts | 74 ++----- web/src/types/api.generated.ts | 5 + web/src/types/preferences.ts | 10 + 21 files changed, 832 insertions(+), 187 deletions(-) create mode 100644 migration/src/m20260504_000076_seed_release_tracking_notify_filters.rs diff --git a/config/seed-config.sample.yaml b/config/seed-config.sample.yaml index e63987ae..a5b3867c 100644 --- a/config/seed-config.sample.yaml +++ b/config/seed-config.sample.yaml @@ -88,26 +88,50 @@ plugins: credential_delivery: env # AniList Recommendations - Personalized manga recommendations + # Recommendation plugins are gated by manifest capability + per-user enable; + # they don't write metadata, so no `permissions` or `scopes` are needed. 
- name: recommendations-anilist display_name: AniList Recommendations description: Personalized manga recommendations from AniList based on reading history plugin_type: user command: node args: ["/opt/codex/plugins/recommendations-anilist/dist/index.js"] - permissions: - - "metadata:read" + permissions: [] scopes: [] credential_delivery: env # AniList Sync - Reading progress sync + # Sync plugins are gated by manifest capability + per-user enable; they don't + # write series/book metadata, so no `permissions` or `scopes` are needed. - name: sync-anilist display_name: AniList Sync description: Sync manga reading progress between Codex and AniList plugin_type: user command: node args: ["/opt/codex/plugins/sync-anilist/dist/index.js"] - permissions: - - "metadata:read" + permissions: [] + scopes: [] + credential_delivery: env + + # MangaUpdates Releases - Translation/scanlation release feed (no credentials needed) + # Release-source plugins are gated by manifest capability at reverse-RPC + # dispatch; they don't write metadata, so no `permissions` or `scopes` are needed. 
+ - name: release-mangaupdates + display_name: MangaUpdates Releases + description: Announces new chapter releases for tracked series via MangaUpdates per-series RSS feeds + command: node + args: ["/opt/codex/plugins/release-mangaupdates/dist/index.js"] + permissions: [] + scopes: [] + credential_delivery: env + + # Nyaa Releases - Acquisition-pointer source for trusted uploaders (no credentials needed) + - name: release-nyaa + display_name: Nyaa Releases + description: Announces new chapter / volume torrents for tracked series via Nyaa.si uploader RSS feeds + command: node + args: ["/opt/codex/plugins/release-nyaa/dist/index.js"] + permissions: [] scopes: [] credential_delivery: env diff --git a/docs/api/openapi.json b/docs/api/openapi.json index 4aaa6216..8a878e34 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -30653,6 +30653,10 @@ }, "description": "Content types this plugin can provide metadata for (e.g., [\"series\", \"book\"])" }, + "releaseSource": { + "type": "boolean", + "description": "Whether the plugin declares the `release_source` capability (announces\nnew chapter / volume releases for tracked series)." 
+ }, "userReadSync": { "type": "boolean", "description": "Can sync user reading progress" diff --git a/migration/src/lib.rs b/migration/src/lib.rs index 7a641ed6..9655131a 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -155,6 +155,8 @@ mod m20260503_000073_create_release_ledger; mod m20260504_000074_add_tracking_languages; // Release tracking (Phase 6): server-wide default language list mod m20260504_000075_seed_release_tracking_languages; +// Release tracking (Phase 8 follow-up): server-wide notification filter settings +mod m20260504_000076_seed_release_tracking_notify_filters; pub struct Migrator; @@ -280,6 +282,8 @@ impl MigratorTrait for Migrator { Box::new(m20260504_000074_add_tracking_languages::Migration), // Release tracking (Phase 6): server-wide default language list Box::new(m20260504_000075_seed_release_tracking_languages::Migration), + // Release tracking (Phase 8 follow-up): notification filter settings + Box::new(m20260504_000076_seed_release_tracking_notify_filters::Migration), ] } } diff --git a/migration/src/m20260504_000076_seed_release_tracking_notify_filters.rs b/migration/src/m20260504_000076_seed_release_tracking_notify_filters.rs new file mode 100644 index 00000000..105a8c73 --- /dev/null +++ b/migration/src/m20260504_000076_seed_release_tracking_notify_filters.rs @@ -0,0 +1,127 @@ +//! Seed the server-wide `release_tracking.notify_languages` and +//! `release_tracking.notify_plugins` settings (Phase 8 follow-up). +//! +//! These two arrays filter the in-app `release_announced` notification stream +//! (toasts + Releases nav badge): +//! +//! - `notify_languages` (ISO 639-1, default `[]`): when non-empty, only +//! announcements whose `language` is in this list bump the badge / surface +//! a toast. Empty = "let everything through." +//! +//! - `notify_plugins` (plugin IDs, default `[]`): when non-empty, only +//! announcements emitted by a plugin in this list bump the badge / surface +//! a toast. 
Empty = "all installed release-source plugins are allowed." +//! +//! These filters are server-wide because all admins of a Codex instance share +//! the same notification stream. Per-series mute lives on +//! `user_preferences.release_tracking.muted_series_ids` (per-user) — the +//! distinction is that muting individual series is a personal-pref override +//! over what would otherwise be a shared global notification, while the +//! language / plugin allowlists shape the global stream itself. +//! +//! Defaults are empty arrays (no filtering) so a fresh install behaves like +//! the old in-memory store: every announcement notifies. Admins can tighten +//! later via the `/settings/release-tracking` page. + +use sea_orm::{ActiveModelTrait, Set, Statement, entity::prelude::*}; +use sea_orm_migration::prelude::*; +use uuid::Uuid; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "settings")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: Uuid, + pub key: String, + pub value: String, + pub value_type: String, + pub category: String, + pub description: String, + pub is_sensitive: bool, + pub default_value: String, + pub validation_rules: Option<String>, + pub min_value: Option<i64>, + pub max_value: Option<i64>, + pub updated_at: chrono::DateTime<chrono::Utc>, + pub updated_by: Option<Uuid>, + pub version: i32, + pub deleted_at: Option<chrono::DateTime<chrono::Utc>>, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} + +const KEYS: &[(&str, &str)] = &[ + ( + "release_tracking.notify_languages", + "Server-wide allowlist of ISO 639-1 language codes for release-tracking notifications. When non-empty, only announcements whose language is in this list bump the Releases badge and surface a toast. 
Empty array = let everything through.", + ), + ( + "release_tracking.notify_plugins", + "Server-wide allowlist of release-source plugin IDs whose announcements should bump the Releases badge and surface a toast. Empty array = all installed release-source plugins are allowed.", + ), +]; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db = manager.get_connection(); + + for (key, description) in KEYS { + // Idempotent seed. Static string concat is safe; `key` is a + // compile-time constant from the KEYS table. + let exists = db + .query_one(Statement::from_string( + manager.get_database_backend(), + format!( + "SELECT COUNT(*) as count FROM settings WHERE key = '{}'", + key + ), + )) + .await?; + if let Some(row) = exists { + let count: i64 = row.try_get("", "count")?; + if count > 0 { + continue; + } + } + + let setting = ActiveModel { + id: Set(Uuid::new_v4()), + key: Set((*key).to_string()), + value: Set("[]".to_string()), + value_type: Set("Array".to_string()), + category: Set("Release Tracking".to_string()), + description: Set((*description).to_string()), + is_sensitive: Set(false), + default_value: Set("[]".to_string()), + validation_rules: Set(None), + min_value: Set(None), + max_value: Set(None), + updated_at: Set(chrono::Utc::now()), + updated_by: Set(None), + version: Set(1), + deleted_at: Set(None), + }; + setting.insert(db).await?; + } + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db = manager.get_connection(); + for (key, _) in KEYS { + db.execute(Statement::from_string( + manager.get_database_backend(), + format!("DELETE FROM settings WHERE key = '{}'", key), + )) + .await?; + } + Ok(()) + } +} diff --git a/src/api/routes/v1/dto/plugins.rs b/src/api/routes/v1/dto/plugins.rs index 80de669c..4499ea9d 100644 --- a/src/api/routes/v1/dto/plugins.rs +++ b/src/api/routes/v1/dto/plugins.rs @@ -403,10 +403,15 @@ pub struct 
PluginCapabilitiesDto { /// Can provide personalized recommendations #[serde(default)] pub user_recommendation_provider: bool, + /// Whether the plugin declares the `release_source` capability (announces + /// new chapter / volume releases for tracked series). + #[serde(default)] + pub release_source: bool, } impl From<PluginCapabilities> for PluginCapabilitiesDto { fn from(c: PluginCapabilities) -> Self { + let release_source = c.is_release_source(); Self { metadata_provider: c .metadata_provider @@ -416,6 +421,7 @@ impl From<PluginCapabilities> for PluginCapabilitiesDto { user_read_sync: c.user_read_sync, external_id_source: c.external_id_source, user_recommendation_provider: c.user_recommendation_provider, + release_source, } } } diff --git a/web/openapi.json b/web/openapi.json index 4aaa6216..8a878e34 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -30653,6 +30653,10 @@ }, "description": "Content types this plugin can provide metadata for (e.g., [\"series\", \"book\"])" }, + "releaseSource": { + "type": "boolean", + "description": "Whether the plugin declares the `release_source` capability (announces\nnew chapter / volume releases for tracked series)." 
+ }, "userReadSync": { "type": "boolean", "description": "Can sync user reading progress" diff --git a/web/src/components/forms/PluginConfigModal.test.tsx b/web/src/components/forms/PluginConfigModal.test.tsx index 0b3c9aad..508416ba 100644 --- a/web/src/components/forms/PluginConfigModal.test.tsx +++ b/web/src/components/forms/PluginConfigModal.test.tsx @@ -297,4 +297,89 @@ describe("PluginConfigModal", () => { screen.queryByText("Configure: Test Plugin"), ).not.toBeInTheDocument(); }); + + it("hides permission selectors and shows an explanatory note for release-source plugins", () => { + const plugin = createMockPlugin({ + manifest: { + name: "release-mangaupdates", + displayName: "MangaUpdates Releases", + version: "1.0.0", + protocolVersion: "1.0", + capabilities: { releaseSource: true }, + contentTypes: [], + } as never, + }); + + renderWithProviders( + <PluginConfigModal + plugin={plugin} + opened={true} + onClose={vi.fn()} + libraries={mockLibraries} + />, + ); + + expect( + screen.getByText(/No permission settings for this plugin/), + ).toBeInTheDocument(); + expect(screen.queryByPlaceholderText("Select permissions")).toBeNull(); + expect(screen.queryByPlaceholderText("Select scopes")).toBeNull(); + expect(screen.queryByText("Library Filter")).toBeNull(); + }); + + it("hides permission selectors for recommendation-only plugins", () => { + const plugin = createMockPlugin({ + manifest: { + name: "recommendations-anilist", + displayName: "AniList Recommendations", + version: "1.0.0", + protocolVersion: "1.0", + capabilities: { userRecommendationProvider: true }, + contentTypes: [], + } as never, + }); + + renderWithProviders( + <PluginConfigModal + plugin={plugin} + opened={true} + onClose={vi.fn()} + libraries={mockLibraries} + />, + ); + + expect( + screen.getByText(/No permission settings for this plugin/), + ).toBeInTheDocument(); + expect(screen.queryByPlaceholderText("Select permissions")).toBeNull(); + }); + + it("hides permission selectors for 
sync-only plugins", () => { + const plugin = createMockPlugin({ + manifest: { + name: "sync-anilist", + displayName: "AniList Sync", + version: "1.0.0", + protocolVersion: "1.0", + capabilities: { metadataProvider: [], userReadSync: true }, + contentTypes: [], + } as never, + }); + + renderWithProviders( + <PluginConfigModal + plugin={plugin} + opened={true} + onClose={vi.fn()} + libraries={mockLibraries} + />, + ); + + expect( + screen.getByText(/No permission settings for this plugin/), + ).toBeInTheDocument(); + expect(screen.queryByPlaceholderText("Select permissions")).toBeNull(); + expect(screen.queryByPlaceholderText("Select scopes")).toBeNull(); + expect(screen.queryByText("Library Filter")).toBeNull(); + }); }); diff --git a/web/src/components/forms/plugin-config/PermissionsTab.tsx b/web/src/components/forms/plugin-config/PermissionsTab.tsx index 985ce31f..66729861 100644 --- a/web/src/components/forms/plugin-config/PermissionsTab.tsx +++ b/web/src/components/forms/plugin-config/PermissionsTab.tsx @@ -11,6 +11,10 @@ import type { PluginDto } from "@/api/plugins"; import { getPermissionData, getScopeData, + hasPermissionableSurface, + isRecommendationProvider, + isReleaseSource, + isSyncProvider, type PluginConfigForm, } from "./types"; @@ -25,6 +29,36 @@ export function PermissionsTab({ form, libraries, }: PermissionsTabProps) { + // Plugins whose only capabilities are `releaseSource`, + // `userRecommendationProvider`, or `userReadSync` don't go through the RBAC + // permission gate, don't expose scoped UI actions, and aren't + // library-filtered. Render an explanatory note instead of empty selectors. + if (!hasPermissionableSurface(plugin)) { + const capabilityLabel = isReleaseSource(plugin) + ? "Release-source" + : isRecommendationProvider(plugin) + ? "Recommendation" + : isSyncProvider(plugin) + ? 
"Sync" + : null; + return ( + <Stack gap="md"> + <Alert + icon={<IconInfoCircle size={16} />} + color="blue" + variant="light" + title="No permission settings for this plugin" + > + <Text size="sm"> + {capabilityLabel + ? `${capabilityLabel} plugins are gated by their manifest capability — they don't write metadata, don't expose scoped UI actions, and aren't library-filtered. There is nothing to configure on this tab.` + : "This plugin doesn't expose any capability that uses permissions, scopes, or the library filter."} + </Text> + </Alert> + </Stack> + ); + } + const permissionInfo = getPermissionData(plugin); const scopeData = getScopeData(plugin); diff --git a/web/src/components/forms/plugin-config/types.ts b/web/src/components/forms/plugin-config/types.ts index 434dd35e..737b496c 100644 --- a/web/src/components/forms/plugin-config/types.ts +++ b/web/src/components/forms/plugin-config/types.ts @@ -15,6 +15,40 @@ export function isSyncProvider(plugin: PluginDto): boolean { return plugin.manifest?.capabilities?.userReadSync === true; } +export function isReleaseSource(plugin: PluginDto): boolean { + return plugin.manifest?.capabilities?.releaseSource === true; +} + +export function isRecommendationProvider(plugin: PluginDto): boolean { + return plugin.manifest?.capabilities?.userRecommendationProvider === true; +} + +/** + * Returns true if the plugin has any capability for which permissions, + * scopes, or the library filter actually do something. + * + * Only metadata providers go through these row-level controls: + * - `permissions` are checked on the metadata-apply path + * (`src/services/metadata/apply.rs`, `book_apply.rs`). + * - `scopes` + `library_ids` are checked when the UI lists plugin actions + * for a series/book/library context + * (`src/services/plugin/manager.rs::plugins_by_scope_and_library`). 
+ * + * Release-source, recommendation, and sync plugins are gated only by + * manifest capability (checked at reverse-RPC dispatch in + * `src/services/plugin/permissions.rs`); they don't write metadata, don't + * expose scoped UI actions, and aren't library-filtered. Showing those + * fields when they have no effect is misleading — an empty state suggests + * "you forgot to configure something" when there's nothing to configure. + * + * Plugins without a manifest are considered permissionable so the existing + * "test this plugin to discover its capabilities" warning still triggers. + */ +export function hasPermissionableSurface(plugin: PluginDto): boolean { + if (!hasManifest(plugin)) return true; + return isMetadataProvider(plugin); +} + export function isOAuthPlugin(plugin: PluginDto): boolean { return plugin.manifest?.oauth != null; } @@ -65,7 +99,6 @@ const LIBRARY_PERMISSION_VALUES = new Set( export function getPermissionData(plugin: PluginDto) { const isMeta = isMetadataProvider(plugin); - const isSync = isSyncProvider(plugin); const noManifest = !hasManifest(plugin); if (noManifest) { @@ -98,9 +131,6 @@ export function getPermissionData(plugin: PluginDto) { METADATA_PERMISSION_VALUES.has(p.value), ).map((p) => ({ value: p.value, label: p.label })), }); - } - - if (isSync || isMeta) { groups.push({ group: "Library", items: AVAILABLE_PERMISSIONS.filter((p) => @@ -132,13 +162,6 @@ const BOOK_SCOPES = new Set([ "library:scan", ]); -// Sync providers operate at series/library level -const SYNC_SCOPES = new Set([ - "series:detail", - "library:detail", - "library:scan", -]); - export function getScopeData(plugin: PluginDto) { const noManifest = !hasManifest(plugin); @@ -149,12 +172,10 @@ export function getScopeData(plugin: PluginDto) { const metadataTargets = plugin.manifest?.capabilities?.metadataProvider ?? 
[]; const canSeries = metadataTargets.includes("series"); const canBook = metadataTargets.includes("book"); - const isSync = isSyncProvider(plugin); const allowed = new Set<string>(); if (canSeries) for (const s of SERIES_SCOPES) allowed.add(s); if (canBook) for (const s of BOOK_SCOPES) allowed.add(s); - if (isSync) for (const s of SYNC_SCOPES) allowed.add(s); return AVAILABLE_SCOPES.filter((s) => allowed.has(s.value)).map((s) => ({ value: s.value, diff --git a/web/src/components/series/SeriesReleasesPanel.tsx b/web/src/components/series/SeriesReleasesPanel.tsx index fbc85b53..6673b4ef 100644 --- a/web/src/components/series/SeriesReleasesPanel.tsx +++ b/web/src/components/series/SeriesReleasesPanel.tsx @@ -11,6 +11,8 @@ import { Tooltip, } from "@mantine/core"; import { + IconBellOff, + IconBellRinging, IconCheck, IconExternalLink, IconRss, @@ -24,6 +26,7 @@ import { useMarkReleaseAcquired, useSeriesReleases, } from "@/hooks/useReleases"; +import { useUserPreference } from "@/hooks/useUserPreference"; interface SeriesReleasesPanelProps { seriesId: string; @@ -49,6 +52,20 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { const [showDismissed, setShowDismissed] = useState(false); const stateFilter = showDismissed ? undefined : "announced"; + // Per-user mute. Persisted via the user_preferences store with localStorage + // caching + debounced server sync. 
+ const [mutedSeriesIds, setMutedSeriesIds] = useUserPreference( + "release_tracking.muted_series_ids", + ); + const isMuted = mutedSeriesIds.includes(seriesId); + const toggleMute = () => { + if (isMuted) { + setMutedSeriesIds(mutedSeriesIds.filter((id) => id !== seriesId)); + } else { + setMutedSeriesIds([...mutedSeriesIds, seriesId]); + } + }; + const { data, isLoading } = useSeriesReleases(seriesId, { state: stateFilter, pageSize: 100, @@ -98,15 +115,42 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { <Badge color="gray" variant="light" size="sm"> {data?.pagination.total ?? 0} </Badge> + {isMuted && ( + <Badge color="orange" variant="light" size="sm"> + Muted + </Badge> + )} + </Group> + <Group gap="xs"> + <Tooltip + label={ + isMuted + ? "Re-enable announcement toasts and badge for this series" + : "Stop announcement toasts and badge for this series (your account only)" + } + > + <ActionIcon + variant="subtle" + color={isMuted ? "orange" : "gray"} + onClick={toggleMute} + aria-label={isMuted ? "Unmute releases" : "Mute releases"} + > + {isMuted ? ( + <IconBellOff size={16} /> + ) : ( + <IconBellRinging size={16} /> + )} + </ActionIcon> + </Tooltip> + <Anchor + component="button" + type="button" + size="sm" + onClick={() => setShowDismissed((prev) => !prev)} + > + {showDismissed ? "Hide dismissed" : "Show all states"} + </Anchor> </Group> - <Anchor - component="button" - type="button" - size="sm" - onClick={() => setShowDismissed((prev) => !prev)} - > - {showDismissed ? "Hide dismissed" : "Show all states"} - </Anchor> </Group> {groups.length === 0 ? 
( diff --git a/web/src/hooks/useEntityEvents.test.ts b/web/src/hooks/useEntityEvents.test.ts index 81cb9277..8fcd6126 100644 --- a/web/src/hooks/useEntityEvents.test.ts +++ b/web/src/hooks/useEntityEvents.test.ts @@ -7,7 +7,7 @@ import * as eventsApi from "@/api/events"; import { useAuthStore } from "@/store/authStore"; import { useCoverUpdatesStore } from "@/store/coverUpdatesStore"; import type { EntityChangeEvent } from "@/types"; -import { useEntityEvents } from "./useEntityEvents"; +import { shouldNotifyRelease, useEntityEvents } from "./useEntityEvents"; // Mock the events API vi.mock("@/api/events"); @@ -509,3 +509,74 @@ describe("useEntityEvents", () => { consoleError.mockRestore(); }); }); + +// ============================================================================= +// shouldNotifyRelease — pure filter predicate +// ============================================================================= + +describe("shouldNotifyRelease", () => { + const baseParams = { + seriesId: "s1", + pluginId: "release-nyaa", + language: "en", + notifyLanguagesValue: undefined, + notifyPluginsValue: undefined, + mutedSeriesIds: [] as readonly string[], + }; + + it("lets everything through when filters are empty", () => { + expect(shouldNotifyRelease(baseParams)).toBe(true); + }); + + it("blocks events for muted series", () => { + expect(shouldNotifyRelease({ ...baseParams, mutedSeriesIds: ["s1"] })).toBe( + false, + ); + }); + + it("enforces the language allowlist (case-insensitive)", () => { + // Allowlist is `["EN"]` (uppercase) and event language is `"en"` — + // the predicate normalizes both sides. 
+ expect( + shouldNotifyRelease({ + ...baseParams, + notifyLanguagesValue: '["EN"]', + language: "en", + }), + ).toBe(true); + expect( + shouldNotifyRelease({ + ...baseParams, + notifyLanguagesValue: '["en"]', + language: "es", + }), + ).toBe(false); + }); + + it("enforces the plugin allowlist", () => { + expect( + shouldNotifyRelease({ + ...baseParams, + notifyPluginsValue: '["release-mangaupdates"]', + pluginId: "release-mangaupdates", + }), + ).toBe(true); + expect( + shouldNotifyRelease({ + ...baseParams, + notifyPluginsValue: '["release-mangaupdates"]', + pluginId: "release-nyaa", + }), + ).toBe(false); + }); + + it("treats invalid JSON in setting values as 'no filter'", () => { + expect( + shouldNotifyRelease({ + ...baseParams, + notifyLanguagesValue: "{not valid json}", + notifyPluginsValue: "also broken", + }), + ).toBe(true); + }); +}); diff --git a/web/src/hooks/useEntityEvents.ts b/web/src/hooks/useEntityEvents.ts index 24090583..086a6360 100644 --- a/web/src/hooks/useEntityEvents.ts +++ b/web/src/hooks/useEntityEvents.ts @@ -5,6 +5,7 @@ import { eventsApi } from "@/api/events"; import { useAuthStore } from "@/store/authStore"; import { useCoverUpdatesStore } from "@/store/coverUpdatesStore"; import { useReleaseAnnouncementsStore } from "@/store/releaseAnnouncementsStore"; +import { useUserPreferencesStore } from "@/store/userPreferencesStore"; import type { EntityChangeEvent } from "@/types"; import { createDevLog } from "@/utils/devLog"; @@ -12,6 +13,60 @@ type ConnectionState = "connecting" | "connected" | "disconnected" | "failed"; const log = createDevLog("[SSE]"); +/** Best-effort decode of a JSON-array string (settings + user_preferences + * values are stored as JSON-encoded strings). Non-string entries and parse + * failures collapse to an empty list. */ +function parseStringArray(value: string | undefined | null): string[] { + if (!value) return []; + try { + const parsed = JSON.parse(value); + return Array.isArray(parsed) + ? 
parsed.filter((v): v is string => typeof v === "string") + : []; + } catch { + return []; + } +} + +/** + * Decide whether a `release_announced` event should bump the badge / surface + * a toast for the current user. + * + * Three filters apply (in order): + * 1. Per-user mute (user_preferences) — drops the event for muted series. + * 2. Server-wide language allowlist — empty = let everything through. + * 3. Server-wide plugin allowlist — empty = let everything through. + * + * Pure helper, exported only for testing. + */ +export function shouldNotifyRelease(params: { + seriesId: string; + pluginId: string; + language: string; + notifyLanguagesValue: string | undefined | null; + notifyPluginsValue: string | undefined | null; + mutedSeriesIds: readonly string[]; +}): boolean { + if (params.mutedSeriesIds.includes(params.seriesId)) return false; + + const allowedLanguages = parseStringArray(params.notifyLanguagesValue).map( + (l) => l.toLowerCase(), + ); + if ( + allowedLanguages.length > 0 && + !allowedLanguages.includes(params.language.toLowerCase()) + ) { + return false; + } + + const allowedPlugins = parseStringArray(params.notifyPluginsValue); + if (allowedPlugins.length > 0 && !allowedPlugins.includes(params.pluginId)) { + return false; + } + + return true; +} + /** * React hook that subscribes to entity change events and automatically * invalidates relevant React Query caches when entities are created, @@ -231,17 +286,37 @@ function handleEntityEvent( } case "release_announced": { - const store = useReleaseAnnouncementsStore.getState(); + // Snapshot the latest filter state synchronously inside the SSE + // callback so the predicate sees fresh data on every event. + // + // Server-wide allowlists live in React Query cache (loaded by the + // settings page); per-user mutes live in the userPreferences store + // (auto-loaded + persisted to localStorage with debounced sync). 
+ // + // The query keys here MUST match what the settings page uses — kept + // in sync explicitly so a typo doesn't silently bypass filtering. + const notifyLanguagesSetting = queryClient.getQueryData<{ + value?: string; + }>(["admin-setting", "release_tracking.notify_languages"]); + const notifyPluginsSetting = queryClient.getQueryData<{ + value?: string; + }>(["admin-setting", "release_tracking.notify_plugins"]); + const mutedSeriesIds = useUserPreferencesStore + .getState() + .getPreference("release_tracking.muted_series_ids"); if ( - !store.shouldNotify({ + !shouldNotifyRelease({ seriesId: event.seriesId, pluginId: event.pluginId, language: event.language ?? "", + notifyLanguagesValue: notifyLanguagesSetting?.value, + notifyPluginsValue: notifyPluginsSetting?.value, + mutedSeriesIds, }) ) { break; } - store.bump(); + useReleaseAnnouncementsStore.getState().bump(); // Refresh inbox + per-series ledger views in case the user is // watching them. diff --git a/web/src/pages/settings/PluginsSettings.test.tsx b/web/src/pages/settings/PluginsSettings.test.tsx index df50a4bc..d63c2cf1 100644 --- a/web/src/pages/settings/PluginsSettings.test.tsx +++ b/web/src/pages/settings/PluginsSettings.test.tsx @@ -1749,6 +1749,8 @@ describe("PluginsSettings - Official Plugins section", () => { expect(screen.getByText("Recommendations")).toBeInTheDocument(); // "Metadata" appears for Echo, Mangabaka, and Open Library plugins expect(screen.getAllByText("Metadata").length).toBeGreaterThanOrEqual(3); + // "Releases" appears for MangaUpdates and Nyaa plugins + expect(screen.getAllByText("Releases").length).toBeGreaterThanOrEqual(2); }); }); diff --git a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx index 3004ed4c..e269abf6 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx @@ -1,4 +1,5 @@ import { beforeEach, describe, expect, it, vi } 
from "vitest"; +import { pluginsApi } from "@/api/plugins"; import { type ReleaseSource, releaseSourcesApi, @@ -22,9 +23,21 @@ vi.mock("@/api/releases", () => ({ }, })); +vi.mock("@/api/plugins", async (importOriginal) => { + const actual = await importOriginal<typeof import("@/api/plugins")>(); + return { + ...actual, + pluginsApi: { + ...actual.pluginsApi, + getAll: vi.fn(), + }, + }; +}); + const list = vi.mocked(releaseSourcesApi.list); const update = vi.mocked(releaseSourcesApi.update); const pollNow = vi.mocked(releaseSourcesApi.pollNow); +const getAllPlugins = vi.mocked(pluginsApi.getAll); function source(over: Partial<ReleaseSource> = {}): ReleaseSource { return { @@ -50,6 +63,8 @@ describe("ReleaseTrackingSettings", () => { beforeEach(() => { vi.clearAllMocks(); void releasesApi; + // Default: no plugins installed. Individual tests override as needed. + getAllPlugins.mockResolvedValue({ plugins: [], total: 0 }); }); it("renders sources and the OK status when last poll is fresh", async () => { @@ -115,4 +130,52 @@ describe("ReleaseTrackingSettings", () => { ); }); }); + + it("plugin-sources dropdown lists release-source plugins by display name", async () => { + list.mockResolvedValue([]); + // One release-source plugin + one metadata plugin to confirm filtering. + getAllPlugins.mockResolvedValue({ + plugins: [ + { + id: "p1", + name: "release-mangaupdates", + displayName: "MangaUpdates Releases", + manifest: { + name: "release-mangaupdates", + displayName: "MangaUpdates Releases", + capabilities: { releaseSource: true }, + }, + // The remaining PluginDto fields don't matter for this test. 
+ } as never, + { + id: "p2", + name: "metadata-mangabaka", + displayName: "MangaBaka", + manifest: { + name: "metadata-mangabaka", + displayName: "MangaBaka", + capabilities: { metadataProvider: ["series"] }, + }, + } as never, + ], + total: 2, + }); + + const user = userEvent.setup(); + renderWithProviders(<ReleaseTrackingSettings />); + // Wait for the plugins query to settle (the dropdown only renders the + // release-source options once `pluginsApi.getAll` resolves). + await waitFor(() => { + expect(getAllPlugins).toHaveBeenCalled(); + }); + // Mantine MultiSelect renders an input with role=textbox associated with + // the label; clicking it opens the dropdown and shows the options. + const select = screen.getByRole("textbox", { name: "Plugin sources" }); + await user.click(select); + await waitFor(() => { + expect(screen.getByText("MangaUpdates Releases")).toBeInTheDocument(); + }); + // Metadata-only plugin is filtered out — should not appear as an option. + expect(screen.queryByText("MangaBaka")).not.toBeInTheDocument(); + }); }); diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx index 17834ecb..9724ada6 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -2,9 +2,11 @@ import { ActionIcon, Badge, Box, + Button, Card, Group, Loader, + MultiSelect, NumberInput, Stack, Switch, @@ -14,21 +16,43 @@ import { Title, Tooltip, } from "@mantine/core"; +import { notifications } from "@mantine/notifications"; import { IconAlertCircle, IconBellRinging, IconClockHour4, IconRefresh, + IconTrash, } from "@tabler/icons-react"; +import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; import { formatDistanceToNow } from "date-fns"; -import { useState } from "react"; +import { useMemo, useState } from "react"; +import { pluginsApi } from "@/api/plugins"; import type { ReleaseSource } from "@/api/releases"; +import { 
settingsApi } from "@/api/settings"; import { usePollReleaseSourceNow, useReleaseSources, useUpdateReleaseSource, } from "@/hooks/useReleases"; -import { useReleaseAnnouncementsStore } from "@/store/releaseAnnouncementsStore"; +import { useUserPreference } from "@/hooks/useUserPreference"; + +const SETTING_NOTIFY_LANGUAGES = "release_tracking.notify_languages"; +const SETTING_NOTIFY_PLUGINS = "release_tracking.notify_plugins"; +const PREF_MUTED_SERIES = "release_tracking.muted_series_ids"; + +/** Parse a settings-table JSON-array value back to a string list. */ +function parseArraySetting(value: string | undefined | null): string[] { + if (!value) return []; + try { + const parsed = JSON.parse(value); + return Array.isArray(parsed) + ? parsed.filter((v): v is string => typeof v === "string") + : []; + } catch { + return []; + } +} const PRESETS = [ { value: 3600, label: "1h" }, @@ -133,17 +157,103 @@ export function ReleaseTrackingSettings() { } function NotificationPreferencesCard() { - const allowedLanguages = useReleaseAnnouncementsStore( - (s) => s.allowedLanguages, - ); - const allowedPlugins = useReleaseAnnouncementsStore((s) => s.allowedPlugins); - const setAllowedLanguages = useReleaseAnnouncementsStore( - (s) => s.setAllowedLanguages, + const queryClient = useQueryClient(); + + // Server-wide notify allowlists (admin-managed, persisted in `settings`). + const notifyLanguagesQuery = useQuery({ + queryKey: ["admin-setting", SETTING_NOTIFY_LANGUAGES], + queryFn: () => settingsApi.get(SETTING_NOTIFY_LANGUAGES), + }); + const notifyPluginsQuery = useQuery({ + queryKey: ["admin-setting", SETTING_NOTIFY_PLUGINS], + queryFn: () => settingsApi.get(SETTING_NOTIFY_PLUGINS), + }); + + // Per-user mute list (persisted in user_preferences via the user-prefs + // store, with localStorage caching + debounced server sync). Used here + // only for the count display + "Clear all mutes" action; per-series + // toggle lives on each series detail page. 
+ const [mutedSeriesIds, setMutedSeriesIds] = + useUserPreference(PREF_MUTED_SERIES); + + // Pull every registered plugin so we can show release-source ones in the + // dropdown. Stale entries (in the allowlist but no longer installed) keep + // their slot in the option list so admins can see + remove them. + const pluginsQuery = useQuery({ + queryKey: ["plugins"], + queryFn: pluginsApi.getAll, + }); + + const allowedLanguages = useMemo( + () => parseArraySetting(notifyLanguagesQuery.data?.value), + [notifyLanguagesQuery.data], ); - const setAllowedPlugins = useReleaseAnnouncementsStore( - (s) => s.setAllowedPlugins, + const allowedPlugins = useMemo( + () => parseArraySetting(notifyPluginsQuery.data?.value), + [notifyPluginsQuery.data], ); + const pluginOptions = useMemo(() => { + const registered = (pluginsQuery.data?.plugins ?? []).filter( + (p) => p.manifest?.capabilities?.releaseSource === true, + ); + const seen = new Set<string>(); + const opts: { value: string; label: string }[] = []; + for (const p of registered) { + seen.add(p.name); + opts.push({ + value: p.name, + label: p.manifest?.displayName ?? p.name, + }); + } + for (const id of allowedPlugins) { + if (!seen.has(id)) { + opts.push({ value: id, label: `${id} (not installed)` }); + } + } + return opts; + }, [pluginsQuery.data, allowedPlugins]); + + // Persist a setting back to the server. Lower-cases language codes so the + // backend filter (`shouldNotify`) doesn't need to re-normalize. + const updateSettingMutation = useMutation({ + mutationFn: ({ key, values }: { key: string; values: string[] }) => + settingsApi.update(key, { value: JSON.stringify(values) }), + onSuccess: (_data, vars) => { + queryClient.invalidateQueries({ + queryKey: ["admin-setting", vars.key], + }); + }, + onError: (err: Error) => + notifications.show({ + title: "Failed to save", + message: err.message ?? 
"Could not update notification preferences.", + color: "red", + }), + }); + + const clearMutes = () => { + setMutedSeriesIds([]); + notifications.show({ + title: "Mutes cleared", + message: "All per-series mutes have been removed.", + color: "green", + }); + }; + + const setAllowedLanguages = (values: string[]) => + updateSettingMutation.mutate({ + key: SETTING_NOTIFY_LANGUAGES, + values: values + .map((v) => v.trim().toLowerCase()) + .filter((v) => v.length > 0), + }); + const setAllowedPlugins = (values: string[]) => + updateSettingMutation.mutate({ + key: SETTING_NOTIFY_PLUGINS, + values, + }); + return ( <Card withBorder padding="md" radius="md"> <Stack gap="sm"> @@ -153,23 +263,60 @@ function NotificationPreferencesCard() { </Group> <Text size="xs" c="dimmed"> Filter announcement toasts and the Releases nav badge. Empty means "no - filter — let everything through." Per-series mute lives on each series - detail page. + filter — let everything through." Server-wide for languages and plugin + sources; per-series mute is per-user (toggle on each series detail + page). </Text> <TagsInput label="Languages" - description="ISO 639-1 codes (e.g. en, es). Lower-cased automatically." + description="ISO 639-1 codes (e.g. en, es). Lower-cased automatically. Server-wide." placeholder="Add language code…" - value={Array.from(allowedLanguages)} - onChange={(values) => setAllowedLanguages(values)} + value={allowedLanguages} + onChange={setAllowedLanguages} + disabled={notifyLanguagesQuery.isLoading} /> - <TagsInput + <MultiSelect label="Plugin sources" - description="Plugin IDs (e.g. release-mangaupdates, release-nyaa)." - placeholder="Add plugin id…" - value={Array.from(allowedPlugins)} - onChange={(values) => setAllowedPlugins(values)} + description="Pick the release-source plugins to receive notifications from. Empty = all installed sources are allowed. Server-wide." + placeholder={ + allowedPlugins.length === 0 + ? 
"All release-source plugins" + : undefined + } + data={pluginOptions} + value={allowedPlugins} + onChange={setAllowedPlugins} + searchable + clearable + nothingFoundMessage={ + pluginsQuery.isLoading + ? "Loading plugins…" + : "No release-source plugins installed" + } + disabled={notifyPluginsQuery.isLoading} /> + <Group justify="space-between" mt="xs" wrap="nowrap"> + <Box> + <Text size="sm" fw={500}> + Muted series + </Text> + <Text size="xs" c="dimmed"> + {mutedSeriesIds.length === 0 + ? "No series muted for your account." + : `${mutedSeriesIds.length} series muted for your account.`} + </Text> + </Box> + <Button + size="xs" + variant="light" + color="red" + leftSection={<IconTrash size={14} />} + onClick={clearMutes} + disabled={mutedSeriesIds.length === 0} + > + Clear all mutes + </Button> + </Group> </Stack> </Card> ); diff --git a/web/src/pages/settings/ServerSettings.tsx b/web/src/pages/settings/ServerSettings.tsx index 79543e73..af0f3f13 100644 --- a/web/src/pages/settings/ServerSettings.tsx +++ b/web/src/pages/settings/ServerSettings.tsx @@ -469,7 +469,14 @@ export function ServerSettings() { ) : ( <Stack gap="md"> {Object.entries(groupedSettings) - .filter(([category]) => category.toLowerCase() !== "display") + // "display" is rendered above as a separate section. + // "Release Tracking" has its own dedicated page at + // /settings/release-tracking — hide it here so admins don't + // have two surfaces editing the same keys. 
+ .filter(([category]) => { + const c = category.toLowerCase(); + return c !== "display" && c !== "release tracking"; + }) .sort(([a], [b]) => a.localeCompare(b)) .map(([category, categorySettings]) => ( <SettingsCategorySection diff --git a/web/src/pages/settings/plugins/OfficialPlugins.tsx b/web/src/pages/settings/plugins/OfficialPlugins.tsx index e1d3819d..d72d6cd8 100644 --- a/web/src/pages/settings/plugins/OfficialPlugins.tsx +++ b/web/src/pages/settings/plugins/OfficialPlugins.tsx @@ -38,7 +38,7 @@ export interface OfficialPlugin { /** Short description of what the plugin does */ description: string; /** Plugin type badge */ - type: "Metadata" | "Sync" | "Recommendations"; + type: "Metadata" | "Sync" | "Recommendations" | "Releases"; /** npm package name */ packageName: string; /** Short auth requirement description shown on the back face */ @@ -137,6 +137,38 @@ export const OFFICIAL_PLUGINS: OfficialPlugin[] = [ credentialDelivery: "env", }, }, + { + name: "release-mangaupdates", + displayName: "MangaUpdates Releases", + description: + "Announces new chapter and volume releases for tracked series via MangaUpdates per-series RSS feeds. Multi-language support filtered by per-series language preferences. Notify-only — Codex does not download anything.", + type: "Releases", + packageName: "@ashdev/codex-plugin-release-mangaupdates", + authInfo: "No authentication required", + author: "Codex Team", + scope: "system", + formDefaults: { + command: "npx", + args: "-y\n@ashdev/codex-plugin-release-mangaupdates", + credentialDelivery: "env", + }, + }, + { + name: "release-nyaa", + displayName: "Nyaa Releases", + description: + "Announces new chapter and volume torrents for tracked series via Nyaa.si uploader RSS feeds. Limited to an admin-configured uploader allowlist; matches via title aliases. 
Notify-only — Codex does not download anything.", + type: "Releases", + packageName: "@ashdev/codex-plugin-release-nyaa", + authInfo: "No authentication required", + author: "Codex Team", + scope: "system", + formDefaults: { + command: "npx", + args: "-y\n@ashdev/codex-plugin-release-nyaa", + credentialDelivery: "env", + }, + }, ]; // --------------------------------------------------------------------------- @@ -147,6 +179,7 @@ const pluginTypeBadgeColors: Record<OfficialPlugin["type"], string> = { Metadata: "blue", Sync: "teal", Recommendations: "grape", + Releases: "orange", }; const credentialLabels: Record<string, string> = { diff --git a/web/src/store/releaseAnnouncementsStore.test.ts b/web/src/store/releaseAnnouncementsStore.test.ts index 2896579e..30ba9976 100644 --- a/web/src/store/releaseAnnouncementsStore.test.ts +++ b/web/src/store/releaseAnnouncementsStore.test.ts @@ -3,15 +3,7 @@ import { useReleaseAnnouncementsStore } from "./releaseAnnouncementsStore"; describe("releaseAnnouncementsStore", () => { beforeEach(() => { - const store = useReleaseAnnouncementsStore.getState(); - store.reset(); - store.setAllowedLanguages([]); - store.setAllowedPlugins([]); - // Clear any leftover muted series from a prior test. 
- const muted = Array.from(store.mutedSeriesIds); - for (const id of muted) { - store.toggleMute(id); - } + useReleaseAnnouncementsStore.getState().reset(); }); it("bump increments and reset clears the unseen counter", () => { @@ -22,71 +14,4 @@ describe("releaseAnnouncementsStore", () => { store.reset(); expect(useReleaseAnnouncementsStore.getState().unseenCount).toBe(0); }); - - it("shouldNotify lets everything through when filters are empty", () => { - const { shouldNotify } = useReleaseAnnouncementsStore.getState(); - expect( - shouldNotify({ - seriesId: "s1", - pluginId: "release-nyaa", - language: "en", - }), - ).toBe(true); - }); - - it("shouldNotify blocks muted series", () => { - useReleaseAnnouncementsStore.getState().toggleMute("muted-series"); - const { shouldNotify } = useReleaseAnnouncementsStore.getState(); - expect( - shouldNotify({ - seriesId: "muted-series", - pluginId: "release-nyaa", - language: "en", - }), - ).toBe(false); - }); - - it("shouldNotify enforces language allowlist (case-insensitive)", () => { - useReleaseAnnouncementsStore.getState().setAllowedLanguages(["EN"]); - const { shouldNotify } = useReleaseAnnouncementsStore.getState(); - expect( - shouldNotify({ seriesId: "s1", pluginId: "p", language: "en" }), - ).toBe(true); - expect( - shouldNotify({ seriesId: "s1", pluginId: "p", language: "es" }), - ).toBe(false); - }); - - it("shouldNotify enforces plugin allowlist", () => { - useReleaseAnnouncementsStore - .getState() - .setAllowedPlugins(["release-mangaupdates"]); - const { shouldNotify } = useReleaseAnnouncementsStore.getState(); - expect( - shouldNotify({ - seriesId: "s1", - pluginId: "release-mangaupdates", - language: "en", - }), - ).toBe(true); - expect( - shouldNotify({ - seriesId: "s1", - pluginId: "release-nyaa", - language: "en", - }), - ).toBe(false); - }); - - it("toggleMute is reversible", () => { - const store = useReleaseAnnouncementsStore.getState(); - store.toggleMute("series-x"); - expect( - 
useReleaseAnnouncementsStore.getState().mutedSeriesIds.has("series-x"), - ).toBe(true); - useReleaseAnnouncementsStore.getState().toggleMute("series-x"); - expect( - useReleaseAnnouncementsStore.getState().mutedSeriesIds.has("series-x"), - ).toBe(false); - }); }); diff --git a/web/src/store/releaseAnnouncementsStore.ts b/web/src/store/releaseAnnouncementsStore.ts index a440c0e4..9659ac7c 100644 --- a/web/src/store/releaseAnnouncementsStore.ts +++ b/web/src/store/releaseAnnouncementsStore.ts @@ -1,77 +1,31 @@ import { create } from "zustand"; +/** + * Releases nav-badge counter. + * + * Notification *filters* (server-wide language + plugin allowlists, per-user + * mute list) used to live here too, but they belong on durable storage + * (settings + user_preferences) so they survive page reloads. This store now + * just tracks the in-session "unseen" badge count. + * + * The `shouldNotify` decision is made inside the SSE handler in + * `useEntityEvents` by snapshotting the latest filter values from the query + * cache; see that file for the predicate. + */ interface ReleaseAnnouncementsState { /** Number of unseen `release_announced` events since the user last visited /releases. */ unseenCount: number; - /** Per-series mute list (series IDs whose announcements should be ignored). */ - mutedSeriesIds: Set<string>; - /** Allowed languages; empty set means "all". Stored lower-case. */ - allowedLanguages: Set<string>; - /** Allowed plugin IDs; empty set means "all". */ - allowedPlugins: Set<string>; - /** Increment the badge counter (called by the SSE handler). */ + /** Increment the badge counter (called by the SSE handler when shouldNotify passes). */ bump: () => void; /** Reset the badge counter (called when the user visits /releases). */ reset: () => void; - /** Toggle a per-series mute. */ - toggleMute: (seriesId: string) => void; - /** Replace the language allowlist. */ - setAllowedLanguages: (languages: string[]) => void; - /** Replace the plugin allowlist. 
*/ - setAllowedPlugins: (plugins: string[]) => void; - /** - * Decide whether an incoming event should bump the badge or surface as a - * toast. Pure function, exposed so the SSE handler and tests can share it. - */ - shouldNotify: (params: { - seriesId: string; - pluginId: string; - language: string; - }) => boolean; } export const useReleaseAnnouncementsStore = create<ReleaseAnnouncementsState>()( - (set, get) => ({ + (set) => ({ unseenCount: 0, - mutedSeriesIds: new Set<string>(), - allowedLanguages: new Set<string>(), - allowedPlugins: new Set<string>(), - bump: () => set((state) => ({ unseenCount: state.unseenCount + 1 })), reset: () => set({ unseenCount: 0 }), - - toggleMute: (seriesId) => - set((state) => { - const next = new Set(state.mutedSeriesIds); - if (next.has(seriesId)) { - next.delete(seriesId); - } else { - next.add(seriesId); - } - return { mutedSeriesIds: next }; - }), - - setAllowedLanguages: (languages) => - set({ - allowedLanguages: new Set(languages.map((l) => l.toLowerCase())), - }), - - setAllowedPlugins: (plugins) => set({ allowedPlugins: new Set(plugins) }), - - shouldNotify: ({ seriesId, pluginId, language }) => { - const { mutedSeriesIds, allowedLanguages, allowedPlugins } = get(); - if (mutedSeriesIds.has(seriesId)) return false; - if ( - allowedLanguages.size > 0 && - !allowedLanguages.has(language.toLowerCase()) - ) { - return false; - } - if (allowedPlugins.size > 0 && !allowedPlugins.has(pluginId)) { - return false; - } - return true; - }, }), ); diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index 417c5325..5523d804 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -13514,6 +13514,11 @@ export interface components { externalIdSource?: string | null; /** @description Content types this plugin can provide metadata for (e.g., ["series", "book"]) */ metadataProvider?: string[]; + /** + * @description Whether the plugin declares the `release_source` capability (announces + 
* new chapter / volume releases for tracked series). + */ + releaseSource?: boolean; /** @description Can sync user reading progress */ userReadSync?: boolean; /** @description Can provide personalized recommendations */ diff --git a/web/src/types/preferences.ts b/web/src/types/preferences.ts index 841981f4..150cc4af 100644 --- a/web/src/types/preferences.ts +++ b/web/src/types/preferences.ts @@ -15,6 +15,15 @@ export interface TypedPreferences { // Library preferences "library.show_deleted_books": boolean; + + // Release-tracking preferences + /** + * Series IDs whose `release_announced` events should NOT bump the badge or + * surface a toast for this user. The series detail page exposes a per-series + * mute toggle that writes here; the Release Tracking settings page exposes + * a "Clear all mutes" action that deletes the preference. + */ + "release_tracking.muted_series_ids": string[]; } /** @@ -28,6 +37,7 @@ export type PreferenceKey = keyof TypedPreferences; export const PREFERENCE_DEFAULTS: TypedPreferences = { "ui.theme": "system", "library.show_deleted_books": false, + "release_tracking.muted_series_ids": [], }; /** From 796af9a985d45acb062592056e575565adc49373 Mon Sep 17 00:00:00 2001 From: Sylvain Cau <ashdevfr@gmail.com> Date: Mon, 4 May 2026 17:11:12 -0700 Subject: [PATCH 11/29] feat(release-tracking): add releases/register_sources reverse-RPC Plugins implementing the release_source capability now declare their desired source rows via releases/register_sources from onInitialize. The host upserts each entry on (plugin_id, source_key), prunes rows the plugin no longer declares, and reconciles the scheduler so new rows start polling without a server restart. User-managed fields (enabled, poll_interval_s) survive plugin restarts. Threads the scheduler Arc through PluginManager -> PluginHandle -> ReleasesRequestHandler so the handler can reconcile in-place. Adds upsert / list_by_plugin / delete_by_plugin_excluding to the repo. 
Extends ReleasePollRequest with sourceKey + config so plugins owning multiple rows can dispatch directly. feat(release-nyaa): one source row per uploader Refactors the Nyaa plugin to materialize one release_sources row per uploader subscription via releases/register_sources, with stable kind:identifier source keys (user:tsuna69, query:luminousscans, params:c=3_1&q=berserk). poll() now resolves a single subscription from the host's per-poll config snapshot and fetches just that uploader's feed, replacing the previous "one row, walk all subscriptions" workaround. Each row gets its own ETag and last-error status. Registration is deferred to a microtask + retries on METHOD_NOT_FOUND to ride out the brief race where the host has not yet installed the releases reverse-RPC handler after onInitialize returns. feat(release-mangaupdates): auto-register single static source row The plugin now registers one release_sources row ("MangaUpdates Releases", kind=rss-series, sourceKey=default) on initialize so users see a configurable row in Settings -> Release tracking out of the box. No admin config is required. 
--- config/seed-config.sample.yaml | 20 + docs/docs/plugins/release-mangaupdates.md | 4 + docs/docs/plugins/release-nyaa.md | 52 +- plugins/release-mangaupdates/src/index.ts | 51 +- plugins/release-mangaupdates/src/manifest.ts | 2 +- plugins/release-nyaa/src/fetcher.test.ts | 58 ++ plugins/release-nyaa/src/fetcher.ts | 121 +++- plugins/release-nyaa/src/index.test.ts | 73 ++- plugins/release-nyaa/src/index.ts | 182 ++++-- plugins/release-nyaa/src/manifest.ts | 6 +- plugins/sdk-typescript/src/types/releases.ts | 69 ++- src/commands/seed.rs | 8 +- src/commands/serve.rs | 3 +- src/db/repositories/release_sources.rs | 207 +++++++ src/services/plugin/handle.rs | 20 + src/services/plugin/manager.rs | 22 + src/services/plugin/protocol.rs | 30 + src/services/plugin/releases_handler.rs | 533 +++++++++++++++++- src/tasks/handlers/poll_release_source.rs | 2 + .../series/SeriesReleasesPanel.test.tsx | 10 +- .../components/series/SeriesReleasesPanel.tsx | 2 +- web/src/pages/ReleasesInbox.test.tsx | 10 +- web/src/pages/ReleasesInbox.tsx | 4 +- 23 files changed, 1367 insertions(+), 122 deletions(-) diff --git a/config/seed-config.sample.yaml b/config/seed-config.sample.yaml index a5b3867c..680b2a9b 100644 --- a/config/seed-config.sample.yaml +++ b/config/seed-config.sample.yaml @@ -116,6 +116,10 @@ plugins: # MangaUpdates Releases - Translation/scanlation release feed (no credentials needed) # Release-source plugins are gated by manifest capability at reverse-RPC # dispatch; they don't write metadata, so no `permissions` or `scopes` are needed. + # + # On first start the plugin auto-registers a single source row visible at + # Settings → Release tracking. Optional `config.blockedGroups` (CSV) filters + # noisy scanlators server-wide. 
- name: release-mangaupdates display_name: MangaUpdates Releases description: Announces new chapter releases for tracked series via MangaUpdates per-series RSS feeds @@ -124,8 +128,22 @@ plugins: permissions: [] scopes: [] credential_delivery: env + # config: + # blockedGroups: "LowQualityScans,MTL Group" # Nyaa Releases - Acquisition-pointer source for trusted uploaders (no credentials needed) + # + # The `uploaders` config below drives one source row per entry at + # Settings → Release tracking. Each entry is one of: + # `username` -> Nyaa user feed (https://nyaa.si/?page=rss&u=<username>) + # `q:<text>` -> plain site-wide search + # `q:?<key=value>` -> URL-style search with allowlisted keys (q, c, f, u), + # e.g. `q:?c=3_1&q=Berserk` for English-translated + # Literature category. See plugin docs for details. + # + # Removing an entry and re-saving prunes its source row (and its + # release_ledger history). Per-source poll interval / enable / "Poll now" + # live in the Release tracking UI. - name: release-nyaa display_name: Nyaa Releases description: Announces new chapter / volume torrents for tracked series via Nyaa.si uploader RSS feeds @@ -134,6 +152,8 @@ plugins: permissions: [] scopes: [] credential_delivery: env + # config: + # uploaders: "tsuna69,TankobonBlur,q:?c=3_1&q=Berserk" # ============================================================================= # Libraries diff --git a/docs/docs/plugins/release-mangaupdates.md b/docs/docs/plugins/release-mangaupdates.md index 48aedaa5..b2eaae91 100644 --- a/docs/docs/plugins/release-mangaupdates.md +++ b/docs/docs/plugins/release-mangaupdates.md @@ -16,6 +16,8 @@ The MangaUpdates Releases plugin announces new chapter and volume releases for t ## How it works +The plugin auto-registers a single source row (`MangaUpdates Releases`) on first start. 
Unlike Nyaa (one row per uploader), MangaUpdates polls every tracked series with a `mangaupdates` external ID under one logical feed, so a single row is the right model. You'll find the row in **Settings → Release tracking** along with its enable toggle, poll-interval input, and "Poll now" button. + 1. Codex schedules a poll for the source row (default: once per 24 hours). 2. The plugin asks the host for tracked series scoped to those with a `mangaupdates` external ID. 3. For each series, the plugin GETs `https://api.mangaupdates.com/v1/series/{id}/rss`. @@ -28,6 +30,8 @@ The plugin **never** downloads release files. The "Open" link on the inbox row s ## Setup +The plugin works out of the box once installed and enabled — no required config. The single source row is materialized on first start. The two things you'll typically configure are: which **languages** announcements should pass through (see below), and optionally a **scanlation group blocklist** for noisy groups. + ### Populating MangaUpdates IDs For the plugin to find any tracked series, those series need a `mangaupdates` external ID. There are two ways to populate this: diff --git a/docs/docs/plugins/release-nyaa.md b/docs/docs/plugins/release-nyaa.md index 8d63c9f1..002c4edb 100644 --- a/docs/docs/plugins/release-nyaa.md +++ b/docs/docs/plugins/release-nyaa.md @@ -27,34 +27,48 @@ Use Nyaa when you've already decided on a small allowlist of trusted uploaders ( ## How it works -1. Codex schedules a poll for the source row (default: once per 24 hours). -2. The plugin reads the configured uploader subscription list. -3. The plugin asks the host for tracked series along with their aliases (`releases/list_tracked` with `requires_aliases: true`). -4. For each subscription, the plugin fetches the Nyaa feed: +The plugin auto-materializes one **release source row** per uploader entry on first start (and on every config save, which restarts the plugin). + +1. 
You set the plugin's `uploaders` config to a comma-separated list (see [Setup](#setup) below). +2. On startup the plugin parses the list and calls `releases/register_sources` over the host RPC channel. The host upserts one row per entry in `release_sources` keyed on `(plugin_id, sourceKey)` where `sourceKey` is `kind:identifier` (e.g. `user:tsuna69`, `query:luminousscans`, `params:c=3_1&q=berserk`). +3. Each row gets its own poll cadence (default 24h, overridable in **Settings → Release tracking**), its own ETag, and its own last-error / last-polled status. The scheduler fires one `releases/poll` task per row. +4. When the host calls `releases/poll(sourceId, sourceKey, config, etag)`, the plugin recovers the subscription from `config.subscription` and fetches just that uploader's feed: - User feed: `https://nyaa.si/?page=rss&u=<username>` - - Search feed (for groups without a user account): `https://nyaa.si/?page=rss&q=<query>` + - Plain search: `https://nyaa.si/?page=rss&q=<query>` + - URL-style params: `https://nyaa.si/?page=rss&<allowlisted-params>` 5. Each RSS item is parsed: a leading `[Group]` token, chapter / volume token (single or range), and parenthesized format hints are extracted; the remaining text is the *series guess*. 6. The series guess is normalized and matched against tracked-series aliases. Confidence ≥ 0.95 on exact normalized match; otherwise the matcher computes a token-level Dice ratio and rejects below 0.85. 7. Matching candidates are submitted to the host's release ledger via `releases/record`. The host applies its threshold (default 0.7) and dedups on `(source_id, external_release_id)` and on `info_hash` (Nyaa's `nyaa:infoHash` element). +Removing an entry from the `uploaders` list and re-saving prunes the corresponding row and its `release_ledger` history (cascade delete). User-managed fields (`enabled`, `pollIntervalS`) survive plugin restarts. + The plugin **never** downloads release files. 
The "Open" link on the inbox row sends you to the Nyaa view page or the `.torrent` URL; how you acquire the chapter is up to you. ## Setup -### Configure uploader subscriptions +### 1. Configure uploader subscriptions -The plugin's `uploaders` admin field is a comma-separated list of trusted uploader handles or queries: +The plugin's `uploaders` admin field is a comma-separated list of trusted uploader handles or queries. Each entry takes one of three forms: -``` -uploaders: "1r0n,TankobonBlur,q:LuminousScans" +| Form | Example | What it polls | +| ----------------- | ------------------------ | ---------------------------------------------------------------------------- | +| `username` | `tsuna69` | `https://nyaa.si/?page=rss&u=tsuna69` — that uploader's full RSS feed. | +| `q:<text>` | `q:LuminousScans` | `https://nyaa.si/?page=rss&q=LuminousScans` — a plain site-wide search. | +| `q:?<key=value>` | `q:?c=3_1&q=Berserk` | URL-style search with allowlisted keys: `q`, `c`, `f`, `u`. The example here scopes a search to the Literature → English-translated category. | + +Mix freely: + +```json +{ + "uploaders": "tsuna69,TankobonBlur,q:LuminousScans,q:?c=3_1&q=Berserk" +} ``` -- Plain identifier (`1r0n`) → user feed (`https://nyaa.si/?page=rss&u=1r0n`). -- `q:<query>` or `query:<query>` → search feed (`https://nyaa.si/?page=rss&q=<query>`). Use this for groups without a Nyaa account, or to scope by tag. +Empty tokens are dropped; case-insensitive duplicates are silently deduplicated. URL-style entries normalize their param order so `q:?q=X&c=3_1` and `q:?c=3_1&q=X` collapse to the same source row. Anything not on the allowlist (`s=`, `o=`, etc.) is dropped without error. -Empty tokens are dropped; case-insensitive duplicates are silently deduplicated. The plugin walks subscriptions in declaration order on each poll. +After saving, head to **Settings → Release tracking** to see the per-source rows the plugin registered. 
Each row has its own enable toggle, poll-interval input, and "Poll now" button. Disabling a row pauses its scheduled polls; deleting an entry from the `uploaders` CSV (and saving) removes the row entirely. -### Make sure tracked series have aliases +### 2. Make sure tracked series have aliases Nyaa releases identify a series only by name in the title. The plugin matches titles to series via the `series_aliases` table: @@ -63,25 +77,19 @@ Nyaa releases identify a series only by name in the title. The plugin matches ti For best results, add aliases that mirror how your trusted uploaders name the release. Example: 1r0n names `Boruto: Two Blue Vortex` as `[1r0n] Boruto - Two Blue Vortex - Volume NN (Digital)`. The default normalization produces `boruto two blue vortex` from both forms, so an exact match is automatic — but if you track *Boruto* with only the alias `Boruto`, the matcher will see `boruto two blue vortex` and reject it as not similar enough to `boruto`. -### Source row - -A `release_sources` row with `plugin_id="release-nyaa"` and `kind="rss-uploader"` must exist before the scheduler will poll. (See [Release tracking architecture](../architecture/release-tracking.md) for the broader picture; admin UI to create and manage source rows is tracked as a follow-up.) - ## Configuration reference | Field | Scope | Default | Notes | | ------------------ | ------------ | ---------------------- | -------------------------------------------------------------------------------------------------- | -| `uploaders` | admin | `""` | Comma-separated subscription list. Plain identifier = user feed; `q:<query>` = search feed. | +| `uploaders` | admin | `""` | Comma-separated subscription list. See the table above for the three accepted entry forms. | | `requestTimeoutMs` | admin | `10000` | Hard timeout per Nyaa fetch. Clamped to `[1000, 60000]`. | | `baseUrl` | admin | `https://nyaa.si` | Override base URL — useful for mirrors. Trailing slashes are trimmed. 
| ## Limitations -- **One source row, many uploaders.** The plan called for one source row per uploader subscription, but the host has no admin endpoint for creating `release_sources` rows yet. Until that ships, all uploader subscriptions ride a single source row's poll cadence and ETag bucket. With daily polls the difference is academic; if you're adding many uploaders or want per-uploader poll intervals, this will need revisiting. -- **ETag is single-bucket.** The source row stores one ETag — the plugin uses it on the *first* uploader fetched and walks subsequent uploaders unconditionally. Daily polls + small RSS bodies make this acceptable; per-subscription ETags would need per-(source, subscription) state. -- **Language is hardcoded to English.** Nyaa releases don't carry a language tag, and 99% of the uploaders this plugin targets release English-language scans. Admins who add non-English uploaders should configure tracked series' `languages` accordingly so the host's `latest_known_*` advance gate doesn't pollute the high-water mark with releases the user can't read. +- **Language is hardcoded to English.** Nyaa releases don't carry a language tag, and the uploaders this plugin targets predominantly release English-language scans. Admins who add non-English uploaders should configure tracked series' `languages` accordingly so the host's `latest_known_*` advance gate doesn't pollute the high-water mark with releases the user can't read. - **Title parsing is best-effort.** The corpus covers the common 1r0n / TankobonBlur shapes plus generic `Volume NN` / `Chapter NNN` forms. Edge-case titles (e.g. unusual punctuation, missing separators) may parse with an empty `seriesGuess`; the matcher silently rejects those entries (no false positives). -- **No per-uploader confidence weighting in v1.** Every matched candidate gets the same confidence based on the alias match alone. 
Adding per-uploader trust scores (downgrade an uploader after N user dismissals) is on the roadmap but not load-bearing at v1's tracked-series scale. +- **No per-uploader confidence weighting yet.** Every matched candidate gets the same confidence based on the alias match alone. Adding per-uploader trust scores (downgrade an uploader after N user dismissals) is a future enhancement. ## Risks diff --git a/plugins/release-mangaupdates/src/index.ts b/plugins/release-mangaupdates/src/index.ts index 523edf22..a360b7f2 100644 --- a/plugins/release-mangaupdates/src/index.ts +++ b/plugins/release-mangaupdates/src/index.ts @@ -357,6 +357,45 @@ async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<Rel // Plugin Initialization // ============================================================================= +/** + * Register a single static source row representing the MangaUpdates batch + * feed. Unlike Nyaa (one row per uploader), MangaUpdates polls all tracked + * series under one logical feed, so we always declare exactly one row keyed + * `default`. Retries on `METHOD_NOT_FOUND` to handle the brief race where + * the host has not yet installed the releases reverse-RPC handler. + */ +export async function registerSources( + rpc: HostRpcClient, +): Promise<{ registered: number; pruned: number } | null> { + const sources = [ + { + sourceKey: "default", + displayName: "MangaUpdates Releases", + kind: "rss-series" as const, + config: null, + }, + ]; + const maxAttempts = 5; + for (let attempt = 1; attempt <= maxAttempts; attempt++) { + try { + return await rpc.call<{ registered: number; pruned: number }>( + RELEASES_METHODS.REGISTER_SOURCES, + { sources }, + ); + } catch (err) { + const isMethodNotFound = err instanceof HostRpcError && err.code === -32601; + if (isMethodNotFound && attempt < maxAttempts) { + await new Promise((r) => setTimeout(r, 50 * attempt)); + continue; + } + const reason = err instanceof Error ? 
err.message : String(err); + logger.error(`register_sources failed: ${reason}`); + return null; + } + } + return null; +} + createReleaseSourcePlugin({ manifest, provider: { @@ -368,7 +407,7 @@ createReleaseSourcePlugin({ }, }, logLevel: "info", - onInitialize(params: InitializeParams) { + async onInitialize(params: InitializeParams) { state.hostRpc = params.hostRpc; const ac = params.adminConfig ?? {}; if (typeof ac.blockedGroups === "string") { @@ -380,6 +419,16 @@ createReleaseSourcePlugin({ logger.info( `initialized: blockedGroups=${state.blockedGroupsCsv ? "set" : "empty"} timeoutMs=${state.requestTimeoutMs} defaultPoll=${DEFAULT_POLL_INTERVAL_S}s`, ); + + // Materialize the single static source row. Deferred to a microtask so + // we run *after* the host installs the releases reverse-RPC handler. + queueMicrotask(() => { + void registerSources(params.hostRpc).then((result) => { + if (result) { + logger.info(`register_sources: registered=${result.registered} pruned=${result.pruned}`); + } + }); + }); }, }); diff --git a/plugins/release-mangaupdates/src/manifest.ts b/plugins/release-mangaupdates/src/manifest.ts index cb8b3a21..5aa71ef8 100644 --- a/plugins/release-mangaupdates/src/manifest.ts +++ b/plugins/release-mangaupdates/src/manifest.ts @@ -62,7 +62,7 @@ export const manifest = { userDescription: "Announces new chapters for series you've tracked, using their MangaUpdates IDs. Filters releases to languages you can read. Notification-only — Codex does not download anything.", adminSetupInstructions: - "No credentials are required: MangaUpdates' per-series RSS feeds are public. To get announcements for a series, edit its tracking panel and either paste a `mangaupdates` external ID or rely on the metadata-refresh path to populate it from MangaBaka cross-references. Optional language preferences and group blocklists can be configured per-series; defaults come from server settings (`release_tracking.default_languages`).", + "1. 
No config is required to get started — saving the plugin is enough. The plugin auto-registers a single source row (`MangaUpdates Releases`) in **Settings → Release tracking** on first start, where you can disable it, change the poll interval, or hit *Poll now*. 2. To get announcements for a series, edit its tracking panel and either paste a `mangaupdates` external ID or let the metadata-refresh path populate it from MangaBaka cross-references. 3. Optional: set `blockedGroups` (CSV, case-insensitive) to filter noisy scanlators server-wide; per-series language preferences live on each series' tracking config and override the server default (`release_tracking.default_languages`). No credentials are needed; MangaUpdates RSS feeds are public.", } as const satisfies PluginManifest & { capabilities: { releaseSource: { kinds: ["rss-series"] } }; }; diff --git a/plugins/release-nyaa/src/fetcher.test.ts b/plugins/release-nyaa/src/fetcher.test.ts index 558ac30e..2380f3e1 100644 --- a/plugins/release-nyaa/src/fetcher.test.ts +++ b/plugins/release-nyaa/src/fetcher.test.ts @@ -38,6 +38,59 @@ describe("parseSubscriptionToken", () => { expect(parseSubscriptionToken("q:")).toBeNull(); expect(parseSubscriptionToken("query: ")).toBeNull(); }); + + it("parses `q:?key=value&…` as URL-style allowlisted params", () => { + expect(parseSubscriptionToken("q:?c=3_1&q=Berserk")).toEqual({ + kind: "params", + identifier: "c=3_1&q=Berserk", + }); + }); + + it("normalizes URL-style param order so reorderings dedupe", () => { + const a = parseSubscriptionToken("q:?q=Berserk&c=3_1"); + const b = parseSubscriptionToken("q:?c=3_1&q=Berserk"); + expect(a).toEqual(b); + }); + + it("URL-encodes special characters in URL-style params", () => { + expect(parseSubscriptionToken("q:?q=Berserk Volume")).toEqual({ + kind: "params", + identifier: "q=Berserk+Volume", + }); + }); + + it("drops keys that aren't on the allowlist", () => { + expect(parseSubscriptionToken("q:?q=Berserk&s=size&o=desc")).toEqual({ + 
kind: "params", + identifier: "q=Berserk", + }); + }); + + it("returns null when no allowlisted keys remain", () => { + expect(parseSubscriptionToken("q:?s=size&o=desc")).toBeNull(); + expect(parseSubscriptionToken("q:?")).toBeNull(); + }); + + it("collapses `q:?u=<x>` (only u) to a bare user token for dedup", () => { + expect(parseSubscriptionToken("q:?u=1r0n")).toEqual({ + kind: "user", + identifier: "1r0n", + }); + }); + + it("keeps `q:?u=…&c=…` as params so the category survives", () => { + expect(parseSubscriptionToken("q:?u=1r0n&c=3_1")).toEqual({ + kind: "params", + identifier: "c=3_1&u=1r0n", + }); + }); + + it("ignores empty values in URL-style params", () => { + expect(parseSubscriptionToken("q:?c=&q=Berserk")).toEqual({ + kind: "params", + identifier: "q=Berserk", + }); + }); }); describe("parseSubscriptionList", () => { @@ -83,6 +136,11 @@ describe("feedUrl", () => { const url = feedUrl({ kind: "user", identifier: "x" }, "https://mirror.example/"); expect(url).toBe("https://mirror.example/?page=rss&u=x"); }); + + it("builds a URL from a params-kind subscription verbatim", () => { + const url = feedUrl({ kind: "params", identifier: "c=3_1&q=Berserk" }); + expect(url).toBe("https://nyaa.si/?page=rss&c=3_1&q=Berserk"); + }); }); // ----------------------------------------------------------------------------- diff --git a/plugins/release-nyaa/src/fetcher.ts b/plugins/release-nyaa/src/fetcher.ts index cef7a15d..dd31cc27 100644 --- a/plugins/release-nyaa/src/fetcher.ts +++ b/plugins/release-nyaa/src/fetcher.ts @@ -40,20 +40,72 @@ export interface FetcherOptions { export const NYAA_BASE_URL = "https://nyaa.si"; /** - * One uploader subscription entry. Either a Nyaa username (`kind: "user"`) or - * an arbitrary search query (`kind: "query"`) for groups without an account. + * One uploader subscription entry. + * + * Three shapes: + * - `user` — pulls `?page=rss&u=<identifier>` (a Nyaa user feed). 
+ * - `query` — pulls `?page=rss&q=<identifier>` (a plain text search). + * - `params` — pulls `?page=rss&<params>` where `<params>` is an + * allowlisted set of Nyaa query keys (`q`, `c`, `f`, `u`). Used to express + * category / filter combinations like the Literature → English-translated + * view (`c=3_1`). + */ export type UploaderSubscription = | { kind: "user"; identifier: string } - | { kind: "query"; identifier: string }; + | { kind: "query"; identifier: string } + | { kind: "params"; identifier: string }; + +/** + * Keys allowed through from a `q:?…` URL-style token. `page` is always + * injected by the plugin and can't be overridden; anything not in this set + * is silently dropped to keep the surface tight. + */ +const PARAMS_ALLOWLIST = new Set(["q", "c", "f", "u"]); + +/** + * Parse a `q:?key=value&…` body into a normalized, allowlisted query string. + * Returns null when no allowlisted keys remain (caller drops the token). + * + * Normalization sorts params alphabetically so two tokens that differ only + * in key order dedupe to the same identifier. + */ +function parseUrlParams(body: string): { kind: "user" | "params"; identifier: string } | null { + const params = new URLSearchParams(body); + const kept: [string, string][] = []; + for (const [rawKey, rawValue] of params.entries()) { + const key = rawKey.toLowerCase(); + if (!PARAMS_ALLOWLIST.has(key)) continue; + const value = rawValue.trim(); + if (value.length === 0) continue; + kept.push([key, value]); + } + if (kept.length === 0) return null; + + // If the *only* allowlisted key is `u`, collapse to a plain user token so + // `q:?u=1r0n` dedupes against the bare `1r0n` form and reuses the same + // URL-building branch. + if (kept.length === 1 && kept[0]?.[0] === "u") { + return { kind: "user", identifier: kept[0][1] }; + } + + kept.sort(([a], [b]) => (a < b ? -1 : a > b ? 
1 : 0)); + const normalized = new URLSearchParams(kept).toString(); + return { kind: "params", identifier: normalized }; +} /** * Parse a single uploader subscription token. * * Tokens look like: - * - `1r0n` → user - * - `q:LuminousScans` → query - * - `query:Manga Group` → query (long form) + * - `1r0n` → user feed + * - `q:LuminousScans` → plain search query + * - `query:Manga Group` → plain search query (long form) + * - `q:?c=3_1&q=Berserk` → URL-style params (allowlisted: q, c, f, u) + * - `query:?u=1r0n` → URL-style params; `u` is the only key, so this collapses to a user feed + * + * The leading `?` after `q:` / `query:` is the opt-in switch into URL mode, + * which keeps `q:c=3_1&q=Berserk` (no `?`) parsing as a literal search term + * for backwards compatibility. * * Empty / whitespace-only tokens return null (caller should drop them). */ @@ -61,20 +113,57 @@ export function parseSubscriptionToken(raw: string): UploaderSubscription | null const trimmed = raw.trim(); if (trimmed.length === 0) return null; - // `q:` / `query:` prefix → arbitrary search query. We match the prefix - // separately from the body so an empty query (`q:`, `query: `) returns - // null rather than falling through to "user". + // `q:` / `query:` prefix → search query, in either plain or URL-params form. const prefixMatch = trimmed.match(/^(q|query):(.*)$/i); if (prefixMatch) { - const q = (prefixMatch[2] ?? "").trim(); - if (q.length === 0) return null; - return { kind: "query", identifier: q }; + const body = (prefixMatch[2] ?? "").trim(); + if (body.length === 0) return null; + + if (body.startsWith("?")) { + return parseUrlParams(body.slice(1)); + } + return { kind: "query", identifier: body }; } // Plain identifier → username feed. return { kind: "user", identifier: trimmed }; } +/** + * Build a stable per-plugin source key for a subscription. Mirrors the + * dedup key used in `parseSubscriptionList` so two ways of writing the + * same subscription collapse to the same source row. 
+ * + * Used by `releases/register_sources` (to declare the plugin-owned key for + * each row) and as a fallback when reconstructing a subscription from a + * source key whose `config` is missing. Lower-cased identifier preserves + * the existing case-insensitive dedup behaviour. + */ +export function subscriptionToSourceKey(sub: UploaderSubscription): string { + return `${sub.kind}:${sub.identifier.toLowerCase()}`; +} + +/** + * Inverse of `subscriptionToSourceKey`: parse a `kind:identifier` source key + * back into a subscription. Returns null for unrecognized keys (older rows + * from a previous plugin version, manual edits, etc.) so the caller can log + * and skip without crashing the whole poll. + * + * Note: the identifier coming back is lower-cased (per the source key + * convention). Nyaa is case-insensitive on usernames and search terms, so + * the round-trip is lossless for our purposes. + */ +export function sourceKeyToSubscription(key: string): UploaderSubscription | null { + const idx = key.indexOf(":"); + if (idx <= 0 || idx === key.length - 1) return null; + const kind = key.slice(0, idx); + const identifier = key.slice(idx + 1); + if (kind === "user" || kind === "query" || kind === "params") { + return { kind, identifier }; + } + return null; +} + /** * Parse the admin `uploaders` CSV into a clean list of subscriptions. * Skips empty tokens; preserves order; deduplicates. 
@@ -86,7 +175,7 @@ export function parseSubscriptionList(raw: unknown): UploaderSubscription[] { for (const token of raw.split(",")) { const sub = parseSubscriptionToken(token); if (sub === null) continue; - const key = `${sub.kind}:${sub.identifier.toLowerCase()}`; + const key = subscriptionToSourceKey(sub); if (seen.has(key)) continue; seen.add(key); out.push(sub); @@ -103,7 +192,11 @@ export function feedUrl( if (subscription.kind === "user") { return `${base}/?page=rss&u=${encodeURIComponent(subscription.identifier)}`; } - return `${base}/?page=rss&q=${encodeURIComponent(subscription.identifier)}`; + if (subscription.kind === "query") { + return `${base}/?page=rss&q=${encodeURIComponent(subscription.identifier)}`; + } + // params: identifier is already a URL-encoded, allowlisted query string. + return `${base}/?page=rss&${subscription.identifier}`; } /** diff --git a/plugins/release-nyaa/src/index.test.ts b/plugins/release-nyaa/src/index.test.ts index be4a69a3..a6676ecc 100644 --- a/plugins/release-nyaa/src/index.test.ts +++ b/plugins/release-nyaa/src/index.test.ts @@ -1,6 +1,6 @@ -import { HostRpcClient } from "@ashdev/codex-plugin-sdk"; +import { HostRpcClient, HostRpcError } from "@ashdev/codex-plugin-sdk"; import { describe, expect, it, vi } from "vitest"; -import { pollSubscription } from "./index.js"; +import { pollSubscription, registerSources } from "./index.js"; import type { AliasCandidate } from "./matcher.js"; // ----------------------------------------------------------------------------- @@ -32,8 +32,10 @@ function makeMockRpc(respond: (method: string, params: unknown) => unknown): { try { result = respond(req.method, req.params); } catch (err) { + // Preserve HostRpcError.code so tests can simulate METHOD_NOT_FOUND etc. + const code = err instanceof HostRpcError ? err.code : -32_000; error = { - code: -32_000, + code, message: err instanceof Error ? 
err.message : "synthetic error", }; } @@ -250,3 +252,68 @@ describe("pollSubscription", () => { expect(calls.filter((c) => c.method === "releases/record")).toHaveLength(0); }); }); + +// ----------------------------------------------------------------------------- +// registerSources +// ----------------------------------------------------------------------------- + +describe("registerSources", () => { + it("emits one source per subscription with stable kind:identifier keys", async () => { + const { rpc, calls } = makeMockRpc(() => ({ registered: 3, pruned: 0 })); + const result = await registerSources(rpc, [ + { kind: "user", identifier: "tsuna69" }, + { kind: "query", identifier: "LuminousScans" }, + { kind: "params", identifier: "c=3_1&q=Berserk" }, + ]); + expect(result).toEqual({ registered: 3, pruned: 0 }); + + const reg = calls.find((c) => c.method === "releases/register_sources"); + expect(reg).toBeDefined(); + if (!reg) return; + const payload = reg.params as { + sources: { sourceKey: string; displayName: string; kind: string; config: unknown }[]; + }; + const keys = payload.sources.map((s) => s.sourceKey); + expect(keys).toEqual(["user:tsuna69", "query:luminousscans", "params:c=3_1&q=berserk"]); + expect(payload.sources.every((s) => s.kind === "rss-uploader")).toBe(true); + // Round-trip data: config carries the original (case-preserving) subscription. 
+ const userSrc = payload.sources[0]; + expect( + (userSrc?.config as { subscription: { identifier: string } }).subscription.identifier, + ).toBe("tsuna69"); + }); + + it("retries on METHOD_NOT_FOUND while the host installs the handler", async () => { + let calls = 0; + const { rpc } = makeMockRpc(() => { + calls++; + if (calls < 3) { + throw new HostRpcError("Method not found", -32601); + } + return { registered: 1, pruned: 0 }; + }); + const result = await registerSources(rpc, [{ kind: "user", identifier: "a" }]); + expect(result).toEqual({ registered: 1, pruned: 0 }); + expect(calls).toBe(3); + }); + + it("does not retry on non-method-not-found errors", async () => { + let calls = 0; + const { rpc } = makeMockRpc(() => { + calls++; + throw new HostRpcError("server boom", -32000); + }); + const result = await registerSources(rpc, [{ kind: "user", identifier: "a" }]); + expect(result).toBeNull(); + expect(calls).toBe(1); + }); + + it("sends an empty list when no subscriptions are configured (host wipes plugin's rows)", async () => { + const { rpc, calls } = makeMockRpc(() => ({ registered: 0, pruned: 2 })); + const result = await registerSources(rpc, []); + expect(result).toEqual({ registered: 0, pruned: 2 }); + const reg = calls.find((c) => c.method === "releases/register_sources"); + expect(reg).toBeDefined(); + expect((reg?.params as { sources: unknown[] }).sources).toEqual([]); + }); +}); diff --git a/plugins/release-nyaa/src/index.ts b/plugins/release-nyaa/src/index.ts index 5784415e..38dea6a0 100644 --- a/plugins/release-nyaa/src/index.ts +++ b/plugins/release-nyaa/src/index.ts @@ -9,29 +9,26 @@ * candidate is silently dropped (the host's threshold would reject it * anyway). * + * Source-row model: + * - On `onInitialize` (which the host re-runs after every config save), + * the plugin parses the admin's `uploaders` CSV and calls + * `releases/register_sources` with one entry per subscription. 
The host + * materializes one `release_sources` row per uploader, keyed on + * `(plugin_id, sourceKey)` where `sourceKey` is `kind:identifier` + * (e.g. `user:tsuna69`, `query:luminousscans`, `params:c=3_1&q=berserk`). + * - The host scheduler fires one `releases/poll` task per source row, so + * each uploader has its own poll cadence, ETag, and last-error status. + * * Flow per `releases/poll`: - * 1. Read uploader subscriptions from admin config. + * 1. Recover the subscription from `params.config.subscription` (or fall + * back to parsing `params.sourceKey`). * 2. Pull tracked-series + aliases from the host * (`releases/list_tracked`). - * 3. For each subscription, conditional GET the RSS feed (ETag stored on - * the source row; we don't have per-subscription state slots). + * 3. Conditional GET the RSS feed using `params.etag`. * 4. Parse each item; match against tracked aliases; emit a candidate via * `releases/record`. - * 5. Aggregate the worst upstream status across all subscriptions for the - * host's per-host backoff layer. - * - * Design notes: - * - **One source row, many uploaders.** The plan calls for "one source - * row per uploader", but the host has no admin endpoint for creating - * `release_sources` rows; admins create one row when enabling the - * plugin and the plugin walks all subscriptions during a single poll. - * Mirrors how MangaUpdates polls all tracked series within one source - * row's `poll(sourceId)` call. - * - **ETag is a single bucket.** The source row stores one ETag — we use - * it on the *first* uploader fetched and rotate fresh ETags out of the - * response on subsequent polls. Daily polls + small RSS bodies make - * this acceptable; per-subscription ETags would need per-(source, - * subscription) state, deferred. + * 5. Return the new ETag and upstream status for the host's per-host + * backoff layer. 
*/ import { @@ -49,6 +46,8 @@ import { import { fetchSubscriptionFeed, parseSubscriptionList, + sourceKeyToSubscription, + subscriptionToSourceKey, type UploaderSubscription, } from "./fetcher.js"; import { @@ -315,11 +314,39 @@ export async function pollSubscription( // Top-level poll handler // ============================================================================= +/** + * Resolve the subscription this poll request is for. The host stamps every + * `release_sources` row with its plugin-defined `config` (set at register + * time), so the preferred path is `params.config.subscription`. If a row + * pre-dates the config field (e.g. created in a previous plugin version), + * fall back to parsing `params.sourceKey`. + */ +function resolveSubscription(params: ReleasePollRequest): UploaderSubscription | null { + const cfg = params.config as { subscription?: unknown } | undefined | null; + const fromConfig = cfg?.subscription; + if (fromConfig && typeof fromConfig === "object") { + const obj = fromConfig as Record<string, unknown>; + const kind = obj.kind; + const identifier = obj.identifier; + if ( + typeof identifier === "string" && + identifier.length > 0 && + (kind === "user" || kind === "query" || kind === "params") + ) { + return { kind, identifier }; + } + } + if (typeof params.sourceKey === "string" && params.sourceKey.length > 0) { + return sourceKeyToSubscription(params.sourceKey); + } + return null; +} + async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<ReleasePollResponse> { const sourceId = params.sourceId; - - if (state.subscriptions.length === 0) { - logger.warn("no uploader subscriptions configured; nothing to poll"); + const subscription = resolveSubscription(params); + if (subscription === null) { + logger.warn(`source=${sourceId} no resolvable subscription on poll request; skipping`); return { notModified: false, upstreamStatus: 200 }; } @@ -330,45 +357,27 @@ async function poll(params: ReleasePollRequest, rpc: 
HostRpcClient): Promise<Rel return { notModified: false, upstreamStatus: 200 }; } - let parsed = 0; - let matched = 0; - let recorded = 0; - let worstStatus = 200; - let lastEtag: string | null = null; - - // 2. Walk subscriptions in declaration order. We use the ETag stored on - // the source row (passed as `params.etag`) for the *first* fetch; - // subsequent fetches start fresh because the ETag belongs to whichever - // subscription was polled last, not this one. - let firstFetch = true; - for (const sub of state.subscriptions) { - const outcome = await pollSubscription(rpc, sourceId, sub, tracked, { - previousEtag: firstFetch ? (params.etag ?? null) : null, - timeoutMs: state.requestTimeoutMs, - minConfidence: state.minConfidence, - ...(state.baseUrl ? { baseUrl: state.baseUrl } : {}), - }); - firstFetch = false; - parsed += outcome.parsed; - matched += outcome.matched; - recorded += outcome.recorded; - if (outcome.upstreamStatus > worstStatus) worstStatus = outcome.upstreamStatus; - if (outcome.etag) lastEtag = outcome.etag; - if (outcome.error) { - logger.warn( - `subscription ${sub.kind}:${sub.identifier}: ${outcome.error} (status ${outcome.upstreamStatus})`, - ); - } + // 2. Conditional GET against this subscription's feed. + const outcome = await pollSubscription(rpc, sourceId, subscription, tracked, { + previousEtag: params.etag ?? null, + timeoutMs: state.requestTimeoutMs, + minConfidence: state.minConfidence, + ...(state.baseUrl ? 
{ baseUrl: state.baseUrl } : {}), + }); + if (outcome.error) { + logger.warn( + `source=${sourceId} ${subscription.kind}:${subscription.identifier}: ${outcome.error} (status ${outcome.upstreamStatus})`, + ); } logger.info( - `poll complete: source=${sourceId} subscriptions=${state.subscriptions.length} tracked=${tracked.length} parsed=${parsed} matched=${matched} recorded=${recorded} worst_status=${worstStatus}`, + `poll complete: source=${sourceId} subscription=${subscription.kind}:${subscription.identifier} tracked=${tracked.length} parsed=${outcome.parsed} matched=${outcome.matched} recorded=${outcome.recorded} status=${outcome.upstreamStatus}${outcome.notModified ? " (304)" : ""}`, ); return { - notModified: false, - upstreamStatus: worstStatus, - ...(lastEtag !== null ? { etag: lastEtag } : {}), + notModified: outcome.notModified, + upstreamStatus: outcome.upstreamStatus, + ...(outcome.etag !== null ? { etag: outcome.etag } : {}), }; } @@ -376,6 +385,58 @@ async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<Rel // Plugin Initialization // ============================================================================= +/** + * Send the desired-state list of source rows to the host. Called from + * `onInitialize` (after the host has installed the releases reverse-RPC + * handler) so the plugin's source rows are materialized whenever the + * config changes. + * + * Retries on `METHOD_NOT_FOUND` with linear backoff: the host installs the + * releases handler shortly after `initialize` returns, and there is a small + * race window where the plugin's first reverse-RPC call may land before the + * handler is in place. 
+ */ +export async function registerSources( + rpc: HostRpcClient, + subscriptions: UploaderSubscription[], +): Promise<{ registered: number; pruned: number } | null> { + const sources = subscriptions.map((sub) => ({ + sourceKey: subscriptionToSourceKey(sub), + displayName: displayNameFor(sub), + kind: "rss-uploader" as const, + config: { subscription: { kind: sub.kind, identifier: sub.identifier } }, + })); + + const maxAttempts = 5; + for (let attempt = 1; attempt <= maxAttempts; attempt++) { + try { + return await rpc.call<{ registered: number; pruned: number }>( + RELEASES_METHODS.REGISTER_SOURCES, + { sources }, + ); + } catch (err) { + const isMethodNotFound = err instanceof HostRpcError && err.code === -32601; + if (isMethodNotFound && attempt < maxAttempts) { + // Wait for the host to finish installing the releases reverse-RPC + // handler. Linear backoff: 50ms, 100ms, 150ms, 200ms. + await new Promise((r) => setTimeout(r, 50 * attempt)); + continue; + } + const reason = err instanceof Error ? err.message : String(err); + logger.error(`register_sources failed: ${reason}`); + return null; + } + } + return null; +} + +/** Human-readable label shown in the Release tracking settings table. */ +function displayNameFor(sub: UploaderSubscription): string { + if (sub.kind === "user") return `Nyaa: ${sub.identifier}`; + if (sub.kind === "query") return `Nyaa search: ${sub.identifier}`; + return `Nyaa params: ${sub.identifier}`; +} + createReleaseSourcePlugin({ manifest, provider: { @@ -387,7 +448,7 @@ createReleaseSourcePlugin({ }, }, logLevel: "info", - onInitialize(params: InitializeParams) { + async onInitialize(params: InitializeParams) { state.hostRpc = params.hostRpc; const ac = params.adminConfig ?? 
{}; if (typeof ac.uploaders === "string") { @@ -402,6 +463,17 @@ createReleaseSourcePlugin({ logger.info( `initialized: subscriptions=${state.subscriptions.length} timeoutMs=${state.requestTimeoutMs} minConfidence=${state.minConfidence} defaultPoll=${DEFAULT_POLL_INTERVAL_S}s`, ); + + // Materialize source rows. Deferred to a microtask + retry on + // METHOD_NOT_FOUND so we run *after* the host installs the releases + // reverse-RPC handler (it does so right after `initialize` returns). + queueMicrotask(() => { + void registerSources(params.hostRpc, state.subscriptions).then((result) => { + if (result) { + logger.info(`register_sources: registered=${result.registered} pruned=${result.pruned}`); + } + }); + }); }, }); diff --git a/plugins/release-nyaa/src/manifest.ts b/plugins/release-nyaa/src/manifest.ts index 16dceb43..e1e5f58f 100644 --- a/plugins/release-nyaa/src/manifest.ts +++ b/plugins/release-nyaa/src/manifest.ts @@ -43,11 +43,11 @@ export const manifest = { key: "uploaders", label: "Uploader Subscriptions", description: - "Comma-separated list of trusted uploader handles or queries. Each entry is either `username` (a Nyaa user feed) or `q:<query>` (a fallback site-wide search filter, useful for groups without a dedicated account). Confidence stays above the rejection threshold only for entries that match a tracked series alias.", + "Comma-separated list of trusted uploader handles or queries. Each entry is one of: `username` (a Nyaa user feed); `q:<query>` (a plain site-wide search); or `q:?<params>` (URL-style allowlisted params: `q`, `c`, `f`, `u` — e.g. `q:?c=3_1&q=Berserk` to search the Literature → English-translated category). 
Confidence stays above the rejection threshold only for entries that match a tracked series alias.", type: "string" as const, required: false, default: "", - example: "1r0n,TankobonBlur,q:LuminousScans", + example: "1r0n,TankobonBlur,q:LuminousScans,q:?c=3_1&q=Berserk", }, { key: "requestTimeoutMs", @@ -73,7 +73,7 @@ export const manifest = { userDescription: "Watches Nyaa.si uploader feeds for new releases of tracked series. Matches by title alias — make sure your series' aliases (auto-populated from metadata or added manually in the Tracking panel) cover the way the uploader names them. Notification-only — Codex never downloads anything.", adminSetupInstructions: - "1. Configure the Uploader Subscriptions field with a comma-separated list of trusted uploader handles (e.g. `1r0n,TankobonBlur`). Use `q:<query>` for groups without a Nyaa account. 2. Make sure tracked series have aliases that match how the uploader names releases (e.g. include alternate spellings, romanizations, the volume-ranges tag uploaders use). 3. The plugin polls the uploader feeds at the configured interval; any release whose title matches a tracked alias is recorded as a candidate. Filtering by formats / `(Digital)` tag happens at parse time and is logged but doesn't reject candidates by default.", + "1. Set the **Uploaders** config field to a comma-separated list. Each entry is one of: `username` (a Nyaa user feed, e.g. `tsuna69`), `q:<query>` (a plain site-wide search, e.g. `q:LuminousScans`), or `q:?<params>` (URL-style search with allowlisted keys `q`, `c`, `f`, `u`, e.g. `q:?c=3_1&q=Berserk` for the English-translated Literature category). 2. Save. The plugin restarts and the host materializes one row per entry in **Settings → Release tracking** — that's where you flip rows on/off, override the poll interval, or hit *Poll now*. 3. Make sure tracked series have aliases that match how the uploader names releases (alternate spellings, romanizations, volume-range tags). 
The plugin auto-prunes rows when you remove an entry from the list and re-save, so the Release tracking table stays in sync with this CSV.", } as const satisfies PluginManifest & { capabilities: { releaseSource: { kinds: ["rss-uploader"] } }; }; diff --git a/plugins/sdk-typescript/src/types/releases.ts b/plugins/sdk-typescript/src/types/releases.ts index d5c506cd..f5765bcf 100644 --- a/plugins/sdk-typescript/src/types/releases.ts +++ b/plugins/sdk-typescript/src/types/releases.ts @@ -27,6 +27,18 @@ export const RELEASES_METHODS = { SOURCE_STATE_GET: "releases/source_state/get", /** Set persisted per-source state (etag only — other fields are host-owned). */ SOURCE_STATE_SET: "releases/source_state/set", + /** + * Replace the set of `release_sources` rows owned by this plugin. + * + * Plugins call this from `onInitialize` (and after any config change, which + * triggers a process restart that re-runs `onInitialize`). Each call carries + * the plugin's full desired-state list; the host upserts every entry on + * `(plugin_id, source_key)` and prunes rows whose `source_key` is not in + * the request. User-managed fields (`enabled`, `pollIntervalS`) are + * preserved across re-registrations so an admin's overrides aren't + * trampled by a plugin restart. + */ + REGISTER_SOURCES: "releases/register_sources", } as const; // ============================================================================= @@ -152,6 +164,48 @@ export interface SourceStateSetRequest { etag?: string; } +// ============================================================================= +// releases/register_sources +// ============================================================================= + +/** + * One source the plugin wants the host to materialize as a `release_sources` + * row. The plugin owns the `sourceKey` namespace; the host treats it as an + * opaque string for dedup keyed on `(pluginId, sourceKey)`. 
+ */ +export interface RegisteredSourceInput { + /** + * Stable per-plugin identifier. Reuse the same key across calls so user + * overrides (enabled, pollIntervalS) survive plugin restarts. + */ + sourceKey: string; + /** Human-readable label shown in the Release tracking settings UI. */ + displayName: string; + /** + * Must be one of the kinds the plugin declared in its + * `releaseSource.kinds` capability — the host rejects anything else. + */ + kind: "rss-uploader" | "rss-series" | "api-feed" | "metadata-feed"; + /** + * Optional opaque per-source config snapshot persisted on the row. The + * host doesn't interpret it; the plugin reads its own admin config + * directly. Useful for surfacing "what did this source originate from?" + * in the UI / logs. + */ + config?: Record<string, unknown> | null; +} + +export interface RegisterSourcesRequest { + sources: RegisteredSourceInput[]; +} + +export interface RegisterSourcesResponse { + /** Number of sources upserted (created or refreshed). */ + registered: number; + /** Number of sources removed because they were not in the request. */ + pruned: number; +} + // ============================================================================= // releases/poll (host -> plugin) // ============================================================================= @@ -159,10 +213,23 @@ export interface SourceStateSetRequest { /** * Parameters for the host's call into a release-source plugin's * `releases/poll` handler. Carries the source row to poll plus any ETag the - * plugin recorded on its previous poll. + * plugin recorded on its previous poll, plus the plugin-defined source key + * and per-source config snapshot so the plugin can dispatch directly without + * a reverse-RPC roundtrip. */ export interface ReleasePollRequest { sourceId: string; + /** + * The same `sourceKey` the plugin passed to `releases/register_sources`. 
+ * Useful when one plugin process owns multiple source rows (e.g., one per + * Nyaa uploader) and needs to know which one to poll. + */ + sourceKey?: string; + /** + * Snapshot of `release_sources.config` for this row. Plugins that stash + * per-source config on register can read it back here. + */ + config?: Record<string, unknown> | null; etag?: string; } diff --git a/src/commands/seed.rs b/src/commands/seed.rs index 0c24a327..7388d3df 100644 --- a/src/commands/seed.rs +++ b/src/commands/seed.rs @@ -66,6 +66,12 @@ pub struct SeedPluginConfig { pub credential_delivery: String, #[serde(default)] pub credentials: Option<serde_json::Value>, + /// Optional admin-side plugin configuration (the same JSON object that + /// the user would paste into "Configuration" in the plugin edit dialog). + /// Persisted on the plugin row so the plugin process receives it via + /// `InitializeParams.adminConfig` on first start. + #[serde(default, alias = "admin_config")] + pub config: Option<serde_json::Value>, #[serde(default = "default_true")] pub enabled: bool, } @@ -332,7 +338,7 @@ async fn seed_plugins( vec![], // library_ids (empty = all libraries) plugin_cfg.credentials.as_ref(), // credentials &plugin_cfg.credential_delivery, // credential_delivery - None, // config + plugin_cfg.config.clone(), // admin config plugin_cfg.enabled, None, // created_by None, // rate_limit_requests_per_minute diff --git a/src/commands/serve.rs b/src/commands/serve.rs index d498f7bc..f8d2ebe8 100644 --- a/src/commands/serve.rs +++ b/src/commands/serve.rs @@ -286,7 +286,8 @@ pub async fn serve_command(config_path: PathBuf) -> anyhow::Result<()> { )) .with_metrics_service(plugin_metrics_service.clone()) .with_plugin_file_storage(plugin_file_storage.clone()) - .with_event_broadcaster(event_broadcaster.clone()), + .with_event_broadcaster(event_broadcaster.clone()) + .with_scheduler(scheduler.clone()), ); // Load enabled plugins from database match plugin_manager.load_all().await { diff --git 
a/src/db/repositories/release_sources.rs b/src/db/repositories/release_sources.rs index bcd61432..35c81c32 100644 --- a/src/db/repositories/release_sources.rs +++ b/src/db/repositories/release_sources.rs @@ -142,6 +142,65 @@ impl ReleaseSourceRepository { Self::create(db, params).await } + /// Idempotent upsert keyed on `(plugin_id, source_key)`. + /// + /// On insert, the row is created with `params` and defaults to enabled. + /// On update, **only the plugin-owned descriptive fields** are refreshed + /// (`display_name`, `kind`, `config`). User-managed fields (`enabled`, + /// `poll_interval_s`) are preserved so an admin's interval override or + /// disable toggle survives a plugin re-registration. + /// + /// Used by `releases/register_sources` so a plugin can declare its full + /// desired-state list on every initialize without trampling user choices. + pub async fn upsert( + db: &DatabaseConnection, + params: NewReleaseSource, + ) -> Result<ReleaseSource> { + if !kind::is_valid(¶ms.kind) { + anyhow::bail!("invalid kind: {}", params.kind); + } + if let Some(existing) = Self::find_by_key(db, ¶ms.plugin_id, ¶ms.source_key).await? + { + let mut active: release_sources::ActiveModel = existing.into(); + active.display_name = Set(params.display_name); + active.kind = Set(params.kind); + active.config = Set(params.config); + active.updated_at = Set(Utc::now()); + return Ok(active.update(db).await?); + } + Self::create(db, params).await + } + + /// Return every row owned by `plugin_id`, ordered by `source_key`. + pub async fn list_by_plugin( + db: &DatabaseConnection, + plugin_id: &str, + ) -> Result<Vec<ReleaseSource>> { + Ok(ReleaseSources::find() + .filter(release_sources::Column::PluginId.eq(plugin_id)) + .order_by_asc(release_sources::Column::SourceKey) + .all(db) + .await?) + } + + /// Delete every row owned by `plugin_id` whose `source_key` is **not** in + /// `keep_keys`. Returns the number of rows removed. Cascades to + /// `release_ledger`. 
Used by `register_sources` to prune sources that the + /// plugin no longer declares. + pub async fn delete_by_plugin_excluding( + db: &DatabaseConnection, + plugin_id: &str, + keep_keys: &[String], + ) -> Result<u64> { + let mut query = + ReleaseSources::delete_many().filter(release_sources::Column::PluginId.eq(plugin_id)); + if !keep_keys.is_empty() { + query = query.filter(release_sources::Column::SourceKey.is_not_in(keep_keys.to_vec())); + } + let result = query.exec(db).await?; + Ok(result.rows_affected) + } + /// Apply a PATCH-style update. pub async fn update( db: &DatabaseConnection, @@ -419,6 +478,154 @@ mod tests { assert!(result.is_err(), "duplicate key must fail"); } + #[tokio::test] + async fn upsert_creates_when_missing_and_preserves_user_fields_on_update() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + // First call creates the row. + let created = ReleaseSourceRepository::upsert(conn, nyaa_source()) + .await + .unwrap(); + assert!(created.enabled); + assert_eq!(created.poll_interval_s, 3600); + + // Admin disables and overrides interval. + ReleaseSourceRepository::update( + conn, + created.id, + ReleaseSourceUpdate { + enabled: Some(false), + poll_interval_s: Some(900), + ..Default::default() + }, + ) + .await + .unwrap(); + + // Plugin re-registers with a different display name, kind, and config. 
+ let mut params = nyaa_source(); + params.display_name = "Nyaa: tsuna69 (refreshed)".to_string(); + params.config = Some(serde_json::json!({ "subscription": "tsuna69" })); + params.poll_interval_s = 7200; // would-be interval is ignored on update + let updated = ReleaseSourceRepository::upsert(conn, params).await.unwrap(); + + assert_eq!(updated.id, created.id, "same key returns same row"); + assert_eq!(updated.display_name, "Nyaa: tsuna69 (refreshed)"); + assert_eq!( + updated.config, + Some(serde_json::json!({ "subscription": "tsuna69" })) + ); + assert!( + !updated.enabled, + "user-set enabled flag must survive a plugin re-register" + ); + assert_eq!( + updated.poll_interval_s, 900, + "user-set poll_interval_s must survive a plugin re-register" + ); + } + + #[tokio::test] + async fn list_by_plugin_returns_only_that_plugins_rows() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + let mut other = nyaa_source(); + other.plugin_id = "release-mangaupdates".to_string(); + other.source_key = "default".to_string(); + ReleaseSourceRepository::create(conn, other).await.unwrap(); + + let nyaa = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert_eq!(nyaa.len(), 1); + assert_eq!(nyaa[0].plugin_id, "release-nyaa"); + } + + #[tokio::test] + async fn delete_by_plugin_excluding_prunes_missing_keys() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let mut a = nyaa_source(); + a.source_key = "user:tsuna69".to_string(); + let mut b = nyaa_source(); + b.source_key = "user:other".to_string(); + let mut c = nyaa_source(); + c.source_key = "user:gone".to_string(); + ReleaseSourceRepository::create(conn, a).await.unwrap(); + ReleaseSourceRepository::create(conn, b).await.unwrap(); + ReleaseSourceRepository::create(conn, c).await.unwrap(); + + // Keep only the first two. 
+ let keep = vec!["user:tsuna69".to_string(), "user:other".to_string()]; + let removed = + ReleaseSourceRepository::delete_by_plugin_excluding(conn, "release-nyaa", &keep) + .await + .unwrap(); + assert_eq!(removed, 1); + + let remaining = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert_eq!(remaining.len(), 2); + let keys: Vec<&str> = remaining.iter().map(|r| r.source_key.as_str()).collect(); + assert!(keys.contains(&"user:tsuna69")); + assert!(keys.contains(&"user:other")); + } + + #[tokio::test] + async fn delete_by_plugin_excluding_with_empty_keep_removes_all() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + let mut other = nyaa_source(); + other.source_key = "user:other".to_string(); + ReleaseSourceRepository::create(conn, other).await.unwrap(); + + let removed = + ReleaseSourceRepository::delete_by_plugin_excluding(conn, "release-nyaa", &[]) + .await + .unwrap(); + assert_eq!(removed, 2); + + let remaining = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert!(remaining.is_empty()); + } + + #[tokio::test] + async fn delete_by_plugin_excluding_does_not_touch_other_plugins() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + let mut other = nyaa_source(); + other.plugin_id = "release-mangaupdates".to_string(); + other.source_key = "default".to_string(); + ReleaseSourceRepository::create(conn, other).await.unwrap(); + + // Wipe everything for nyaa; mangaupdates row must survive. 
+ ReleaseSourceRepository::delete_by_plugin_excluding(conn, "release-nyaa", &[]) + .await + .unwrap(); + + let mu = ReleaseSourceRepository::list_by_plugin(conn, "release-mangaupdates") + .await + .unwrap(); + assert_eq!(mu.len(), 1); + } + #[tokio::test] async fn delete_removes_row() { let (db, _temp) = create_test_db().await; diff --git a/src/services/plugin/handle.rs b/src/services/plugin/handle.rs index 55f60f6f..66ba0f82 100644 --- a/src/services/plugin/handle.rs +++ b/src/services/plugin/handle.rs @@ -151,6 +151,9 @@ pub struct PluginHandle { /// Optional event broadcaster used by handlers that emit cross-process /// notifications (releases handler emits `ReleaseAnnounced`). event_broadcaster: Option<Arc<crate::events::EventBroadcaster>>, + /// Optional scheduler reference so the releases handler can reconcile + /// release-source schedules immediately after `releases/register_sources`. + scheduler: Option<Arc<tokio::sync::Mutex<crate::scheduler::Scheduler>>>, } impl PluginHandle { @@ -165,6 +168,7 @@ impl PluginHandle { storage_handler: None, release_db: None, event_broadcaster: None, + scheduler: None, } } @@ -182,6 +186,7 @@ impl PluginHandle { storage_handler: Some(storage_handler), release_db: None, event_broadcaster: None, + scheduler: None, } } @@ -203,6 +208,17 @@ impl PluginHandle { self } + /// Attach a scheduler reference so the releases reverse-RPC handler can + /// trigger a release-source reconcile when the plugin calls + /// `releases/register_sources`. Builder-style. 
+ pub fn with_scheduler( + mut self, + scheduler: Arc<tokio::sync::Mutex<crate::scheduler::Scheduler>>, + ) -> Self { + self.scheduler = Some(scheduler); + self + } + /// Get the current plugin state pub async fn state(&self) -> PluginState { self.state.read().await.clone() @@ -332,6 +348,7 @@ impl PluginHandle { let plugin_name = manifest.name.clone(); let release_db = self.release_db.clone(); let event_broadcaster = self.event_broadcaster.clone(); + let scheduler = self.scheduler.clone(); client .update_reverse_ctx(move |ctx| { ctx.set_capabilities(manifest_for_ctx.capabilities.clone()); @@ -343,6 +360,9 @@ impl PluginHandle { if let Some(b) = event_broadcaster { handler = handler.with_event_broadcaster(b); } + if let Some(s) = scheduler { + handler = handler.with_scheduler(s); + } ctx.set_releases_handler(handler); } }) diff --git a/src/services/plugin/manager.rs b/src/services/plugin/manager.rs index e4e36c70..dacf857a 100644 --- a/src/services/plugin/manager.rs +++ b/src/services/plugin/manager.rs @@ -335,6 +335,10 @@ pub struct PluginManager { /// Optional event broadcaster handed to per-plugin handles so reverse-RPC /// handlers (releases/record) can emit cross-process notifications. event_broadcaster: Option<Arc<crate::events::EventBroadcaster>>, + /// Optional scheduler handle so the releases reverse-RPC handler can + /// trigger a release-source reconcile when a plugin calls + /// `releases/register_sources`. + scheduler: Option<Arc<tokio::sync::Mutex<crate::scheduler::Scheduler>>>, } impl PluginManager { @@ -350,6 +354,7 @@ impl PluginManager { metrics_service: None, plugin_file_storage: None, event_broadcaster: None, + scheduler: None, } } @@ -384,6 +389,17 @@ impl PluginManager { self } + /// Hand the scheduler to per-plugin handles so the releases reverse-RPC + /// handler can reconcile release-source schedules when a plugin calls + /// `releases/register_sources`. Builder-style. 
+ pub fn with_scheduler( + mut self, + scheduler: Arc<tokio::sync::Mutex<crate::scheduler::Scheduler>>, + ) -> Self { + self.scheduler = Some(scheduler); + self + } + /// Load all enabled plugins from database pub async fn load_all(&self) -> Result<usize, PluginManagerError> { debug!("Loading enabled plugins from database..."); @@ -603,6 +619,9 @@ impl PluginManager { if let Some(ref b) = self.event_broadcaster { handle = handle.with_event_broadcaster(b.clone()); } + if let Some(ref s) = self.scheduler { + handle = handle.with_scheduler(s.clone()); + } // Start the plugin match handle.start().await { @@ -743,6 +762,9 @@ impl PluginManager { if let Some(ref b) = self.event_broadcaster { handle = handle.with_event_broadcaster(b.clone()); } + if let Some(ref s) = self.scheduler { + handle = handle.with_scheduler(s.clone()); + } // Start the plugin match handle.start().await { diff --git a/src/services/plugin/protocol.rs b/src/services/plugin/protocol.rs index af43ccd9..e388158a 100644 --- a/src/services/plugin/protocol.rs +++ b/src/services/plugin/protocol.rs @@ -259,6 +259,10 @@ pub mod methods { pub const RELEASES_SOURCE_STATE_GET: &str = "releases/source_state/get"; /// Set persisted state for a release source. pub const RELEASES_SOURCE_STATE_SET: &str = "releases/source_state/set"; + /// Replace the set of release-source rows owned by this plugin. + /// The host upserts each entry by `(plugin_id, source_key)` and prunes + /// rows whose `source_key` is no longer in the input list. + pub const RELEASES_REGISTER_SOURCES: &str = "releases/register_sources"; } // ============================================================================= @@ -437,6 +441,20 @@ pub enum ReleaseSourceKind { MetadataFeed, } +impl ReleaseSourceKind { + /// Canonical kebab-case string matching `release_sources.kind` and the + /// serde representation. Used when comparing against string-typed + /// `kind` fields parsed from RPC requests. 
+ pub fn as_str(&self) -> &'static str { + match self { + Self::RssUploader => "rss-uploader", + Self::RssSeries => "rss-series", + Self::ApiFeed => "api-feed", + Self::MetadataFeed => "metadata-feed", + } + } +} + impl PluginCapabilities { /// Check if the plugin can provide series metadata pub fn can_provide_series_metadata(&self) -> bool { @@ -1406,6 +1424,18 @@ pub struct ReleasePollRequest { /// `releases/source_state/get` for richer state (etag, last_polled_at) /// or `releases/list_tracked` to harvest the tracked-series scope. pub source_id: uuid::Uuid, + /// Plugin-defined stable key for this source row (the same value the + /// plugin originally passed to `releases/register_sources`). Carried in + /// the poll request so the plugin can dispatch directly without a + /// reverse-RPC roundtrip — useful when one plugin process owns multiple + /// source rows (e.g., one per Nyaa uploader). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub source_key: Option<String>, + /// Snapshot of `release_sources.config` at poll time, if any. Plugins + /// that store per-source config on register can read it back here to + /// avoid keeping their own `(sourceKey, config)` map in memory. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub config: Option<serde_json::Value>, /// Etag value from the previous successful poll, if any. Plugins doing /// HTTP conditional GETs (`If-None-Match`) can use it directly. #[serde(default, skip_serializing_if = "Option::is_none")] diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index 69fa320e..2d8bd441 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -11,25 +11,30 @@ //! and validation. 
use std::collections::HashMap; +use std::sync::Arc; use chrono::{DateTime, Utc}; use sea_orm::DatabaseConnection; use serde::{Deserialize, Serialize}; use serde_json::Value; -use tracing::{debug, error, warn}; +use tokio::sync::Mutex; +use tracing::{debug, error, info, warn}; use uuid::Uuid; use super::protocol::{ JsonRpcError, JsonRpcRequest, JsonRpcResponse, ReleaseSourceCapability, RequestId, error_codes, methods, }; +use crate::db::entities::release_sources::kind as source_kind; use crate::db::repositories::{ - ReleaseLedgerRepository, ReleaseSourceRepository, SeriesAliasRepository, + NewReleaseSource, ReleaseLedgerRepository, ReleaseSourceRepository, SeriesAliasRepository, SeriesExternalIdRepository, SeriesTrackingRepository, TrackingUpdate, }; +use crate::scheduler::Scheduler; use crate::services::release::candidate::ReleaseCandidate; use crate::services::release::languages::{includes, resolve_for_series}; use crate::services::release::matcher::{evaluate, resolve_threshold}; +use crate::services::release::schedule::{DEFAULT_POLL_INTERVAL_S, MIN_POLL_INTERVAL_S}; /// Default page size for `releases/list_tracked` when the caller doesn't /// specify one. Matches the Phase 3 risk-mitigation note. @@ -55,6 +60,9 @@ pub struct ReleasesRequestHandler { /// Optional event broadcaster used to emit `ReleaseAnnounced` events on /// successful (non-deduped) `releases/record` inserts. event_broadcaster: Option<std::sync::Arc<crate::events::EventBroadcaster>>, + /// Optional scheduler reference used by `releases/register_sources` to + /// reconcile schedules immediately after the source set changes. 
+ scheduler: Option<Arc<Mutex<Scheduler>>>, } impl ReleasesRequestHandler { @@ -68,6 +76,7 @@ impl ReleasesRequestHandler { plugin_name, capability, event_broadcaster: None, + scheduler: None, } } @@ -81,6 +90,13 @@ impl ReleasesRequestHandler { self } + /// Attach a scheduler reference so `releases/register_sources` reconciles + /// schedules without waiting for a server restart. Builder-style. + pub fn with_scheduler(mut self, scheduler: Arc<Mutex<Scheduler>>) -> Self { + self.scheduler = Some(scheduler); + self + } + /// Handle a `releases/*` JSON-RPC request and return a response. pub async fn handle_request(&self, request: &JsonRpcRequest) -> JsonRpcResponse { let id = request.id.clone(); @@ -97,6 +113,7 @@ impl ReleasesRequestHandler { methods::RELEASES_RECORD => self.handle_record(request).await, methods::RELEASES_SOURCE_STATE_GET => self.handle_state_get(request).await, methods::RELEASES_SOURCE_STATE_SET => self.handle_state_set(request).await, + methods::RELEASES_REGISTER_SOURCES => self.handle_register_sources(request).await, _ => JsonRpcResponse::error( Some(id), JsonRpcError::new( @@ -490,6 +507,171 @@ impl ReleasesRequestHandler { } } + /// Replace the set of `release_sources` rows owned by this plugin. + /// + /// This is the materialization endpoint plugins call from `onInitialize` + /// (and on any subsequent config change, which is delivered via plugin + /// process restart). Each call carries the plugin's full desired-state + /// list: + /// + /// - **Upsert** every entry on `(plugin_id, source_key)`. New rows are + /// inserted; existing rows have only the plugin-owned descriptive + /// fields refreshed. User-managed fields (`enabled`, `poll_interval_s`) + /// survive across re-registrations so an admin's interval override or + /// disable toggle isn't trampled when the plugin restarts. + /// - **Prune** rows owned by this plugin whose `source_key` is not in the + /// request. Deletes cascade to `release_ledger`. 
An empty `sources` + /// list wipes the plugin's row set, which is the correct behavior when + /// an admin clears the plugin's config. + /// - **Reconcile** the scheduler so newly-registered sources start polling + /// on their next cron tick (and pruned ones stop). Best-effort: if the + /// reconcile fails (or no scheduler is wired), the call still succeeds + /// because the row writes are persisted. + /// + /// `kind` is validated against the `release_source` capability the plugin + /// declared in its manifest, so a plugin can't register sources of a + /// `kind` outside its declared capability surface. `poll_interval_s` is + /// taken from the request only when creating new rows; updates ignore it. + async fn handle_register_sources(&self, request: &JsonRpcRequest) -> JsonRpcResponse { + let id = request.id.clone(); + let params: RegisterSourcesRequest = match parse_params(&request.params) { + Ok(p) => p, + Err(resp) => return resp.with_id(id), + }; + + // Validate every source up front so we don't write partial state on a + // bad request. 
+ for src in ¶ms.sources { + if src.source_key.trim().is_empty() { + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new(error_codes::INVALID_PARAMS, "source_key cannot be empty"), + ); + } + if src.display_name.trim().is_empty() { + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new(error_codes::INVALID_PARAMS, "display_name cannot be empty"), + ); + } + if !source_kind::is_valid(&src.kind) { + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new( + error_codes::INVALID_PARAMS, + format!("invalid kind: {}", src.kind), + ), + ); + } + if !self.capability.kinds.iter().any(|k| k.as_str() == src.kind) { + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new( + error_codes::INVALID_PARAMS, + format!( + "kind {} not declared in plugin's release_source capability", + src.kind + ), + ), + ); + } + } + // Reject duplicate source_keys in the same request — they would + // collapse to one row at upsert time and silently drop the second + // entry's display_name/config, which is almost always a plugin bug. + let mut seen: std::collections::HashSet<&str> = std::collections::HashSet::new(); + for src in ¶ms.sources { + if !seen.insert(src.source_key.as_str()) { + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new( + error_codes::INVALID_PARAMS, + format!("duplicate source_key in request: {}", src.source_key), + ), + ); + } + } + + // Resolve the per-source default poll interval. Used only when + // creating new rows; existing rows keep their interval. Falls back + // to the host-wide default when the plugin's manifest declares 0. + let raw = if self.capability.default_poll_interval_s == 0 { + DEFAULT_POLL_INTERVAL_S + } else { + self.capability.default_poll_interval_s + }; + let default_interval = (raw as i32).max(MIN_POLL_INTERVAL_S as i32); + + let keep_keys: Vec<String> = params + .sources + .iter() + .map(|s| s.source_key.clone()) + .collect(); + + // Upsert each source. 
+ let mut registered = 0u32; + for src in params.sources { + let new = NewReleaseSource { + plugin_id: self.plugin_name.clone(), + source_key: src.source_key, + display_name: src.display_name, + kind: src.kind, + poll_interval_s: default_interval, + enabled: None, + config: src.config, + }; + if let Err(e) = ReleaseSourceRepository::upsert(&self.db, new).await { + error!(error = %e, "release source upsert failed"); + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new(error_codes::INTERNAL_ERROR, format!("db error: {}", e)), + ); + } + registered += 1; + } + + // Prune sources the plugin no longer declares. + let pruned = match ReleaseSourceRepository::delete_by_plugin_excluding( + &self.db, + &self.plugin_name, + &keep_keys, + ) + .await + { + Ok(n) => n, + Err(e) => { + error!(error = %e, "release source prune failed"); + return JsonRpcResponse::error( + Some(id), + JsonRpcError::new(error_codes::INTERNAL_ERROR, format!("db error: {}", e)), + ); + } + }; + + info!( + plugin = %self.plugin_name, + registered, + pruned, + "release sources registered" + ); + + // Reconcile schedules. Best-effort — log failures but don't fail the + // RPC, since the rows are already persisted and the next scheduler + // start (or HTTP-driven reconcile) will catch up. + if let Some(ref scheduler) = self.scheduler { + let mut guard = scheduler.lock().await; + if let Err(e) = guard.reconcile_release_sources().await { + warn!(error = %e, "scheduler reconcile after register_sources failed"); + } + } + + let response = RegisterSourcesResponse { + registered, + pruned: pruned as u32, + }; + JsonRpcResponse::success(id, serde_json::to_value(response).unwrap()) + } + /// Confirm `source_id` exists and belongs to the calling plugin. Returns /// an error response if either check fails. 
async fn assert_source_belongs( @@ -601,6 +783,37 @@ struct SourceStateSetRequest { etag: Option<String>, } +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +struct RegisterSourcesRequest { + sources: Vec<RegisteredSourceInput>, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +struct RegisteredSourceInput { + /// Stable per-plugin identifier for the source. Opaque to the host. + source_key: String, + /// Human-readable label shown in the Release tracking settings table. + display_name: String, + /// One of the canonical `release_sources.kind` values; must also be + /// declared in the plugin's `release_source` capability. + kind: String, + /// Optional opaque per-source config snapshot. Stored on the row for + /// the host's reference; the plugin reads its own admin config directly. + #[serde(default)] + config: Option<Value>, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +struct RegisterSourcesResponse { + /// Number of sources upserted (created or refreshed). + registered: u32, + /// Number of sources removed because they were not in the request. 
+ pruned: u32, +} + #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct SourceStateView { @@ -665,10 +878,11 @@ pub fn is_releases_method(method: &str) -> bool { mod tests { use super::*; use crate::db::ScanningStrategy; - use crate::db::entities::release_sources::kind; + use crate::db::entities::release_sources::{self, kind}; use crate::db::repositories::{ - LibraryRepository, NewReleaseSource, ReleaseSourceRepository, SeriesAliasRepository, - SeriesExternalIdRepository, SeriesRepository, SeriesTrackingRepository, TrackingUpdate, + LibraryRepository, NewReleaseSource, ReleaseSourceRepository, ReleaseSourceUpdate, + SeriesAliasRepository, SeriesExternalIdRepository, SeriesRepository, + SeriesTrackingRepository, TrackingUpdate, }; use crate::db::test_helpers::create_test_db; use crate::services::plugin::protocol::ReleaseSourceKind; @@ -1470,4 +1684,313 @@ mod tests { "untracked series must not have its high-water mark moved" ); } + + // ------------------------------------------------------------------------- + // register_sources + // ------------------------------------------------------------------------- + + fn register_request(sources: Value) -> JsonRpcRequest { + make_request( + methods::RELEASES_REGISTER_SOURCES, + json!({ "sources": sources }), + ) + } + + #[tokio::test] + async fn register_sources_creates_rows_for_a_fresh_plugin() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let req = register_request(json!([ + { + "sourceKey": "user:tsuna69", + "displayName": "Nyaa: tsuna69", + "kind": "rss-uploader", + "config": { "subscription": { "kind": "user", "identifier": "tsuna69" } } + }, + { + "sourceKey": "query:LuminousScans", + "displayName": "Nyaa search: LuminousScans", + "kind": "rss-uploader" + } + ])); + let resp = handler.handle_request(&req).await; 
+ assert!(!resp.is_error(), "unexpected error: {:?}", resp.error); + let body: Value = resp.result.unwrap(); + assert_eq!(body["registered"], 2); + assert_eq!(body["pruned"], 0); + + let rows = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert_eq!(rows.len(), 2); + let by_key: HashMap<&str, &release_sources::Model> = + rows.iter().map(|r| (r.source_key.as_str(), r)).collect(); + assert!(by_key.contains_key("user:tsuna69")); + assert!(by_key.contains_key("query:LuminousScans")); + assert!( + by_key["user:tsuna69"].enabled, + "new rows default to enabled" + ); + } + + #[tokio::test] + async fn register_sources_prunes_rows_no_longer_declared() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + // First call creates two rows. + let _ = handler + .handle_request(&register_request(json!([ + { "sourceKey": "user:a", "displayName": "A", "kind": "rss-uploader" }, + { "sourceKey": "user:b", "displayName": "B", "kind": "rss-uploader" } + ]))) + .await; + + // Second call drops `user:b` and adds `user:c`. 
+ let resp = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "user:a", "displayName": "A", "kind": "rss-uploader" }, + { "sourceKey": "user:c", "displayName": "C", "kind": "rss-uploader" } + ]))) + .await; + assert!(!resp.is_error()); + let body: Value = resp.result.unwrap(); + assert_eq!(body["registered"], 2); + assert_eq!(body["pruned"], 1); + + let rows = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + let keys: Vec<&str> = rows.iter().map(|r| r.source_key.as_str()).collect(); + assert!(keys.contains(&"user:a")); + assert!(keys.contains(&"user:c")); + assert!(!keys.contains(&"user:b"), "stale source must be pruned"); + } + + #[tokio::test] + async fn register_sources_with_empty_list_wipes_plugins_rows() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let _ = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "user:a", "displayName": "A", "kind": "rss-uploader" } + ]))) + .await; + + let resp = handler.handle_request(&register_request(json!([]))).await; + assert!(!resp.is_error()); + let body: Value = resp.result.unwrap(); + assert_eq!(body["registered"], 0); + assert_eq!(body["pruned"], 1); + + let rows = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert!(rows.is_empty()); + } + + #[tokio::test] + async fn register_sources_preserves_user_managed_fields_on_re_register() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + // Initial register. 
+ let _ = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "user:tsuna69", "displayName": "Nyaa: tsuna69", "kind": "rss-uploader" } + ]))) + .await; + + // Admin disables it and pins a custom interval. + let row = ReleaseSourceRepository::find_by_key(conn, "release-nyaa", "user:tsuna69") + .await + .unwrap() + .unwrap(); + ReleaseSourceRepository::update( + conn, + row.id, + ReleaseSourceUpdate { + enabled: Some(false), + poll_interval_s: Some(900), + ..Default::default() + }, + ) + .await + .unwrap(); + + // Plugin re-registers (e.g., after restart) with a refreshed display name + new config. + let _ = handler + .handle_request(&register_request(json!([
 + { + "sourceKey": "user:tsuna69", + "displayName": "Nyaa: tsuna69 (refreshed)", + "kind": "rss-uploader", + "config": { "subscription": "fresh" } + } + ]))) + .await; + + let after = ReleaseSourceRepository::find_by_key(conn, "release-nyaa", "user:tsuna69") + .await + .unwrap() + .unwrap(); + assert_eq!(after.display_name, "Nyaa: tsuna69 (refreshed)"); + assert_eq!(after.config, Some(json!({ "subscription": "fresh" }))); + assert!(!after.enabled, "user-set disabled must survive re-register"); + assert_eq!( + after.poll_interval_s, 900, + "user-set poll_interval_s must survive re-register" + ); + } + + #[tokio::test] + async fn register_sources_does_not_touch_other_plugins_rows() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + + // Pre-existing source from a different plugin. 
+ ReleaseSourceRepository::create( + conn, + NewReleaseSource { + plugin_id: "release-mangaupdates".to_string(), + source_key: "default".to_string(), + display_name: "MangaUpdates".to_string(), + kind: kind::RSS_SERIES.to_string(), + poll_interval_s: 3600, + enabled: None, + config: None, + }, + ) + .await + .unwrap(); + + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + // Empty register from nyaa — must not nuke mangaupdates' row. + let _ = handler.handle_request(&register_request(json!([]))).await; + + let mu_rows = ReleaseSourceRepository::list_by_plugin(conn, "release-mangaupdates") + .await + .unwrap(); + assert_eq!(mu_rows.len(), 1); + } + + #[tokio::test] + async fn register_sources_rejects_kind_outside_capability() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + // Only declares rss-uploader. + make_capability(false, vec![]), + ); + + let resp = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "x", "displayName": "X", "kind": "rss-series" } + ]))) + .await; + assert!(resp.is_error()); + assert!(resp.error.unwrap().message.contains("not declared")); + + // Nothing was written. 
+ let rows = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert!(rows.is_empty()); + } + + #[tokio::test] + async fn register_sources_rejects_invalid_kind_string() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let resp = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "x", "displayName": "X", "kind": "frobnicate" } + ]))) + .await; + assert!(resp.is_error()); + assert!(resp.error.unwrap().message.contains("invalid kind")); + } + + #[tokio::test] + async fn register_sources_rejects_duplicate_keys_in_request() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let resp = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "dup", "displayName": "A", "kind": "rss-uploader" }, + { "sourceKey": "dup", "displayName": "B", "kind": "rss-uploader" } + ]))) + .await; + assert!(resp.is_error()); + assert!(resp.error.unwrap().message.contains("duplicate")); + let rows = ReleaseSourceRepository::list_by_plugin(conn, "release-nyaa") + .await + .unwrap(); + assert!(rows.is_empty(), "validation must run before any write"); + } + + #[tokio::test] + async fn register_sources_rejects_empty_source_key_or_display_name() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let resp1 = handler + .handle_request(&register_request(json!([
 + { "sourceKey": " ", "displayName": "X", "kind": "rss-uploader" } + ]))) + .await; + assert!(resp1.is_error()); + + let resp2 = handler + .handle_request(&register_request(json!([
 + { "sourceKey": "x", 
"displayName": " ", "kind": "rss-uploader" } + ]))) + .await; + assert!(resp2.is_error()); + } } diff --git a/src/tasks/handlers/poll_release_source.rs b/src/tasks/handlers/poll_release_source.rs index dfe1537f..6d2fb617 100644 --- a/src/tasks/handlers/poll_release_source.rs +++ b/src/tasks/handlers/poll_release_source.rs @@ -239,6 +239,8 @@ impl TaskHandler for PollReleaseSourceHandler { // Build the poll request. let req = ReleasePollRequest { source_id: source.id, + source_key: Some(source.source_key.clone()), + config: source.config.clone(), etag: source.etag.clone(), }; let timeout = self.task_request_timeout().await; diff --git a/web/src/components/series/SeriesReleasesPanel.test.tsx b/web/src/components/series/SeriesReleasesPanel.test.tsx index 9712822f..886b88f4 100644 --- a/web/src/components/series/SeriesReleasesPanel.test.tsx +++ b/web/src/components/series/SeriesReleasesPanel.test.tsx @@ -47,12 +47,10 @@ function entry(over: Partial<ReleaseLedgerEntry> = {}): ReleaseLedgerEntry { function paginated(entries: ReleaseLedgerEntry[]): PaginatedReleases { return { data: entries, - pagination: { - page: 1, - pageSize: 100, - total: entries.length, - totalPages: 1, - }, + page: 1, + pageSize: 100, + total: entries.length, + totalPages: 1, links: { self: "/api/v1/series/x/releases", }, diff --git a/web/src/components/series/SeriesReleasesPanel.tsx b/web/src/components/series/SeriesReleasesPanel.tsx index 6673b4ef..d22d2e36 100644 --- a/web/src/components/series/SeriesReleasesPanel.tsx +++ b/web/src/components/series/SeriesReleasesPanel.tsx @@ -113,7 +113,7 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { <IconRss size={18} /> <Text fw={600}>Releases</Text> <Badge color="gray" variant="light" size="sm"> - {data?.pagination.total ?? 0} + {data?.total ?? 
0} </Badge> {isMuted && ( <Badge color="orange" variant="light" size="sm"> diff --git a/web/src/pages/ReleasesInbox.test.tsx b/web/src/pages/ReleasesInbox.test.tsx index 989696c6..7258a1c9 100644 --- a/web/src/pages/ReleasesInbox.test.tsx +++ b/web/src/pages/ReleasesInbox.test.tsx @@ -46,12 +46,10 @@ function entry(over: Partial<ReleaseLedgerEntry> = {}): ReleaseLedgerEntry { function paginated(entries: ReleaseLedgerEntry[]): PaginatedReleases { return { data: entries, - pagination: { - page: 1, - pageSize: 50, - total: entries.length, - totalPages: 1, - }, + page: 1, + pageSize: 50, + total: entries.length, + totalPages: 1, links: { self: "/api/v1/releases", }, diff --git a/web/src/pages/ReleasesInbox.tsx b/web/src/pages/ReleasesInbox.tsx index 604ece75..f88374ae 100644 --- a/web/src/pages/ReleasesInbox.tsx +++ b/web/src/pages/ReleasesInbox.tsx @@ -76,8 +76,8 @@ export function ReleasesInbox() { const markAcquired = useMarkReleaseAcquired(); const entries = data?.data ?? []; - const total = data?.pagination.total ?? 0; - const totalPages = data?.pagination.totalPages ?? 1; + const total = data?.total ?? 0; + const totalPages = data?.totalPages ?? 1; return ( <Box p="md"> From b70573816098f35b8761d53127af8389578afd66 Mon Sep 17 00:00:00 2001 From: Sylvain Cau <ashdevfr@gmail.com> Date: Mon, 4 May 2026 21:39:25 -0700 Subject: [PATCH 12/29] feat(release-tracking): seed tracking on toggle, bulk track menu, last-poll summary MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make release-tracking discoverable and actionable instead of an empty-form setup wizard. Toggling a series to tracked now auto-seeds matcher aliases, latest_known_*, and per-axis track_chapters/volumes flags from data the system already has, so users don't have to manually fill in the panel before notifications work. The Tracking panel collapses to a one-line summary by default and is hidden entirely on libraries with no covering release-source plugin. 
Backend - New seed_tracking_for_series service: Latin-script aliases from series.name + metadata + alternate titles, latest_known_* from local max chapter/volume, track_* inferred from book classification. Aliases are append-only; tracking flags overwrite on re-seed. - PATCH /series/{id}/tracking runs the seed on a false→true tracked flip before applying the user's patch (explicit overrides win). - New POST /series/bulk/track-for-releases and untrack-for-releases endpoints with per-series outcome reporting (tracked / skipped / errored). Mirrors the existing bulk-mark-as-read shape. - New release_sources.last_summary column populated by the poll task with a one-line outcome ("Fetched 12 items, recorded 1 (7 already in ledger), dropped 4 below threshold", "Up to date — upstream returned 304", etc.). Exposed on the source DTO. - New GET /release-sources/applicability endpoint (SeriesRead) returns whether any enabled release-source plugin applies to a given library and the friendly plugin display names. - BackfillTrackingFromMetadata task delegates to the seed service so there's one canonical implementation across the per-series PATCH, bulk endpoints, and the maintenance task. Frontend - New useReleaseTrackingApplicability hook drives three UI gates: the per-series Tracking panel + Releases tab, the bulk-selection menu entries, and the global navigation entry. - BulkSelectionToolbar gains "Track for releases" / "Don't track for releases" entries (gated on applicability), replacing the previous N-PATCH loop with single bulk endpoint calls. - TrackingPanel renders as a compact one-line summary by default (status, latest known marks, alias count); details are collapsible. Auto-seeding eliminates the empty-form first-track UX. - Release tracking settings table surfaces last_summary under the per-row last-polled timestamp and as the OK badge tooltip, so users can see why a poll returned no announcements without grepping logs. 
- Tracking panel + Releases tab hidden on series whose library has no applicable release-source plugin, eliminating dead-end UI. Tests cover seeding (Latin filter, idempotency, axis inference, overwrite-on-re-seed, manual-alias preservation), the new bulk endpoints, last_summary string formatting, and the updated TrackingPanel and BulkSelectionToolbar UX. --- docs/api/openapi.json | 227 ++++++++ migration/src/lib.rs | 4 + ...000077_add_release_sources_last_summary.rs | 40 ++ src/api/docs.rs | 6 + src/api/routes/v1/dto/release.rs | 7 + src/api/routes/v1/dto/tracking.rs | 37 ++ src/api/routes/v1/handlers/bulk.rs | 264 ++++++++- src/api/routes/v1/handlers/releases.rs | 116 +++- src/api/routes/v1/handlers/tracking.rs | 24 + src/api/routes/v1/routes/releases.rs | 6 + src/api/routes/v1/routes/series.rs | 8 + src/db/entities/release_sources.rs | 7 + src/db/repositories/release_sources.rs | 13 +- src/services/plugin/manager.rs | 141 +++-- src/services/plugin/permissions.rs | 4 +- src/services/plugin/releases_handler.rs | 3 + src/services/plugin/rpc.rs | 15 +- src/services/release/mod.rs | 4 + src/services/release/seed.rs | 525 ++++++++++++++++++ src/tasks/handlers/backfill_tracking.rs | 151 ++--- src/tasks/handlers/poll_release_source.rs | 137 ++++- web/openapi.json | 227 ++++++++ web/src/api/releases.ts | 25 + web/src/api/series.ts | 34 ++ .../library/BulkSelectionToolbar.test.tsx | 70 ++- .../library/BulkSelectionToolbar.tsx | 93 ++-- .../components/series/TrackingPanel.test.tsx | 16 +- web/src/components/series/TrackingPanel.tsx | 383 +++++++------ .../hooks/useReleaseTrackingApplicability.ts | 39 ++ web/src/pages/SeriesDetail.tsx | 26 +- .../settings/ReleaseTrackingSettings.tsx | 26 +- web/src/types/api.generated.ts | 250 +++++++++ 32 files changed, 2532 insertions(+), 396 deletions(-) create mode 100644 migration/src/m20260505_000077_add_release_sources_last_summary.rs create mode 100644 src/services/release/seed.rs create mode 100644 
web/src/hooks/useReleaseTrackingApplicability.ts diff --git a/docs/api/openapi.json b/docs/api/openapi.json index 8a878e34..dda4e3ec 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -7139,6 +7139,54 @@ ] } }, + "/api/v1/release-sources/applicability": { + "get": { + "tags": [ + "Releases" + ], + "summary": "Whether release tracking is available for a given library.", + "description": "Read-only, requires only `SeriesRead`: the response carries no\nadmin-sensitive data (no plugin IDs, no configs, no library\nallowlists), just the boolean and friendly display names. Used by the\nfrontend to:\n\n- hide the per-series Tracking panel + Releases tab on libraries with\n no applicable plugin (cleaner UX);\n- decide whether to show the \"Track for releases\" / \"Don't track for\n releases\" entries in the bulk-selection menu.", + "operationId": "get_release_tracking_applicability", + "parameters": [ + { + "name": "libraryId", + "in": "query", + "description": "Optional library scope. 
When provided, only plugins that apply to\nthis library are considered (a plugin's `library_ids` field is\neither empty = all, or contains this UUID).", + "required": false, + "schema": { + "type": [ + "string", + "null" + ], + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Applicability info", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApplicabilityResponse" + } + } + } + }, + "403": { + "description": "SeriesRead permission required" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/release-sources/{source_id}": { "patch": { "tags": [ @@ -8197,6 +8245,52 @@ ] } }, + "/api/v1/series/bulk/track-for-releases": { + "post": { + "tags": [ + "Bulk Operations" + ], + "summary": "Bulk-enable release tracking for multiple series.", + "description": "For each `series_id` in the request, flips `series_tracking.tracked` to\n`true` and runs the seed pass (auto-derives aliases, `latest_known_*`,\n`track_chapters` / `track_volumes` from existing data). Series that don't\nexist are reported as `outcome: skipped`. 
Series already tracked are\nreported as `outcome: skipped, detail: \"already tracked\"` and the seed is\nnot re-run (idempotent — a re-run would simply re-derive identical\nvalues, but we skip the work).\n\nMirrors the per-series PATCH `false -> true` transition: same seed\nfunction, same idempotency guarantees.", + "operationId": "bulk_track_series_for_releases", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkSeriesRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Bulk-tracked series", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkTrackForReleasesResponse" + } + } + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + } + }, + "security": [ + { + "bearer_auth": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/bulk/unread": { "post": { "tags": [ @@ -8243,6 +8337,52 @@ ] } }, + "/api/v1/series/bulk/untrack-for-releases": { + "post": { + "tags": [ + "Bulk Operations" + ], + "summary": "Bulk-disable release tracking for multiple series.", + "description": "Flips `series_tracking.tracked` to `false`. 
Does not delete aliases,\n`latest_known_*`, or other tracking config — the user can re-track\nwithout losing customizations, and the seed will re-derive any\nauto-derived fields on the next track-on transition.", + "operationId": "bulk_untrack_series_for_releases", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkSeriesRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Bulk-untracked series", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkTrackForReleasesResponse" + } + } + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + } + }, + "security": [ + { + "bearer_auth": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/in-progress": { "get": { "tags": [ @@ -17660,6 +17800,27 @@ } } }, + "ApplicabilityResponse": { + "type": "object", + "description": "Response shape for `GET /api/v1/release-sources/applicability`.", + "required": [ + "applicable", + "pluginDisplayNames" + ], + "properties": { + "applicable": { + "type": "boolean", + "description": "`true` when at least one enabled `release_source` plugin applies to\nthe requested library (or, if no `libraryId` was supplied, to *any*\nlibrary). The frontend uses this to decide whether to render the\nper-series Tracking panel and Releases tab, or to show the\nbulk-track menu entry." + }, + "pluginDisplayNames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Plugin display names (or fallback to `name` when no manifest cached\nyet) of the enabled release-source plugins covering this library.\nEmpty when `applicable` is `false`. Useful for surfacing \"Powered by\nMangaUpdates, Nyaa\" hints in the UI." 
+ } + } + }, "AuthorContextDto": { "type": "object", "description": "Author context for template evaluation.", @@ -21365,6 +21526,65 @@ } } }, + "BulkTrackForReleasesItem": { + "type": "object", + "description": "Per-series outcome of a bulk track / untrack operation.\n\nReturned in `BulkTrackForReleasesResponse.results` so the UI can show a\nper-row status (e.g. \"tracked\", \"skipped: not found\", \"errored: …\") without\nre-querying the tracking config endpoint per series.", + "required": [ + "seriesId", + "outcome" + ], + "properties": { + "detail": { + "type": [ + "string", + "null" + ], + "description": "Free-form detail (error message for `errored`, reason for `skipped`).\n`None` for the success cases." + }, + "outcome": { + "type": "string", + "description": "`tracked` | `untracked` | `skipped` | `errored`." + }, + "seriesId": { + "type": "string", + "format": "uuid" + } + } + }, + "BulkTrackForReleasesResponse": { + "type": "object", + "description": "Aggregate result of `POST /series/bulk/track-for-releases` and its untrack\ncounterpart. Counts and per-series outcomes for client-side display.", + "required": [ + "changed", + "alreadyInState", + "errored", + "results" + ], + "properties": { + "alreadyInState": { + "type": "integer", + "description": "Series whose `tracked` flag was already in the target state. No-ops.", + "minimum": 0 + }, + "changed": { + "type": "integer", + "description": "Series successfully flipped to `tracked = true` (or `false` for the\nuntrack endpoint).", + "minimum": 0 + }, + "errored": { + "type": "integer", + "description": "Series that could not be processed (missing, error, etc.).", + "minimum": 0 + }, + "results": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BulkTrackForReleasesItem" + }, + "description": "Per-series outcomes in input order." 
+ } + } + }, "BulkUpdateBookLocksRequest": { "allOf": [ { @@ -32728,6 +32948,13 @@ ], "format": "date-time" }, + "lastSummary": { + "type": [ + "string", + "null" + ], + "description": "One-line summary of the most recent successful poll. Surfaced under\nthe row's status badge so users can see *why* a poll returned no\nannouncements without grepping logs. NULL until the first successful\npoll on the source." + }, "pluginId": { "type": "string", "description": "Owning plugin id, or `core` for in-core synthetic sources.", diff --git a/migration/src/lib.rs b/migration/src/lib.rs index 9655131a..4e9e23ff 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -157,6 +157,8 @@ mod m20260504_000074_add_tracking_languages; mod m20260504_000075_seed_release_tracking_languages; // Release tracking (Phase 8 follow-up): server-wide notification filter settings mod m20260504_000076_seed_release_tracking_notify_filters; +// Release tracking: per-source last-poll summary surfaced in the UI +mod m20260505_000077_add_release_sources_last_summary; pub struct Migrator; @@ -284,6 +286,8 @@ impl MigratorTrait for Migrator { Box::new(m20260504_000075_seed_release_tracking_languages::Migration), // Release tracking (Phase 8 follow-up): notification filter settings Box::new(m20260504_000076_seed_release_tracking_notify_filters::Migration), + // Release tracking: per-source last-poll summary + Box::new(m20260505_000077_add_release_sources_last_summary::Migration), ] } } diff --git a/migration/src/m20260505_000077_add_release_sources_last_summary.rs b/migration/src/m20260505_000077_add_release_sources_last_summary.rs new file mode 100644 index 00000000..664b1d55 --- /dev/null +++ b/migration/src/m20260505_000077_add_release_sources_last_summary.rs @@ -0,0 +1,40 @@ +//! Add `last_summary` column to `release_sources`. +//! +//! Free-form text written by the poll-source task on every successful poll +//! completion (e.g. `"fetched 12 items, matched 0, recorded 0"`). The +//! 
Release tracking settings UI surfaces it under the per-row status badge +//! so users can see *why* a poll returned no announcements (no tracked +//! series with aliases, upstream not modified, etc.) without grepping +//! container logs. NULL until the first successful poll. + +use sea_orm_migration::prelude::*; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(Alias::new("release_sources")) + .add_column(ColumnDef::new(Alias::new("last_summary")).text()) + .to_owned(), + ) + .await?; + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + manager + .alter_table( + Table::alter() + .table(Alias::new("release_sources")) + .drop_column(Alias::new("last_summary")) + .to_owned(), + ) + .await?; + Ok(()) + } +} diff --git a/src/api/docs.rs b/src/api/docs.rs index de32e991..0f649a38 100644 --- a/src/api/docs.rs +++ b/src/api/docs.rs @@ -272,6 +272,7 @@ The following paths are exempt from rate limiting: v1::handlers::releases::list_release_sources, v1::handlers::releases::update_release_source, v1::handlers::releases::poll_release_source_now, + v1::handlers::releases::get_release_tracking_applicability, // Cover management endpoints v1::handlers::list_series_covers, @@ -346,6 +347,8 @@ The following paths are exempt from rate limiting: v1::handlers::bulk_mark_series_as_unread, v1::handlers::bulk_analyze_series, v1::handlers::bulk_renumber_series, + v1::handlers::bulk_track_series_for_releases, + v1::handlers::bulk_untrack_series_for_releases, v1::handlers::bulk_generate_series_thumbnails, v1::handlers::bulk_generate_series_book_thumbnails, v1::handlers::bulk_reprocess_series_titles, @@ -710,6 +713,7 @@ The following paths are exempt from rate limiting: v1::dto::release::ReleaseSourceListResponse, v1::dto::release::UpdateReleaseSourceRequest, 
v1::dto::release::PollNowResponse, + v1::handlers::releases::ApplicabilityResponse, v1::dto::PaginatedResponse<v1::dto::release::ReleaseLedgerEntryDto>, // External Rating DTOs @@ -828,6 +832,8 @@ The following paths are exempt from rate limiting: v1::dto::BulkSeriesRequest, v1::dto::BulkAnalyzeSeriesRequest, v1::dto::BulkAnalyzeResponse, + v1::dto::BulkTrackForReleasesItem, + v1::dto::BulkTrackForReleasesResponse, v1::dto::BulkRenumberSeriesRequest, v1::dto::BulkGenerateBookThumbnailsRequest, v1::dto::BulkGenerateSeriesBookThumbnailsRequest, diff --git a/src/api/routes/v1/dto/release.rs b/src/api/routes/v1/dto/release.rs index 406824b2..6a84d55f 100644 --- a/src/api/routes/v1/dto/release.rs +++ b/src/api/routes/v1/dto/release.rs @@ -136,6 +136,12 @@ pub struct ReleaseSourceDto { /// Source-specific configuration (free-form). #[serde(skip_serializing_if = "Option::is_none")] pub config: Option<serde_json::Value>, + /// One-line summary of the most recent successful poll. Surfaced under + /// the row's status badge so users can see *why* a poll returned no + /// announcements without grepping logs. NULL until the first successful + /// poll on the source. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub last_summary: Option<String>, pub created_at: DateTime<Utc>, pub updated_at: DateTime<Utc>, } @@ -155,6 +161,7 @@ impl From<release_sources::Model> for ReleaseSourceDto { last_error_at: m.last_error_at, etag: m.etag, config: m.config, + last_summary: m.last_summary, created_at: m.created_at, updated_at: m.updated_at, } diff --git a/src/api/routes/v1/dto/tracking.rs b/src/api/routes/v1/dto/tracking.rs index 56f03f0a..1609fe27 100644 --- a/src/api/routes/v1/dto/tracking.rs +++ b/src/api/routes/v1/dto/tracking.rs @@ -218,3 +218,40 @@ pub struct CreateSeriesAliasRequest { #[serde(default)] pub source: Option<String>, } + +// ============================================================================= +// Bulk track-for-releases DTOs +// ============================================================================= + +/// Per-series outcome of a bulk track / untrack operation. +/// +/// Returned in `BulkTrackForReleasesResponse.results` so the UI can show a +/// per-row status (e.g. "tracked", "skipped: not found", "errored: …") without +/// re-querying the tracking config endpoint per series. +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct BulkTrackForReleasesItem { + pub series_id: Uuid, + /// `tracked` | `untracked` | `skipped` | `errored`. + pub outcome: String, + /// Free-form detail (error message for `errored`, reason for `skipped`). + /// `None` for the success cases. + #[serde(skip_serializing_if = "Option::is_none")] + pub detail: Option<String>, +} + +/// Aggregate result of `POST /series/bulk/track-for-releases` and its untrack +/// counterpart. Counts and per-series outcomes for client-side display. +#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct BulkTrackForReleasesResponse { + /// Series successfully flipped to `tracked = true` (or `false` for the + /// untrack endpoint). 
+ pub changed: usize, + /// Series whose `tracked` flag was already in the target state. No-ops. + pub already_in_state: usize, + /// Series that errored during processing. Missing series are reported as `skipped`. + pub errored: usize, + /// Per-series outcomes in input order. + pub results: Vec<BulkTrackForReleasesItem>, +} diff --git a/src/api/routes/v1/handlers/bulk.rs b/src/api/routes/v1/handlers/bulk.rs index d92db85a..1509e107 100644 --- a/src/api/routes/v1/handlers/bulk.rs +++ b/src/api/routes/v1/handlers/bulk.rs @@ -7,17 +7,19 @@ use super::super::dto::{ BulkAnalyzeBooksRequest, BulkAnalyzeResponse, BulkAnalyzeSeriesRequest, BulkBooksRequest, BulkGenerateBookThumbnailsRequest, BulkGenerateSeriesBookThumbnailsRequest, BulkGenerateSeriesThumbnailsRequest, BulkMetadataResetResponse, BulkRenumberSeriesRequest, - BulkReprocessSeriesTitlesRequest, BulkSeriesRequest, BulkTaskResponse, MarkReadResponse, + BulkReprocessSeriesTitlesRequest, BulkSeriesRequest, BulkTaskResponse, + BulkTrackForReleasesItem, BulkTrackForReleasesResponse, MarkReadResponse, }; use crate::api::{AppState, error::ApiError, extractors::AuthContext, permissions::Permission}; use crate::db::repositories::{ AlternateTitleRepository, BookRepository, ExternalLinkRepository, ExternalRatingRepository, GenreRepository, ReadProgressRepository, SeriesCoversRepository, SeriesExternalIdRepository, - SeriesMetadataRepository, SeriesRepository, SharingTagRepository, TagRepository, - TaskRepository, + SeriesMetadataRepository, SeriesRepository, SeriesTrackingRepository, SharingTagRepository, + TagRepository, TaskRepository, TrackingUpdate, }; use crate::events::{EntityChangeEvent, EntityEvent}; use crate::require_permission; +use crate::services::release::seed::seed_tracking_for_series; use crate::tasks::types::TaskType; use axum::{Json, extract::State}; use chrono::Utc; @@ -427,6 +429,262 @@ pub async fn bulk_analyze_series( })) } +// ============================================================================ +// 
Release-tracking Bulk Handlers +// ============================================================================ + +/// Bulk-enable release tracking for multiple series. +/// +/// For each `series_id` in the request, flips `series_tracking.tracked` to +/// `true` and runs the seed pass (auto-derives aliases, `latest_known_*`, +/// `track_chapters` / `track_volumes` from existing data). Series that don't +/// exist are reported as `outcome: skipped`. Series already tracked are +/// reported as `outcome: skipped, detail: "already tracked"` and the seed is +/// not re-run (idempotent — a re-run would simply re-derive identical +/// values, but we skip the work). +/// +/// Mirrors the per-series PATCH `false -> true` transition: same seed +/// function, same idempotency guarantees. +#[utoipa::path( + post, + path = "/api/v1/series/bulk/track-for-releases", + request_body = BulkSeriesRequest, + responses( + (status = 200, description = "Bulk-tracked series", body = BulkTrackForReleasesResponse), + (status = 401, description = "Unauthorized"), + (status = 403, description = "Forbidden"), + ), + security( + ("bearer_auth" = []), + ("api_key" = []) + ), + tag = "Bulk Operations" +)] +pub async fn bulk_track_series_for_releases( + State(state): State<Arc<AppState>>, + auth: AuthContext, + Json(request): Json<BulkSeriesRequest>, +) -> Result<Json<BulkTrackForReleasesResponse>, ApiError> { + require_permission!(auth, Permission::SeriesWrite)?; + + let mut response = BulkTrackForReleasesResponse { + changed: 0, + already_in_state: 0, + errored: 0, + results: Vec::with_capacity(request.series_ids.len()), + }; + + for series_id in request.series_ids { + let outcome = track_one_series(&state, series_id, &auth).await; + match outcome.outcome.as_str() { + "tracked" => response.changed += 1, + "skipped" => response.already_in_state += 1, + _ => response.errored += 1, + } + response.results.push(outcome); + } + + Ok(Json(response)) +} + +/// Bulk-disable release tracking for multiple 
series. +/// +/// Flips `series_tracking.tracked` to `false`. Does not delete aliases, +/// `latest_known_*`, or other tracking config — the user can re-track +/// without losing customizations, and the seed will re-derive any +/// auto-derived fields on the next track-on transition. +#[utoipa::path( + post, + path = "/api/v1/series/bulk/untrack-for-releases", + request_body = BulkSeriesRequest, + responses( + (status = 200, description = "Bulk-untracked series", body = BulkTrackForReleasesResponse), + (status = 401, description = "Unauthorized"), + (status = 403, description = "Forbidden"), + ), + security( + ("bearer_auth" = []), + ("api_key" = []) + ), + tag = "Bulk Operations" +)] +pub async fn bulk_untrack_series_for_releases( + State(state): State<Arc<AppState>>, + auth: AuthContext, + Json(request): Json<BulkSeriesRequest>, +) -> Result<Json<BulkTrackForReleasesResponse>, ApiError> { + require_permission!(auth, Permission::SeriesWrite)?; + + let mut response = BulkTrackForReleasesResponse { + changed: 0, + already_in_state: 0, + errored: 0, + results: Vec::with_capacity(request.series_ids.len()), + }; + + for series_id in request.series_ids { + let outcome = untrack_one_series(&state, series_id, &auth).await; + match outcome.outcome.as_str() { + "untracked" => response.changed += 1, + "skipped" => response.already_in_state += 1, + _ => response.errored += 1, + } + response.results.push(outcome); + } + + Ok(Json(response)) +} + +/// Track a single series and seed defaults. Helper for `bulk_track_series_for_releases`. +/// +/// Returns a structured per-series outcome rather than propagating errors so +/// one bad series doesn't fail the whole bulk request. 
+async fn track_one_series( + state: &AppState, + series_id: Uuid, + auth: &AuthContext, +) -> BulkTrackForReleasesItem { + let series = match SeriesRepository::get_by_id(&state.db, series_id).await { + Ok(Some(s)) => s, + Ok(None) => { + return BulkTrackForReleasesItem { + series_id, + outcome: "skipped".to_string(), + detail: Some("series not found".to_string()), + }; + } + Err(e) => { + return BulkTrackForReleasesItem { + series_id, + outcome: "errored".to_string(), + detail: Some(format!("lookup failed: {}", e)), + }; + } + }; + + // Skip if already tracked — idempotent no-op. + let already_tracked = SeriesTrackingRepository::get(&state.db, series_id) + .await + .ok() + .flatten() + .map(|r| r.tracked) + .unwrap_or(false); + if already_tracked { + return BulkTrackForReleasesItem { + series_id, + outcome: "skipped".to_string(), + detail: Some("already tracked".to_string()), + }; + } + + // Seed first so the auto-derived fields are populated, then flip the + // tracked flag in a second pass. Same order the per-series PATCH uses. + if let Err(e) = seed_tracking_for_series(&state.db, series_id).await { + return BulkTrackForReleasesItem { + series_id, + outcome: "errored".to_string(), + detail: Some(format!("seed failed: {}", e)), + }; + } + let update = TrackingUpdate { + tracked: Some(true), + ..Default::default() + }; + if let Err(e) = SeriesTrackingRepository::upsert(&state.db, series_id, update).await { + return BulkTrackForReleasesItem { + series_id, + outcome: "errored".to_string(), + detail: Some(format!("upsert failed: {}", e)), + }; + } + + let event = EntityChangeEvent { + event: EntityEvent::SeriesUpdated { + series_id, + library_id: series.library_id, + fields: Some(vec!["tracking".to_string()]), + }, + timestamp: Utc::now(), + user_id: Some(auth.user_id), + }; + let _ = state.event_broadcaster.emit(event); + + BulkTrackForReleasesItem { + series_id, + outcome: "tracked".to_string(), + detail: None, + } +} + +/// Untrack a single series. 
Helper for `bulk_untrack_series_for_releases`. +async fn untrack_one_series( + state: &AppState, + series_id: Uuid, + auth: &AuthContext, +) -> BulkTrackForReleasesItem { + let series = match SeriesRepository::get_by_id(&state.db, series_id).await { + Ok(Some(s)) => s, + Ok(None) => { + return BulkTrackForReleasesItem { + series_id, + outcome: "skipped".to_string(), + detail: Some("series not found".to_string()), + }; + } + Err(e) => { + return BulkTrackForReleasesItem { + series_id, + outcome: "errored".to_string(), + detail: Some(format!("lookup failed: {}", e)), + }; + } + }; + + // No tracking row at all -> nothing to do, treat as already in target state. + let already_untracked = SeriesTrackingRepository::get(&state.db, series_id) + .await + .ok() + .flatten() + .map(|r| !r.tracked) + .unwrap_or(true); + if already_untracked { + return BulkTrackForReleasesItem { + series_id, + outcome: "skipped".to_string(), + detail: Some("already untracked".to_string()), + }; + } + + let update = TrackingUpdate { + tracked: Some(false), + ..Default::default() + }; + if let Err(e) = SeriesTrackingRepository::upsert(&state.db, series_id, update).await { + return BulkTrackForReleasesItem { + series_id, + outcome: "errored".to_string(), + detail: Some(format!("upsert failed: {}", e)), + }; + } + + let event = EntityChangeEvent { + event: EntityEvent::SeriesUpdated { + series_id, + library_id: series.library_id, + fields: Some(vec!["tracking".to_string()]), + }, + timestamp: Utc::now(), + user_id: Some(auth.user_id), + }; + let _ = state.event_broadcaster.emit(event); + + BulkTrackForReleasesItem { + series_id, + outcome: "untracked".to_string(), + detail: None, + } +} + // ============================================================================ // Thumbnail Bulk Handlers // ============================================================================ diff --git a/src/api/routes/v1/handlers/releases.rs b/src/api/routes/v1/handlers/releases.rs index 0b85ba0a..d5862fec 100644 
--- a/src/api/routes/v1/handlers/releases.rs +++ b/src/api/routes/v1/handlers/releases.rs @@ -38,8 +38,8 @@ use crate::api::{ }; use crate::db::entities::release_ledger::state as ledger_state; use crate::db::repositories::{ - LedgerInboxFilter, ReleaseLedgerRepository, ReleaseSourceRepository, ReleaseSourceUpdate, - SeriesRepository, + LedgerInboxFilter, PluginsRepository, ReleaseLedgerRepository, ReleaseSourceRepository, + ReleaseSourceUpdate, SeriesRepository, }; use crate::events::{EntityChangeEvent, EntityEvent}; @@ -567,3 +567,115 @@ pub async fn poll_release_source_now( fn _opening_api_keepalive() -> ReleaseLedgerListResponse { ReleaseLedgerListResponse { entries: vec![] } } + +// ============================================================================= +// Applicability lookup +// ============================================================================= + +/// Query string for `GET /api/v1/release-sources/applicability`. +#[derive(Debug, Deserialize, utoipa::IntoParams)] +#[serde(rename_all = "camelCase")] +pub struct ApplicabilityQuery { + /// Optional library scope. When provided, only plugins that apply to + /// this library are considered (a plugin's `library_ids` field is + /// either empty = all, or contains this UUID). + #[serde(default)] + pub library_id: Option<Uuid>, +} + +/// Response shape for `GET /api/v1/release-sources/applicability`. +#[derive(Debug, serde::Serialize, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct ApplicabilityResponse { + /// `true` when at least one enabled `release_source` plugin applies to + /// the requested library (or, if no `libraryId` was supplied, to *any* + /// library). The frontend uses this to decide whether to render the + /// per-series Tracking panel and Releases tab, or to show the + /// bulk-track menu entry. + pub applicable: bool, + /// Plugin display names (or fallback to `name` when no manifest cached + /// yet) of the enabled release-source plugins covering this library. 
+ /// Empty when `applicable` is `false`. Useful for surfacing "Powered by + /// MangaUpdates, Nyaa" hints in the UI. + pub plugin_display_names: Vec<String>, +} + +/// Whether release tracking is available for a given library. +/// +/// Read-only, requires only `SeriesRead`: the response carries no +/// admin-sensitive data (no plugin IDs, no configs, no library +/// allowlists), just the boolean and friendly display names. Used by the +/// frontend to: +/// +/// - hide the per-series Tracking panel + Releases tab on libraries with +/// no applicable plugin (cleaner UX); +/// - decide whether to show the "Track for releases" / "Don't track for +/// releases" entries in the bulk-selection menu. +#[utoipa::path( + get, + path = "/api/v1/release-sources/applicability", + params(ApplicabilityQuery), + responses( + (status = 200, description = "Applicability info", body = ApplicabilityResponse), + (status = 403, description = "SeriesRead permission required"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Releases" +)] +pub async fn get_release_tracking_applicability( + State(state): State<Arc<AuthState>>, + auth: AuthContext, + axum::extract::Query(query): axum::extract::Query<ApplicabilityQuery>, +) -> Result<Json<ApplicabilityResponse>, ApiError> { + auth.require_permission(&Permission::SeriesRead)?; + + let plugins = PluginsRepository::get_enabled(&state.db) + .await + .map_err(|e| ApiError::Internal(format!("Failed to load plugins: {}", e)))?; + + let mut display_names: Vec<String> = Vec::new(); + for plugin in plugins { + // Capability check via the cached manifest. We deserialize the + // shape lightly via the canonical `PluginManifest` struct so + // a malformed manifest doesn't claim release-source capability. 
+ let Some(manifest_json) = plugin.manifest.as_ref() else { + continue; + }; + let Ok(manifest) = serde_json::from_value::< + crate::services::plugin::protocol::PluginManifest, + >(manifest_json.clone()) else { + continue; + }; + if manifest.capabilities.release_source.is_none() { + continue; + } + + // Library-scope check. The DB column is JSON; an empty array means + // "all libraries". Anything not deserializing into a Vec<Uuid> + // (NULL, non-array, etc.) is treated as "all libraries" too — + // that matches the existing convention elsewhere in the codebase. + let library_ids: Vec<Uuid> = + serde_json::from_value(plugin.library_ids.clone()).unwrap_or_default(); + if let Some(lib) = query.library_id + && !library_ids.is_empty() + && !library_ids.contains(&lib) + { + continue; + } + + let label = if plugin.display_name.trim().is_empty() { + plugin.name.clone() + } else { + plugin.display_name.clone() + }; + display_names.push(label); + } + + Ok(Json(ApplicabilityResponse { + applicable: !display_names.is_empty(), + plugin_display_names: display_names, + })) +} diff --git a/src/api/routes/v1/handlers/tracking.rs b/src/api/routes/v1/handlers/tracking.rs index 8d5f101d..1b35a3cf 100644 --- a/src/api/routes/v1/handlers/tracking.rs +++ b/src/api/routes/v1/handlers/tracking.rs @@ -31,6 +31,7 @@ use crate::db::repositories::{ }; use crate::events::{EntityChangeEvent, EntityEvent}; use crate::require_permission; +use crate::services::release::seed::seed_tracking_for_series; // ============================================================================= // Tracking config handlers @@ -112,6 +113,29 @@ pub async fn update_series_tracking( .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; + // Detect the false -> true tracked transition so we can seed defaults + // before applying the user's patch. 
This eliminates the empty-form UX + // where a freshly-tracked series has no aliases / no latest_known_*. + // + // The user's patch is applied *after* the seed, so any explicit value + // they sent (e.g. a custom latest_known_chapter override) still wins. + let was_tracked = SeriesTrackingRepository::get(&state.db, series_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch tracking: {}", e)))? + .map(|r| r.tracked) + .unwrap_or(false); + let is_flipping_to_tracked = matches!(request.tracked, Some(true)) && !was_tracked; + if is_flipping_to_tracked && let Err(e) = seed_tracking_for_series(&state.db, series_id).await { + // Best-effort: if seeding fails (e.g. transient DB error), still + // honor the user's intent to flip tracked on. The next re-toggle + // or a manual backfill task can re-seed. + tracing::warn!( + "Seed failed for series {} on track-on transition: {}", + series_id, + e + ); + } + let update = TrackingUpdate { tracked: request.tracked, tracking_status: request.tracking_status, diff --git a/src/api/routes/v1/routes/releases.rs b/src/api/routes/v1/routes/releases.rs index 55a351c2..f770f6fa 100644 --- a/src/api/routes/v1/routes/releases.rs +++ b/src/api/routes/v1/routes/releases.rs @@ -28,6 +28,12 @@ pub fn routes(_state: Arc<AppState>) -> Router<Arc<AppState>> { "/releases/{release_id}/mark-acquired", post(handlers::releases::mark_release_acquired), ) + // Applicability (SeriesRead required) — used by the frontend to + // hide release-tracking UI on libraries not covered by any plugin. 
+ .route( + "/release-sources/applicability", + get(handlers::releases::get_release_tracking_applicability), + ) // Source admin (PluginsManage required) .route( "/release-sources", diff --git a/src/api/routes/v1/routes/series.rs b/src/api/routes/v1/routes/series.rs index 14f4ada2..432aa18f 100644 --- a/src/api/routes/v1/routes/series.rs +++ b/src/api/routes/v1/routes/series.rs @@ -271,6 +271,14 @@ pub fn routes(_state: Arc<AppState>) -> Router<Arc<AppState>> { "/series/bulk/renumber", post(handlers::bulk_renumber_series), ) + .route( + "/series/bulk/track-for-releases", + post(handlers::bulk_track_series_for_releases), + ) + .route( + "/series/bulk/untrack-for-releases", + post(handlers::bulk_untrack_series_for_releases), + ) .route( "/series/bulk/thumbnails/generate", post(handlers::bulk_generate_series_thumbnails), diff --git a/src/db/entities/release_sources.rs b/src/db/entities/release_sources.rs index c7414669..9d845975 100644 --- a/src/db/entities/release_sources.rs +++ b/src/db/entities/release_sources.rs @@ -31,6 +31,13 @@ pub struct Model { pub last_error_at: Option<DateTime<Utc>>, pub etag: Option<String>, pub config: Option<serde_json::Value>, + /// One-line human-readable summary of the most recent poll (e.g. + /// `"fetched 12 items, matched 0, recorded 0"`). Written by the + /// poll-source task on every successful completion. NULL until the + /// first successful poll. Surfaced by the Release tracking settings UI + /// under the per-row status badge so users can tell *why* a poll + /// returned no announcements without grepping container logs. 
+ pub last_summary: Option<String>, pub created_at: DateTime<Utc>, pub updated_at: DateTime<Utc>, } diff --git a/src/db/repositories/release_sources.rs b/src/db/repositories/release_sources.rs index 35c81c32..1b58b97e 100644 --- a/src/db/repositories/release_sources.rs +++ b/src/db/repositories/release_sources.rs @@ -122,6 +122,7 @@ impl ReleaseSourceRepository { last_error_at: Set(None), etag: Set(None), config: Set(params.config), + last_summary: Set(None), created_at: Set(now), updated_at: Set(now), }; @@ -242,6 +243,7 @@ impl ReleaseSourceRepository { id: Uuid, polled_at: DateTime<Utc>, etag: Option<String>, + summary: Option<String>, ) -> Result<()> { let existing = ReleaseSources::find_by_id(id) .one(db) @@ -254,6 +256,11 @@ impl ReleaseSourceRepository { if let Some(e) = etag { active.etag = Set(Some(e)); } + // None passed by the caller means "leave alone"; older callers can pass + // None and keep their existing behavior. Pass Some("…") to overwrite. + if let Some(s) = summary { + active.last_summary = Set(Some(s)); + } active.updated_at = Set(Utc::now()); active.update(db).await?; Ok(()) @@ -414,13 +421,14 @@ mod tests { .unwrap(); assert_eq!(after_err.last_error.as_deref(), Some("503 upstream")); - // Successful poll clears the error and sets etag. + // Successful poll clears the error and sets etag + summary. let polled_at = Utc::now(); ReleaseSourceRepository::record_poll_success( conn, s.id, polled_at, Some("\"etag-1\"".to_string()), + Some("Fetched 0 items".to_string()), ) .await .unwrap(); @@ -432,6 +440,7 @@ mod tests { assert_eq!(after_ok.last_error_at, None); assert_eq!(after_ok.last_polled_at, Some(polled_at)); assert_eq!(after_ok.etag.as_deref(), Some("\"etag-1\"")); + assert_eq!(after_ok.last_summary.as_deref(), Some("Fetched 0 items")); } #[tokio::test] @@ -444,7 +453,7 @@ mod tests { // First a success. 
let success_at = Utc::now(); - ReleaseSourceRepository::record_poll_success(conn, s.id, success_at, None) + ReleaseSourceRepository::record_poll_success(conn, s.id, success_at, None, None) .await .unwrap(); diff --git a/src/services/plugin/manager.rs b/src/services/plugin/manager.rs index dacf857a..27f0d61e 100644 --- a/src/services/plugin/manager.rs +++ b/src/services/plugin/manager.rs @@ -407,36 +407,59 @@ impl PluginManager { let count = enabled_plugins.len(); debug!("Found {} enabled plugins in database", count); - let mut plugins = self.plugins.write().await; + // Identify release-source plugins so we can eager-spawn them after the + // write lock is released; this lets their `onInitialize` run and call + // `releases/register_sources` to materialize source rows on startup. + let eager_spawn_ids: Vec<Uuid> = enabled_plugins + .iter() + .filter(|p| Self::is_release_source(p)) + .map(|p| p.id) + .collect(); - // Preserve existing handles - we don't want to kill running plugin processes - // Just update the db_config for existing entries and add new ones - let mut existing_handles: HashMap<Uuid, Option<Arc<PluginHandle>>> = HashMap::new(); - for (id, entry) in plugins.drain() { - existing_handles.insert(id, entry.handle); - } + { + let mut plugins = self.plugins.write().await; - for plugin in enabled_plugins { - let id = plugin.id; - debug!("Loading plugin: {} ({})", plugin.name, id); - let mut entry = PluginEntry::new(plugin); - // Restore handle if we had one - if let Some(handle) = existing_handles.remove(&id) { - entry.handle = handle; + // Preserve existing handles - we don't want to kill running plugin processes + // Just update the db_config for existing entries and add new ones + let mut existing_handles: HashMap<Uuid, Option<Arc<PluginHandle>>> = HashMap::new(); + for (id, entry) in plugins.drain() { + existing_handles.insert(id, entry.handle); } - plugins.insert(id, entry); - } - // Stop any handles for plugins that are no longer enabled - for (_id, 
handle) in existing_handles { - if let Some(h) = handle { - let _ = h.stop().await; + for plugin in enabled_plugins { + let id = plugin.id; + debug!("Loading plugin: {} ({})", plugin.name, id); + let mut entry = PluginEntry::new(plugin); + // Restore handle if we had one + if let Some(handle) = existing_handles.remove(&id) { + entry.handle = handle; + } + plugins.insert(id, entry); + } + + // Stop any handles for plugins that are no longer enabled + for (_id, handle) in existing_handles { + if let Some(h) = handle { + let _ = h.stop().await; + } } } // Update cache timestamp *self.cache_loaded_at.write().await = Some(Instant::now()); + // Eager-start release-source plugins so they can register their + // sources on boot. Best-effort: a single plugin failing must not + // block the rest of startup. + for id in eager_spawn_ids { + if let Err(e) = self.get_or_spawn(id).await { + warn!( + "Eager start of release-source plugin {} failed on load_all: {}", + id, e + ); + } + } + info!("Loaded {} enabled plugins from database", count); Ok(count) } @@ -493,28 +516,54 @@ impl PluginManager { plugin_id, plugin.name, plugin.enabled, plugin.scopes ); - let mut plugins = self.plugins.write().await; + // Note whether this plugin should be eagerly spawned after the + // reload completes (release-source plugins need their onInitialize + // to run so they can call `releases/register_sources` — nothing + // else would trigger a spawn). 
+ let eager_spawn = plugin.enabled && Self::is_release_source(&plugin); - if plugin.enabled { - // If plugin exists and has a handle, stop it first - if let Some(entry) = plugins.get_mut(&plugin_id) { - debug!("Updating existing plugin entry for {}", plugin_id); - if let Some(handle) = entry.handle.take() { - let _ = handle.stop().await; + { + let mut plugins = self.plugins.write().await; + + if plugin.enabled { + // If plugin exists and has a handle, stop it first + if let Some(entry) = plugins.get_mut(&plugin_id) { + debug!("Updating existing plugin entry for {}", plugin_id); + if let Some(handle) = entry.handle.take() { + let _ = handle.stop().await; + } + entry.update_config(plugin); + } else { + debug!("Inserting new plugin entry for {}", plugin_id); + plugins.insert(plugin_id, PluginEntry::new(plugin)); } - entry.update_config(plugin); + debug!("Plugin manager now has {} plugins loaded", plugins.len()); } else { - debug!("Inserting new plugin entry for {}", plugin_id); - plugins.insert(plugin_id, PluginEntry::new(plugin)); + // Plugin is disabled, remove it from managed plugins + debug!("Plugin {} is disabled, removing from memory", plugin_id); + if let Some(entry) = plugins.remove(&plugin_id) + && let Some(handle) = entry.handle + { + let _ = handle.stop().await; + } } - debug!("Plugin manager now has {} plugins loaded", plugins.len()); - } else { - // Plugin is disabled, remove it from managed plugins - debug!("Plugin {} is disabled, removing from memory", plugin_id); - if let Some(entry) = plugins.remove(&plugin_id) - && let Some(handle) = entry.handle - { - let _ = handle.stop().await; + } + + if eager_spawn { + // Spawn after the write lock above is released. Note this awaits + // `get_or_spawn` inline, so a slow onInitialize delays reload; + // `get_or_spawn` takes its own locks and is safe to call here. 
+ if let Err(e) = self.get_or_spawn(plugin_id).await { + warn!( + "Eager start of release-source plugin {} failed: {}", + plugin_id, e + ); + } else { + debug!( + "Eager-started release-source plugin {} so onInitialize \ + can register its sources", + plugin_id + ); } } @@ -954,6 +1003,22 @@ impl PluginManager { manifest.oauth } + /// Whether this plugin's cached manifest declares the `release_source` + /// capability. Release-source plugins must be eagerly spawned (rather + /// than lazy on first call) so their `onInitialize` runs and the plugin + /// can call `releases/register_sources` to materialize its source rows + /// — otherwise the scheduler has nothing to poll, and nothing else + /// would ever trigger a spawn. + fn is_release_source(plugin: &plugins::Model) -> bool { + let Some(manifest_json) = plugin.manifest.as_ref() else { + return false; + }; + let Ok(manifest) = serde_json::from_value::<PluginManifest>(manifest_json.clone()) else { + return false; + }; + manifest.capabilities.release_source.is_some() + } + /// Get the OAuth client_id for a plugin (config override > manifest default) fn get_oauth_client_id(plugin: &plugins::Model) -> Option<String> { // Check plugin config for client_id override diff --git a/src/services/plugin/permissions.rs b/src/services/plugin/permissions.rs index 98a90bff..695f5a17 100644 --- a/src/services/plugin/permissions.rs +++ b/src/services/plugin/permissions.rs @@ -75,7 +75,8 @@ pub fn required_capability(method: &str) -> Option<RequiredCapability> { methods::RELEASES_LIST_TRACKED | methods::RELEASES_RECORD | methods::RELEASES_SOURCE_STATE_GET - | methods::RELEASES_SOURCE_STATE_SET => Some(RequiredCapability::ReleaseSource), + | methods::RELEASES_SOURCE_STATE_SET + | methods::RELEASES_REGISTER_SOURCES => Some(RequiredCapability::ReleaseSource), _ => None, } @@ -164,6 +165,7 @@ mod tests { methods::RELEASES_RECORD, methods::RELEASES_SOURCE_STATE_GET, methods::RELEASES_SOURCE_STATE_SET, + 
methods::RELEASES_REGISTER_SOURCES, ] { // Release-source plugin: allowed. assert!(enforce(m, &release_caps()).is_ok(), "{m} should be allowed"); diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index 2d8bd441..402e9423 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -871,6 +871,7 @@ pub fn is_releases_method(method: &str) -> bool { | "releases/record" | "releases/source_state/get" | "releases/source_state/set" + | "releases/register_sources" ) } @@ -1244,6 +1245,7 @@ mod tests { source_id, Utc::now(), Some("etag-123".to_string()), + None, ) .await .unwrap(); @@ -1331,6 +1333,7 @@ mod tests { assert!(is_releases_method(methods::RELEASES_RECORD)); assert!(is_releases_method(methods::RELEASES_SOURCE_STATE_GET)); assert!(is_releases_method(methods::RELEASES_SOURCE_STATE_SET)); + assert!(is_releases_method(methods::RELEASES_REGISTER_SOURCES)); assert!(!is_releases_method("releases/poll")); assert!(!is_releases_method("storage/get")); } diff --git a/src/services/plugin/rpc.rs b/src/services/plugin/rpc.rs index 53bddd22..2e3c03a2 100644 --- a/src/services/plugin/rpc.rs +++ b/src/services/plugin/rpc.rs @@ -421,20 +421,25 @@ async fn dispatch_reverse_rpc( let ctx_guard = reverse_ctx.read().await; // 1. Permission check. If capabilities haven't been set yet (i.e. the - // plugin tried to make a reverse-RPC call before initialize - // returned), we treat it as denied — there's nothing we can match - // against. + // plugin tried to make a reverse-RPC call before the host installed + // the per-plugin reverse-RPC handlers), we return METHOD_NOT_FOUND + // rather than AUTH_FAILED. From the plugin's perspective the method + // isn't dispatchable *yet* — distinguishing this from a real + // permission denial lets the plugin SDK retry with backoff to ride + // out the brief initialization race (see e.g. release-nyaa's + // `registerSources` retry on -32601). 
AUTH_FAILED stays reserved + // for actual capability-declined-method denials. let caps = match ctx_guard.capabilities.as_ref() { Some(c) => c, None => { warn!( method = %method, - "Reverse-RPC call before plugin initialized; rejecting" + "Reverse-RPC call before plugin initialized; deferring (METHOD_NOT_FOUND)" ); return JsonRpcResponse::error( Some(request_id), JsonRpcError::new( - error_codes::AUTH_FAILED, + error_codes::METHOD_NOT_FOUND, "plugin not initialized; capabilities unknown", ), ); diff --git a/src/services/release/mod.rs b/src/services/release/mod.rs index 75dfee85..883d8177 100644 --- a/src/services/release/mod.rs +++ b/src/services/release/mod.rs @@ -12,6 +12,9 @@ //! - [`upstream_gap`] — Phase 5 metadata-derived publication-gap signal //! surfaced on the series DTO. Read-side only; does not write to the //! release ledger. +//! - [`seed`] — derives tracking defaults (aliases, `latest_known_*`, +//! per-axis tracking flags) from existing series data so a user toggling +//! tracking on doesn't have to fill in a setup form. //! //! Plugins emit candidates over the reverse-RPC channel; the matcher applies //! the threshold and hands the survivors to the ledger repository, which is @@ -22,4 +25,5 @@ pub mod candidate; pub mod languages; pub mod matcher; pub mod schedule; +pub mod seed; pub mod upstream_gap; diff --git a/src/services/release/seed.rs b/src/services/release/seed.rs new file mode 100644 index 00000000..3a824bea --- /dev/null +++ b/src/services/release/seed.rs @@ -0,0 +1,525 @@ +//! Seed defaults for `series_tracking` rows. +//! +//! Called whenever a series transitions to `tracked = true`, and from the +//! retired-but-still-routed `BackfillTrackingFromMetadata` task. The goal is +//! to remove the empty-form UX where a user toggles tracking on and is then +//! presented with a panel full of inputs they have to manually populate. +//! +//! What gets seeded: +//! +//! - **Aliases** (`series_aliases`): inserted from `series.name`, +//! 
`series_metadata.title`, `series_metadata.title_sort`, and English +//! alternate titles. Non-Latin (CJK, Korean, Cyrillic, …) aliases are +//! skipped today because the alias matcher in the Nyaa / MangaUpdates +//! plugins normalizes Latin text only — non-Latin entries would never +//! match against typical uploader filenames and would just clutter the +//! alias list. Append-only: existing aliases (including user-added) are +//! never deleted by re-seeding. +//! +//! - **`latest_known_chapter` / `latest_known_volume`**: set to the local +//! max chapter / volume across the series's books. The first poll after +//! seeding then announces only releases strictly above the high-water +//! mark, so a user with v01..v15 on disk doesn't get spammed with +//! announcements for chapters they already own. Overwritten on every +//! re-seed (per the "reset all to derived defaults on re-track" rule). +//! +//! - **`track_chapters` / `track_volumes`**: inferred from the series's +//! book classification. If any book in the series has +//! `book_metadata.chapter` populated, `track_chapters = true`; same for +//! volumes. A series organized purely by volume gets `track_chapters = +//! false`, suppressing chapter-axis announcements. If neither axis has +//! any classified data (fresh import), both default to `true` so +//! announcements aren't silently dropped. +//! +//! `tracked` itself is **not** flipped here — that's the caller's +//! responsibility, since this function is called from both the per-series +//! PATCH handler (which interprets the user's intent) and the bulk +//! track-all endpoint. +//! +//! Re-running the seed on an already-tracked series is safe and is the +//! intended idempotent behavior. The retired backfill task uses this +//! property to refresh derived state across all series after a metadata +//! refresh. 
+ +use anyhow::{Context, Result}; +use sea_orm::DatabaseConnection; +use uuid::Uuid; + +use crate::db::entities::series_aliases::alias_source; +use crate::db::repositories::{ + AlternateTitleRepository, SeriesAliasRepository, SeriesMetadataRepository, SeriesRepository, + SeriesTrackingRepository, TrackingUpdate, +}; + +/// Outcome of a seed run, suitable for logging and surfacing in API responses. +/// +/// `PartialEq` (not `Eq`) because `f32` doesn't have total equality. Tests +/// compare individual fields rather than whole reports anyway. +#[derive(Debug, Default, Clone, PartialEq)] +pub struct SeedReport { + /// Aliases newly inserted (does not count duplicates skipped). + pub aliases_inserted: usize, + /// Aliases skipped because they were not Latin-script. + pub aliases_skipped_non_latin: usize, + /// Aliases skipped because an equivalent already existed for the series. + pub aliases_skipped_duplicate: usize, + /// Final `track_chapters` value after seeding. + pub track_chapters: bool, + /// Final `track_volumes` value after seeding. + pub track_volumes: bool, + /// Final `latest_known_chapter` after seeding (`None` when no books + /// have a classified chapter). f32 to match the aggregate column. + pub latest_known_chapter: Option<f32>, + /// Final `latest_known_volume` after seeding (`None` when no books + /// have a classified volume). + pub latest_known_volume: Option<i32>, +} + +/// Seed (or re-seed) tracking defaults for a single series. +/// +/// Updates / inserts a `series_tracking` row with the auto-derived +/// `track_chapters`, `track_volumes`, `latest_known_chapter`, +/// `latest_known_volume` fields. Does **not** modify `tracked` — the caller +/// owns that flip. +/// +/// Idempotent: safe to call repeatedly. Aliases are append-only; tracking +/// flags overwrite on every call. 
+pub async fn seed_tracking_for_series( + db: &DatabaseConnection, + series_id: Uuid, +) -> Result<SeedReport> { + let series = SeriesRepository::get_by_id(db, series_id) + .await + .with_context(|| format!("Failed to load series {} for seeding", series_id))? + .ok_or_else(|| anyhow::anyhow!("series {} not found", series_id))?; + + let metadata = SeriesMetadataRepository::get_by_series_id(db, series_id) + .await + .context("Failed to load series metadata for seeding")?; + + let mut report = SeedReport::default(); + + // ------------------------------------------------------------------- + // 1. Aliases — collect Latin-script candidates from name + metadata, + // bulk-insert (idempotent on duplicates). + // ------------------------------------------------------------------- + let mut candidates: Vec<String> = Vec::new(); + candidates.push(series.name.clone()); + if let Some(meta) = metadata.as_ref() { + candidates.push(meta.title.clone()); + if let Some(sort) = meta.title_sort.as_ref() + && !sort.trim().is_empty() + { + candidates.push(sort.clone()); + } + } + let alt_titles = AlternateTitleRepository::get_for_series(db, series_id) + .await + .context("Failed to load alternate titles")?; + for alt in alt_titles { + if !alt.title.trim().is_empty() { + candidates.push(alt.title); + } + } + + // Filter and dedupe (case-insensitive trimmed) so the bulk-insert call + // doesn't churn on identical inputs from different sources. 
+ let mut seen: std::collections::HashSet<String> = std::collections::HashSet::new(); + let mut accepted: Vec<String> = Vec::new(); + for raw in candidates { + let trimmed = raw.trim(); + if trimmed.is_empty() { + continue; + } + if !is_latin_alias(trimmed) { + report.aliases_skipped_non_latin += 1; + continue; + } + let key = trimmed.to_lowercase(); + if !seen.insert(key) { + continue; + } + accepted.push(trimmed.to_string()); + } + + if !accepted.is_empty() { + let refs: Vec<&str> = accepted.iter().map(|s| s.as_str()).collect(); + let inserted = + SeriesAliasRepository::bulk_create(db, series_id, &refs, alias_source::METADATA) + .await + .context("Failed to bulk-insert seeded aliases")?; + report.aliases_inserted = inserted; + report.aliases_skipped_duplicate = accepted.len().saturating_sub(inserted); + } + + // ------------------------------------------------------------------- + // 2. Per-axis tracking flags + latest_known_* from book classification. + // ------------------------------------------------------------------- + let aggregates = SeriesRepository::get_book_classification_aggregates(db, series_id) + .await + .context("Failed to load book classification aggregates for seeding")?; + + // Default both axes to true when nothing is classified — losing + // announcements silently on a fresh series is worse than getting one + // false-positive on an axis the series doesn't actually use. + let any_classified = + aggregates.local_max_chapter.is_some() || aggregates.local_max_volume.is_some(); + let track_chapters = if any_classified { + aggregates.local_max_chapter.is_some() + } else { + true + }; + let track_volumes = if any_classified { + aggregates.local_max_volume.is_some() + } else { + true + }; + + let update = TrackingUpdate { + track_chapters: Some(track_chapters), + track_volumes: Some(track_volumes), + // The persisted column is f64; widen from the aggregate's f32. 
+ latest_known_chapter: Some(aggregates.local_max_chapter.map(f64::from)), + latest_known_volume: Some(aggregates.local_max_volume), + ..Default::default() + }; + SeriesTrackingRepository::upsert(db, series_id, update) + .await + .context("Failed to upsert series tracking row during seeding")?; + + report.track_chapters = track_chapters; + report.track_volumes = track_volumes; + report.latest_known_chapter = aggregates.local_max_chapter; + report.latest_known_volume = aggregates.local_max_volume; + + Ok(report) +} + +/// Whether an alias string is composed entirely of Latin-script characters +/// plus common typography (digits, whitespace, punctuation). Non-Latin +/// scripts (CJK, Korean, Cyrillic, etc.) are rejected today because the +/// alias matcher in the Nyaa / MangaUpdates plugins normalizes Latin text +/// only; a non-Latin alias would never match against typical uploader +/// filenames and would just clutter the alias list. +/// +/// Conservative implementation: accept if every alphabetic character is +/// ASCII. This passes "Solo Leveling", "Don't Toy with Me", "Re:Zero", +/// "Bocchi the Rock!", and rejects anything containing CJK ideographs, +/// Hangul, Hiragana/Katakana, Cyrillic, etc. Diacritics (é, ñ, ü, …) are +/// non-ASCII alphabetic and are also rejected — users with such titles can +/// add them as manual aliases. We can widen this later if it bites. +fn is_latin_alias(s: &str) -> bool { + s.chars() + .filter(|c| c.is_alphabetic()) + .all(|c| c.is_ascii()) + // Reject empty / pure-punctuation strings as well; downstream + // create() would error on them anyway. 
+ && s.chars().any(|c| c.is_ascii_alphanumeric()) +} + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Utc; + use sea_orm::{ActiveModelTrait, Set}; + + use crate::db::ScanningStrategy; + use crate::db::entities::{book_metadata, books}; + use crate::db::repositories::{ + AlternateTitleRepository, BookMetadataRepository, BookRepository, LibraryRepository, + SeriesAliasRepository, SeriesRepository, SeriesTrackingRepository, + }; + use crate::db::test_helpers::create_test_db; + + #[test] + fn is_latin_alias_accepts_latin_strings() { + assert!(is_latin_alias("Solo Leveling")); + assert!(is_latin_alias("Don't Toy with Me")); + assert!(is_latin_alias("Re:Zero - Starting Life in Another World")); + assert!(is_latin_alias("Bocchi the Rock!")); + assert!(is_latin_alias("JoJo's Bizarre Adventure Part 7")); + assert!(is_latin_alias("Boruto: Two Blue Vortex")); + } + + #[test] + fn is_latin_alias_rejects_non_latin_strings() { + assert!(!is_latin_alias("나 혼자만 레벨업")); // Korean Hangul + assert!(!is_latin_alias("僕のヒーローアカデミア")); // Japanese + assert!(!is_latin_alias("ダンダダン")); // Katakana + assert!(!is_latin_alias("Война и мир")); // Cyrillic + } + + #[test] + fn is_latin_alias_rejects_diacritics_and_empty_inputs() { + // Conservative: diacritics are non-ASCII, rejected for now. + assert!(!is_latin_alias("Pokémon")); + assert!(!is_latin_alias("Crónica")); + // Pure punctuation / whitespace. + assert!(!is_latin_alias("")); + assert!(!is_latin_alias(" ")); + assert!(!is_latin_alias("!!!---!!!")); + } + + async fn make_series(db: &DatabaseConnection, library_id: Uuid, name: &str) -> Uuid { + let series = SeriesRepository::create(db, library_id, name, None) + .await + .unwrap(); + // SeriesRepository::create already creates a metadata row with title = + // name, so we don't need to insert another one. 
+ let _ = library_id; + series.id + } + + async fn add_classified_book( + db: &DatabaseConnection, + series_id: Uuid, + library_id: Uuid, + path: &str, + volume: Option<i32>, + chapter: Option<f32>, + ) { + let book = books::Model { + id: Uuid::new_v4(), + series_id, + library_id, + file_path: path.to_string(), + file_name: path.rsplit('/').next().unwrap_or(path).to_string(), + file_size: 1024, + file_hash: format!("hash_{}", Uuid::new_v4()), + partial_hash: String::new(), + format: "cbz".to_string(), + page_count: 10, + deleted: false, + analyzed: false, + analysis_error: None, + analysis_errors: None, + modified_at: Utc::now(), + created_at: Utc::now(), + updated_at: Utc::now(), + thumbnail_path: None, + thumbnail_generated_at: None, + koreader_hash: None, + epub_positions: None, + epub_spine_items: None, + }; + let created = BookRepository::create(db, &book, None).await.unwrap(); + let meta = BookMetadataRepository::create_with_title_and_number(db, created.id, None, None) + .await + .unwrap(); + let mut active: book_metadata::ActiveModel = meta.into(); + active.volume = Set(volume); + active.chapter = Set(chapter); + active.update(db).await.unwrap(); + } + + #[tokio::test] + async fn seed_inserts_latin_aliases_and_skips_non_latin() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Solo Leveling").await; + AlternateTitleRepository::create(conn, s, "Korean", "나 혼자만 레벨업") + .await + .unwrap(); + AlternateTitleRepository::create(conn, s, "Romaji", "Na Honjaman Lebel-eob") + .await + .unwrap(); + + let report = seed_tracking_for_series(conn, s).await.unwrap(); + // "Solo Leveling" is in both `series.name` and `series_metadata.title`, + // so dedup folds them; "Na Honjaman Lebel-eob" adds one. Korean alt + // is rejected as non-Latin. 
+ assert_eq!(report.aliases_inserted, 2); + assert_eq!(report.aliases_skipped_non_latin, 1); + + let aliases = SeriesAliasRepository::get_for_series(conn, s) + .await + .unwrap(); + let texts: Vec<&str> = aliases.iter().map(|a| a.alias.as_str()).collect(); + assert!(texts.contains(&"Solo Leveling")); + assert!(texts.contains(&"Na Honjaman Lebel-eob")); + assert!(!texts.iter().any(|a| a.contains('나'))); + } + + #[tokio::test] + async fn seed_is_idempotent_for_aliases() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Berserk").await; + + let first = seed_tracking_for_series(conn, s).await.unwrap(); + assert_eq!(first.aliases_inserted, 1); + + let second = seed_tracking_for_series(conn, s).await.unwrap(); + assert_eq!(second.aliases_inserted, 0); + assert_eq!(second.aliases_skipped_duplicate, 1); + + let aliases = SeriesAliasRepository::get_for_series(conn, s) + .await + .unwrap(); + assert_eq!(aliases.len(), 1); + } + + #[tokio::test] + async fn seed_preserves_user_added_aliases_on_re_seed() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Boruto").await; + + seed_tracking_for_series(conn, s).await.unwrap(); + // User adds a custom alias their uploader uses. + SeriesAliasRepository::create(conn, s, "Boruto: Two Blue Vortex", alias_source::MANUAL) + .await + .unwrap(); + + // Re-seed should not remove the manual alias. 
+ let _ = seed_tracking_for_series(conn, s).await.unwrap(); + let aliases = SeriesAliasRepository::get_for_series(conn, s) + .await + .unwrap(); + let texts: Vec<&str> = aliases.iter().map(|a| a.alias.as_str()).collect(); + assert!(texts.contains(&"Boruto")); + assert!(texts.contains(&"Boruto: Two Blue Vortex")); + } + + #[tokio::test] + async fn seed_writes_track_flags_and_latest_known_with_no_books() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Empty Series").await; + + let report = seed_tracking_for_series(conn, s).await.unwrap(); + // Nothing classified — both axes default to true. + assert!(report.track_chapters); + assert!(report.track_volumes); + assert_eq!(report.latest_known_chapter, None); + assert_eq!(report.latest_known_volume, None); + + let row = SeriesTrackingRepository::get(conn, s) + .await + .unwrap() + .unwrap(); + assert!(row.track_chapters); + assert!(row.track_volumes); + assert!(!row.tracked, "seeding must not flip `tracked` on"); + assert_eq!(row.latest_known_chapter, None); + assert_eq!(row.latest_known_volume, None); + } + + #[tokio::test] + async fn seed_infers_track_volumes_only_for_volume_organized_series() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Volume Series").await; + add_classified_book(conn, s, lib.id, "/v1.cbz", Some(1), None).await; + add_classified_book(conn, s, lib.id, "/v2.cbz", Some(2), None).await; + + let report = seed_tracking_for_series(conn, s).await.unwrap(); + assert!(!report.track_chapters); + assert!(report.track_volumes); + assert_eq!(report.latest_known_chapter, None); + assert_eq!(report.latest_known_volume, Some(2)); + } + + #[tokio::test] + async fn 
seed_infers_track_chapters_only_for_chapter_organized_series() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Chapter Series").await; + add_classified_book(conn, s, lib.id, "/c1.cbz", None, Some(1.0)).await; + add_classified_book(conn, s, lib.id, "/c2.cbz", None, Some(142.5)).await; + + let report = seed_tracking_for_series(conn, s).await.unwrap(); + assert!(report.track_chapters); + assert!(!report.track_volumes); + assert_eq!(report.latest_known_chapter, Some(142.5)); + assert_eq!(report.latest_known_volume, None); + } + + #[tokio::test] + async fn seed_keeps_both_axes_when_books_have_both_classifications() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Mixed Series").await; + add_classified_book(conn, s, lib.id, "/v1.cbz", Some(1), None).await; + add_classified_book(conn, s, lib.id, "/v2c10.cbz", Some(2), Some(10.0)).await; + + let report = seed_tracking_for_series(conn, s).await.unwrap(); + assert!(report.track_chapters); + assert!(report.track_volumes); + assert_eq!(report.latest_known_chapter, Some(10.0)); + assert_eq!(report.latest_known_volume, Some(2)); + } + + #[tokio::test] + async fn seed_overwrites_track_flags_and_latest_known_on_re_seed() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Repolled").await; + add_classified_book(conn, s, lib.id, "/v1.cbz", Some(1), None).await; + + // First seed: only volume axis on disk, latest_known_volume = 1. 
+ let first = seed_tracking_for_series(conn, s).await.unwrap(); + assert_eq!(first.latest_known_volume, Some(1)); + + // User adds a new book with vol 5; re-seed bumps latest_known_volume. + add_classified_book(conn, s, lib.id, "/v5.cbz", Some(5), None).await; + let second = seed_tracking_for_series(conn, s).await.unwrap(); + assert_eq!(second.latest_known_volume, Some(5)); + + let row = SeriesTrackingRepository::get(conn, s) + .await + .unwrap() + .unwrap(); + assert_eq!(row.latest_known_volume, Some(5)); + } + + #[tokio::test] + async fn seed_does_not_flip_tracked() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) + .await + .unwrap(); + let s = make_series(conn, lib.id, "Untracked").await; + + seed_tracking_for_series(conn, s).await.unwrap(); + let row = SeriesTrackingRepository::get(conn, s) + .await + .unwrap() + .unwrap(); + assert!(!row.tracked); + } + + #[tokio::test] + async fn seed_reports_missing_series_as_error() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let bogus = Uuid::new_v4(); + let err = seed_tracking_for_series(conn, bogus).await.unwrap_err(); + assert!(err.to_string().contains("not found")); + } +} diff --git a/src/tasks/handlers/backfill_tracking.rs b/src/tasks/handlers/backfill_tracking.rs index 022b597c..0ff2d78b 100644 --- a/src/tasks/handlers/backfill_tracking.rs +++ b/src/tasks/handlers/backfill_tracking.rs @@ -1,25 +1,26 @@ //! `BackfillTrackingFromMetadata` task handler. //! -//! Walks series in scope and seeds `series_aliases` rows from existing metadata -//! (canonical title + alternate titles). Idempotent on re-run — `SeriesAliasRepository::create` -//! returns the existing row when the same alias already exists for a series. +//! Walks series in scope and (re-)seeds tracking defaults from existing +//! data: aliases from metadata, `latest_known_*` from local book +//! 
classification, and per-axis `track_*` flags from book metadata. Routes +//! through `services::release::seed::seed_tracking_for_series` so the per- +//! series PATCH path, the bulk track-for-releases endpoint, and this task +//! all share one canonical seeding implementation. //! //! Does NOT toggle `tracked`. Enabling tracking is always an explicit user -//! action; this task is a one-time data-prep pass that the admin can run after -//! upgrading or after a metadata refresh. +//! action; this task is a maintenance pass that refreshes auto-derived +//! fields after a metadata refresh or library re-scan. use anyhow::Result; use sea_orm::DatabaseConnection; use std::sync::Arc; -use tracing::{debug, info, warn}; +use tracing::{info, warn}; use uuid::Uuid; -use crate::db::entities::series_aliases::alias_source; use crate::db::entities::tasks; -use crate::db::repositories::{ - AlternateTitleRepository, SeriesAliasRepository, SeriesMetadataRepository, SeriesRepository, -}; +use crate::db::repositories::SeriesRepository; use crate::events::EventBroadcaster; +use crate::services::release::seed::{SeedReport, seed_tracking_for_series}; use crate::tasks::handlers::TaskHandler; use crate::tasks::types::TaskResult; @@ -61,32 +62,34 @@ impl TaskHandler for BackfillTrackingFromMetadataHandler { let mut summary = BackfillSummary::default(); for series_id in series_to_process { - match backfill_one(db, series_id).await { - Ok(per_series) => { - summary.merge(per_series); - } + match seed_tracking_for_series(db, series_id).await { + Ok(report) => summary.merge(report), Err(e) => { - warn!("Backfill failed for series {}: {}", series_id, e); + warn!("Seed failed for series {}: {}", series_id, e); summary.errors += 1; } } } info!( - "Backfill complete ({}): {} series processed, {} aliases inserted, {} skipped, {} errors", + "Backfill complete ({}): {} series processed, {} aliases inserted, \ + {} skipped duplicate, {} skipped non-latin, {} errors", scope, summary.processed, 
summary.aliases_inserted, summary.aliases_skipped_duplicate, + summary.aliases_skipped_non_latin, summary.errors, ); Ok(TaskResult::success_with_data( format!( - "Processed {} series, inserted {} new aliases ({} duplicates skipped, {} errors)", + "Processed {} series, inserted {} new aliases \ + ({} duplicates, {} non-Latin skipped, {} errors)", summary.processed, summary.aliases_inserted, summary.aliases_skipped_duplicate, + summary.aliases_skipped_non_latin, summary.errors, ), serde_json::json!({ @@ -94,6 +97,7 @@ impl TaskHandler for BackfillTrackingFromMetadataHandler { "series_processed": summary.processed, "aliases_inserted": summary.aliases_inserted, "aliases_skipped_duplicate": summary.aliases_skipped_duplicate, + "aliases_skipped_non_latin": summary.aliases_skipped_non_latin, "errors": summary.errors, }), )) @@ -106,23 +110,19 @@ struct BackfillSummary { processed: usize, aliases_inserted: usize, aliases_skipped_duplicate: usize, + aliases_skipped_non_latin: usize, errors: usize, } impl BackfillSummary { - fn merge(&mut self, other: PerSeriesSummary) { + fn merge(&mut self, report: SeedReport) { self.processed += 1; - self.aliases_inserted += other.inserted; - self.aliases_skipped_duplicate += other.skipped_duplicate; + self.aliases_inserted += report.aliases_inserted; + self.aliases_skipped_duplicate += report.aliases_skipped_duplicate; + self.aliases_skipped_non_latin += report.aliases_skipped_non_latin; } } -#[derive(Default)] -struct PerSeriesSummary { - inserted: usize, - skipped_duplicate: usize, -} - fn describe_scope(library_id: Option<Uuid>, series_ids: Option<&[Uuid]>) -> String { match (library_id, series_ids) { (_, Some(ids)) => format!("scope=series_ids:{}", ids.len()), @@ -147,67 +147,14 @@ async fn resolve_series_scope( Ok(all.into_iter().map(|s| s.id).collect()) } -async fn backfill_one(db: &DatabaseConnection, series_id: Uuid) -> Result<PerSeriesSummary> { - let metadata = match SeriesMetadataRepository::get_by_series_id(db, 
series_id).await? { - Some(m) => m, - None => { - // Metadata is required for a series to exist normally; if missing, - // the series row is in an unexpected state - skip it. - debug!("Series {} has no metadata, skipping", series_id); - return Ok(PerSeriesSummary::default()); - } - }; - - let mut candidates: Vec<String> = Vec::new(); - candidates.push(metadata.title.clone()); - if let Some(sort) = metadata.title_sort.as_ref() - && !sort.trim().is_empty() - { - candidates.push(sort.clone()); - } - - let alt_titles = AlternateTitleRepository::get_for_series(db, series_id).await?; - for alt in alt_titles { - if !alt.title.trim().is_empty() { - candidates.push(alt.title); - } - } - - let mut summary = PerSeriesSummary::default(); - for alias in candidates { - let trimmed = alias.trim(); - if trimmed.is_empty() { - continue; - } - // Track inserts vs idempotent skips by counting before/after. - let before = SeriesAliasRepository::count_for_series(db, series_id).await?; - match SeriesAliasRepository::create(db, series_id, trimmed, alias_source::METADATA).await { - Ok(_) => { - let after = SeriesAliasRepository::count_for_series(db, series_id).await?; - if after > before { - summary.inserted += 1; - } else { - summary.skipped_duplicate += 1; - } - } - Err(e) => { - // Aliases that normalize to empty (e.g., "!!!---!!!" entries from - // odd metadata) are non-fatal — log and skip. 
- debug!( - "Skipping alias '{}' for series {}: {}", - trimmed, series_id, e - ); - } - } - } - Ok(summary) -} - #[cfg(test)] mod tests { use super::*; use crate::db::ScanningStrategy; - use crate::db::repositories::{LibraryRepository, SeriesAliasRepository, SeriesRepository}; + use crate::db::repositories::{ + AlternateTitleRepository, LibraryRepository, SeriesAliasRepository, SeriesRepository, + SeriesTrackingRepository, + }; use crate::db::test_helpers::create_test_db; async fn make_series( @@ -227,8 +174,11 @@ mod tests { series.id } + /// The handler now delegates to `seed_tracking_for_series`; this test + /// pins the latin-only filtering behavior at the seeded layer (the + /// previous handler-internal logic seeded all scripts and is gone). #[tokio::test] - async fn handler_seeds_aliases_from_title_and_alternates() { + async fn delegated_seed_inserts_latin_aliases_skipping_non_latin() { let (db, _temp) = create_test_db().await; let conn = db.sea_orm_connection(); let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) @@ -242,21 +192,22 @@ mod tests { ) .await; - let summary = backfill_one(conn, s1).await.unwrap(); - assert_eq!(summary.inserted, 2); - assert_eq!(summary.skipped_duplicate, 0); + let report = seed_tracking_for_series(conn, s1).await.unwrap(); + // "My Hero Academia" appears in both `series.name` and metadata title; + // dedup folds them. Japanese alt is skipped. 
+ assert_eq!(report.aliases_inserted, 1); + assert_eq!(report.aliases_skipped_non_latin, 1); let aliases = SeriesAliasRepository::get_for_series(conn, s1) .await .unwrap(); let texts: Vec<&str> = aliases.iter().map(|a| a.alias.as_str()).collect(); assert!(texts.contains(&"My Hero Academia")); - assert!(texts.contains(&"僕のヒーローアカデミア")); - assert!(aliases.iter().all(|a| a.source == "metadata")); + assert!(!texts.iter().any(|a| a.contains('僕'))); } #[tokio::test] - async fn handler_is_idempotent_on_rerun() { + async fn delegated_seed_is_idempotent_on_rerun() { let (db, _temp) = create_test_db().await; let conn = db.sea_orm_connection(); let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) @@ -264,12 +215,13 @@ mod tests { .unwrap(); let s1 = make_series(conn, lib.id, "Series A", Some("Alt A")).await; - let first = backfill_one(conn, s1).await.unwrap(); - assert_eq!(first.inserted, 2); + let first = seed_tracking_for_series(conn, s1).await.unwrap(); + // "Series A" + "Alt A" — both Latin, both inserted. 
+ assert_eq!(first.aliases_inserted, 2); - let second = backfill_one(conn, s1).await.unwrap(); - assert_eq!(second.inserted, 0, "re-run should not insert duplicates"); - assert_eq!(second.skipped_duplicate, 2); + let second = seed_tracking_for_series(conn, s1).await.unwrap(); + assert_eq!(second.aliases_inserted, 0); + assert_eq!(second.aliases_skipped_duplicate, 2); let aliases = SeriesAliasRepository::get_for_series(conn, s1) .await @@ -278,8 +230,7 @@ mod tests { } #[tokio::test] - async fn handler_does_not_enable_tracking() { - use crate::db::repositories::SeriesTrackingRepository; + async fn delegated_seed_does_not_enable_tracking() { let (db, _temp) = create_test_db().await; let conn = db.sea_orm_connection(); let lib = LibraryRepository::create(conn, "L", "/p", ScanningStrategy::Default) @@ -287,12 +238,12 @@ mod tests { .unwrap(); let s1 = make_series(conn, lib.id, "Some Title", None).await; - backfill_one(conn, s1).await.unwrap(); + seed_tracking_for_series(conn, s1).await.unwrap(); let row = SeriesTrackingRepository::get(conn, s1).await.unwrap(); assert!( - row.is_none(), - "backfill should not create or modify tracking row" + row.map(|r| !r.tracked).unwrap_or(true), + "seeding must not flip `tracked` on" ); } diff --git a/src/tasks/handlers/poll_release_source.rs b/src/tasks/handlers/poll_release_source.rs index 6d2fb617..327b838d 100644 --- a/src/tasks/handlers/poll_release_source.rs +++ b/src/tasks/handlers/poll_release_source.rs @@ -317,6 +317,13 @@ impl TaskHandler for PollReleaseSourceHandler { // Process candidates (the plugin may have streamed some via // reverse-RPC already; those are already on the ledger). + // Snapshot fields needed *after* the consume-loop below so we + // can still build the `last_summary` once `response.candidates` + // is moved. 
+ let response_etag = response.etag.clone(); + let response_not_modified = response.not_modified; + let response_upstream_status = response.upstream_status; + let mut result = PollReleaseSourceResult { source_id, candidates_returned: response.candidates.len() as u32, @@ -374,13 +381,19 @@ impl TaskHandler for PollReleaseSourceHandler { // Persist source state. If we hit a successful 2xx upstream we // already noted it for backoff; clear `last_error` and stamp - // `last_polled_at`. + // `last_polled_at`. The one-line `summary` is surfaced in the + // Release tracking UI under the per-row status badge so users + // can see *why* a poll returned no announcements (no tracked + // series, upstream not modified, …) without container logs. let polled_at = Utc::now(); + let summary = + build_poll_summary(response_not_modified, response_upstream_status, &result); if let Err(e) = ReleaseSourceRepository::record_poll_success( db, source.id, polled_at, - response.etag.clone(), + response_etag, + Some(summary), ) .await { @@ -390,7 +403,7 @@ impl TaskHandler for PollReleaseSourceHandler { // If the plugin signalled an upstream error code but didn't // return an RPC error, also stamp `last_error` so admins see // it in the UI. - if let Some(status) = response.upstream_status + if let Some(status) = response_upstream_status && is_backoff_status(status) { let _ = ReleaseSourceRepository::record_poll_error( @@ -403,7 +416,7 @@ impl TaskHandler for PollReleaseSourceHandler { } // Reset backoff on a clean run if we didn't already. 
- if backoff_url.is_none() && response.upstream_status.is_none() { + if backoff_url.is_none() && response_upstream_status.is_none() { let url_hint = derive_url_hint(&source); self.backoff.record_success(&url_hint).await; } @@ -422,6 +435,55 @@ impl TaskHandler for PollReleaseSourceHandler { } } +/// Build the one-line `last_summary` string written to `release_sources` +/// after a successful poll, intended for direct display under the Release +/// tracking row's status badge. +/// +/// Example outputs: +/// - `"Up to date — upstream returned 304 (not modified)"` +/// - `"Fetched 0 items"` (e.g. no tracked series with aliases for the source) +/// - `"Fetched 12 items, recorded 0 (12 already in ledger)"` +/// - `"Fetched 5 items, recorded 1, dropped 4 below threshold"` +/// - `"Upstream warning: HTTP 429"` (when the plugin reports an error code +/// but didn't fail the RPC outright) +pub(crate) fn build_poll_summary( + not_modified: Option<bool>, + upstream_status: Option<u16>, + result: &PollReleaseSourceResult, +) -> String { + if matches!(not_modified, Some(true)) { + return "Up to date — upstream returned 304 (not modified)".to_string(); + } + + let returned = result.candidates_returned; + let recorded = result.candidates_recorded; + let deduped = result.candidates_deduped; + let rejected = result.candidates_rejected; + + let mut s = match returned { + 0 => "Fetched 0 items".to_string(), + 1 => format!("Fetched 1 item, recorded {}", recorded), + n => format!("Fetched {} items, recorded {}", n, recorded), + }; + if deduped > 0 { + s.push_str(&format!(" ({} already in ledger)", deduped)); + } + if rejected > 0 { + s.push_str(&format!(", dropped {} below threshold", rejected)); + } + + // Upstream warning takes a trailing-suffix slot so the count info isn't + // lost. Backoff-significant statuses (429 / 5xx) are paired with a + // `last_error` write elsewhere; this is just a friendly inline note. 
+ if let Some(status) = upstream_status + && is_backoff_status(status) + { + s.push_str(&format!(" · upstream warning: HTTP {}", status)); + } + + s +} + /// Emit a `ReleaseAnnounced` event for a freshly-inserted ledger row. /// /// Failure to broadcast (no subscribers, channel closed) is a benign noop — @@ -587,6 +649,7 @@ mod tests { last_error_at: None, etag: None, config: None, + last_summary: None, created_at: Utc::now(), updated_at: Utc::now(), } @@ -757,4 +820,70 @@ mod tests { completed_at: None, } } + + // ------------------------------------------------------------------------- + // build_poll_summary — pins the user-facing copy that lands under the + // Release tracking row's status badge. + // ------------------------------------------------------------------------- + + fn empty_result() -> PollReleaseSourceResult { + PollReleaseSourceResult { + source_id: Uuid::new_v4(), + ..Default::default() + } + } + + #[test] + fn build_poll_summary_reports_not_modified_explicitly() { + let r = empty_result(); + let s = build_poll_summary(Some(true), None, &r); + assert_eq!(s, "Up to date — upstream returned 304 (not modified)"); + } + + #[test] + fn build_poll_summary_zero_items() { + let r = empty_result(); + let s = build_poll_summary(Some(false), None, &r); + assert_eq!(s, "Fetched 0 items"); + } + + #[test] + fn build_poll_summary_one_item_uses_singular() { + let mut r = empty_result(); + r.candidates_returned = 1; + r.candidates_recorded = 1; + let s = build_poll_summary(None, None, &r); + assert_eq!(s, "Fetched 1 item, recorded 1"); + } + + #[test] + fn build_poll_summary_includes_dedup_and_threshold_breakdown() { + let mut r = empty_result(); + r.candidates_returned = 12; + r.candidates_recorded = 1; + r.candidates_deduped = 7; + r.candidates_rejected = 4; + let s = build_poll_summary(None, None, &r); + assert_eq!( + s, + "Fetched 12 items, recorded 1 (7 already in ledger), dropped 4 below threshold" + ); + } + + #[test] + fn 
build_poll_summary_appends_upstream_warning_for_backoff_status() { + let mut r = empty_result(); + r.candidates_returned = 0; + let s = build_poll_summary(None, Some(429), &r); + assert_eq!(s, "Fetched 0 items · upstream warning: HTTP 429"); + } + + #[test] + fn build_poll_summary_does_not_append_for_clean_2xx() { + let mut r = empty_result(); + r.candidates_returned = 2; + r.candidates_recorded = 2; + let s = build_poll_summary(None, Some(200), &r); + assert_eq!(s, "Fetched 2 items, recorded 2"); + } } diff --git a/web/openapi.json b/web/openapi.json index 8a878e34..dda4e3ec 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -7139,6 +7139,54 @@ ] } }, + "/api/v1/release-sources/applicability": { + "get": { + "tags": [ + "Releases" + ], + "summary": "Whether release tracking is available for a given library.", + "description": "Read-only, requires only `SeriesRead`: the response carries no\nadmin-sensitive data (no plugin IDs, no configs, no library\nallowlists), just the boolean and friendly display names. Used by the\nfrontend to:\n\n- hide the per-series Tracking panel + Releases tab on libraries with\n no applicable plugin (cleaner UX);\n- decide whether to show the \"Track for releases\" / \"Don't track for\n releases\" entries in the bulk-selection menu.", + "operationId": "get_release_tracking_applicability", + "parameters": [ + { + "name": "libraryId", + "in": "query", + "description": "Optional library scope. 
When provided, only plugins that apply to\nthis library are considered (a plugin's `library_ids` field is\neither empty = all, or contains this UUID).", + "required": false, + "schema": { + "type": [ + "string", + "null" + ], + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Applicability info", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ApplicabilityResponse" + } + } + } + }, + "403": { + "description": "SeriesRead permission required" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/release-sources/{source_id}": { "patch": { "tags": [ @@ -8197,6 +8245,52 @@ ] } }, + "/api/v1/series/bulk/track-for-releases": { + "post": { + "tags": [ + "Bulk Operations" + ], + "summary": "Bulk-enable release tracking for multiple series.", + "description": "For each `series_id` in the request, flips `series_tracking.tracked` to\n`true` and runs the seed pass (auto-derives aliases, `latest_known_*`,\n`track_chapters` / `track_volumes` from existing data). Series that don't\nexist are reported as `outcome: skipped`. 
Series already tracked are\nreported as `outcome: skipped, detail: \"already tracked\"` and the seed is\nnot re-run (idempotent — a re-run would simply re-derive identical\nvalues, but we skip the work).\n\nMirrors the per-series PATCH `false -> true` transition: same seed\nfunction, same idempotency guarantees.", + "operationId": "bulk_track_series_for_releases", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkSeriesRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Bulk-tracked series", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkTrackForReleasesResponse" + } + } + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + } + }, + "security": [ + { + "bearer_auth": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/bulk/unread": { "post": { "tags": [ @@ -8243,6 +8337,52 @@ ] } }, + "/api/v1/series/bulk/untrack-for-releases": { + "post": { + "tags": [ + "Bulk Operations" + ], + "summary": "Bulk-disable release tracking for multiple series.", + "description": "Flips `series_tracking.tracked` to `false`. 
Does not delete aliases,\n`latest_known_*`, or other tracking config — the user can re-track\nwithout losing customizations, and the seed will re-derive any\nauto-derived fields on the next track-on transition.", + "operationId": "bulk_untrack_series_for_releases", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkSeriesRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Bulk-untracked series", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BulkTrackForReleasesResponse" + } + } + } + }, + "401": { + "description": "Unauthorized" + }, + "403": { + "description": "Forbidden" + } + }, + "security": [ + { + "bearer_auth": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/series/in-progress": { "get": { "tags": [ @@ -17660,6 +17800,27 @@ } } }, + "ApplicabilityResponse": { + "type": "object", + "description": "Response shape for `GET /api/v1/release-sources/applicability`.", + "required": [ + "applicable", + "pluginDisplayNames" + ], + "properties": { + "applicable": { + "type": "boolean", + "description": "`true` when at least one enabled `release_source` plugin applies to\nthe requested library (or, if no `libraryId` was supplied, to *any*\nlibrary). The frontend uses this to decide whether to render the\nper-series Tracking panel and Releases tab, or to show the\nbulk-track menu entry." + }, + "pluginDisplayNames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Plugin display names (or fallback to `name` when no manifest cached\nyet) of the enabled release-source plugins covering this library.\nEmpty when `applicable` is `false`. Useful for surfacing \"Powered by\nMangaUpdates, Nyaa\" hints in the UI." 
+ } + } + }, "AuthorContextDto": { "type": "object", "description": "Author context for template evaluation.", @@ -21365,6 +21526,65 @@ } } }, + "BulkTrackForReleasesItem": { + "type": "object", + "description": "Per-series outcome of a bulk track / untrack operation.\n\nReturned in `BulkTrackForReleasesResponse.results` so the UI can show a\nper-row status (e.g. \"tracked\", \"skipped: not found\", \"errored: …\") without\nre-querying the tracking config endpoint per series.", + "required": [ + "seriesId", + "outcome" + ], + "properties": { + "detail": { + "type": [ + "string", + "null" + ], + "description": "Free-form detail (error message for `errored`, reason for `skipped`).\n`None` for the success cases." + }, + "outcome": { + "type": "string", + "description": "`tracked` | `untracked` | `skipped` | `errored`." + }, + "seriesId": { + "type": "string", + "format": "uuid" + } + } + }, + "BulkTrackForReleasesResponse": { + "type": "object", + "description": "Aggregate result of `POST /series/bulk/track-for-releases` and its untrack\ncounterpart. Counts and per-series outcomes for client-side display.", + "required": [ + "changed", + "alreadyInState", + "errored", + "results" + ], + "properties": { + "alreadyInState": { + "type": "integer", + "description": "Series whose `tracked` flag was already in the target state. No-ops.", + "minimum": 0 + }, + "changed": { + "type": "integer", + "description": "Series successfully flipped to `tracked = true` (or `false` for the\nuntrack endpoint).", + "minimum": 0 + }, + "errored": { + "type": "integer", + "description": "Series that could not be processed (missing, error, etc.).", + "minimum": 0 + }, + "results": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BulkTrackForReleasesItem" + }, + "description": "Per-series outcomes in input order." 
+ } + } + }, "BulkUpdateBookLocksRequest": { "allOf": [ { @@ -32728,6 +32948,13 @@ ], "format": "date-time" }, + "lastSummary": { + "type": [ + "string", + "null" + ], + "description": "One-line summary of the most recent successful poll. Surfaced under\nthe row's status badge so users can see *why* a poll returned no\nannouncements without grepping logs. NULL until the first successful\npoll on the source." + }, "pluginId": { "type": "string", "description": "Owning plugin id, or `core` for in-core synthetic sources.", diff --git a/web/src/api/releases.ts b/web/src/api/releases.ts index 914dc8e0..042ceb11 100644 --- a/web/src/api/releases.ts +++ b/web/src/api/releases.ts @@ -9,6 +9,8 @@ export type UpdateReleaseSourceRequest = components["schemas"]["UpdateReleaseSourceRequest"]; export type PaginatedReleases = components["schemas"]["PaginatedResponse_ReleaseLedgerEntryDto"]; +export type ReleaseTrackingApplicability = + components["schemas"]["ApplicabilityResponse"]; export interface ReleaseInboxParams { state?: string; @@ -109,4 +111,27 @@ export const releaseSourcesApi = { ); return response.data; }, + + /** + * Whether release tracking is available for a given library scope. + * + * Returns `applicable: true` when at least one enabled release-source + * plugin applies to `libraryId` (or, with `libraryId` omitted, to any + * library). The frontend uses this to hide the per-series Tracking panel + * and Releases tab on libraries that aren't covered, and to gate the + * bulk-track menu entry. + */ + applicability: async ( + libraryId?: string, + ): Promise<ReleaseTrackingApplicability> => { + const params = new URLSearchParams(); + if (libraryId) { + params.set("libraryId", libraryId); + } + const qs = params.toString(); + const response = await api.get<ReleaseTrackingApplicability>( + `/release-sources/applicability${qs ? 
`?${qs}` : ""}`, + ); + return response.data; + }, }; diff --git a/web/src/api/series.ts b/web/src/api/series.ts index f19db9ea..52b52d8e 100644 --- a/web/src/api/series.ts +++ b/web/src/api/series.ts @@ -7,8 +7,12 @@ import type { SeriesCondition, SeriesListRequest, } from "@/types"; +import type { components } from "@/types/api.generated"; import { api } from "./client"; +export type BulkTrackForReleasesResponse = + components["schemas"]["BulkTrackForReleasesResponse"]; + export interface SeriesFilters { page?: number; pageSize?: number; @@ -376,6 +380,36 @@ export const seriesApi = { return response.data; }, + /** + * Bulk-enable release tracking. Flips `tracked: true` on each series and + * runs the seed pass (auto-derives aliases, latest_known_*, track_*). + * Series already tracked are reported as `outcome: skipped`. + */ + bulkTrackForReleases: async ( + seriesIds: string[], + ): Promise<BulkTrackForReleasesResponse> => { + const response = await api.post<BulkTrackForReleasesResponse>( + "/series/bulk/track-for-releases", + { seriesIds }, + ); + return response.data; + }, + + /** + * Bulk-disable release tracking. Flips `tracked: false` without deleting + * aliases or other tracking config — re-tracking later still re-seeds + * the auto-derived fields. 
+ */ + bulkUntrackForReleases: async ( + seriesIds: string[], + ): Promise<BulkTrackForReleasesResponse> => { + const response = await api.post<BulkTrackForReleasesResponse>( + "/series/bulk/untrack-for-releases", + { seriesIds }, + ); + return response.data; + }, + /** * Queue analysis for all books in multiple series in bulk * @param seriesIds - Array of series IDs to analyze diff --git a/web/src/components/library/BulkSelectionToolbar.test.tsx b/web/src/components/library/BulkSelectionToolbar.test.tsx index f00a14c6..b1c14d8f 100644 --- a/web/src/components/library/BulkSelectionToolbar.test.tsx +++ b/web/src/components/library/BulkSelectionToolbar.test.tsx @@ -35,19 +35,17 @@ vi.mock("@/api/series", () => ({ tasksEnqueued: 5, message: "Enqueued 5 analysis tasks for 2 series", }), - }, -})); - -vi.mock("@/api/tracking", () => ({ - trackingApi: { - updateTracking: vi.fn().mockResolvedValue({ - seriesId: "series-1", - tracked: true, - trackingStatus: "unknown", - trackChapters: true, - trackVolumes: true, - createdAt: "2024-01-01T00:00:00Z", - updatedAt: "2024-01-01T00:00:00Z", + bulkTrackForReleases: vi.fn().mockResolvedValue({ + changed: 2, + alreadyInState: 0, + errored: 0, + results: [], + }), + bulkUntrackForReleases: vi.fn().mockResolvedValue({ + changed: 1, + alreadyInState: 0, + errored: 0, + results: [], }), }, })); @@ -57,6 +55,15 @@ vi.mock("@/hooks/usePermissions", () => ({ usePermissions: vi.fn(), })); +// Mock the applicability hook so the Release Tracking menu entries render. +// Tests that need to hide them can override the mock. 
+vi.mock("@/hooks/useReleaseTrackingApplicability", () => ({ + useReleaseTrackingApplicability: vi.fn(() => ({ + data: { applicable: true, pluginDisplayNames: ["Nyaa Releases"] }, + isLoading: false, + })), +})); + const mockPermissionsAdmin = () => { vi.mocked(usePermissions).mockReturnValue({ user: { id: "user-1", username: "admin", role: "admin" } as ReturnType< @@ -366,8 +373,8 @@ describe("BulkSelectionToolbar", () => { }); }); - it("should call updateTracking for each series when Mark as Tracked clicked", async () => { - const { trackingApi } = await import("@/api/tracking"); + it("calls bulkTrackForReleases with all selected series when Track for releases is clicked", async () => { + const { seriesApi } = await import("@/api/series"); const user = userEvent.setup(); useBulkSelectionStore.getState().toggleSelection("series-1", "series"); @@ -377,22 +384,23 @@ describe("BulkSelectionToolbar", () => { await user.click(screen.getByRole("button", { name: /more actions/i })); await waitFor(() => { - expect(screen.getByText("Mark as Tracked")).toBeInTheDocument(); + expect(screen.getByText("Track for releases")).toBeInTheDocument(); }); - await user.click(screen.getByText("Mark as Tracked")); + await user.click(screen.getByText("Track for releases")); await waitFor(() => { - expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-1", { - tracked: true, - }); - expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-2", { - tracked: true, - }); + expect(seriesApi.bulkTrackForReleases).toHaveBeenCalledTimes(1); }); + // The toolbar passes the full selected-id list as a single argument. 
+ const calls = (seriesApi.bulkTrackForReleases as ReturnType<typeof vi.fn>) + .mock.calls; + expect(calls[0][0]).toEqual( + expect.arrayContaining(["series-1", "series-2"]), + ); }); - it("should call updateTracking with tracked=false when Mark as Untracked clicked", async () => { - const { trackingApi } = await import("@/api/tracking"); + it("calls bulkUntrackForReleases when Don't track for releases is clicked", async () => { + const { seriesApi } = await import("@/api/series"); const user = userEvent.setup(); useBulkSelectionStore.getState().toggleSelection("series-1", "series"); @@ -401,14 +409,16 @@ describe("BulkSelectionToolbar", () => { await user.click(screen.getByRole("button", { name: /more actions/i })); await waitFor(() => { - expect(screen.getByText("Mark as Untracked")).toBeInTheDocument(); + expect( + screen.getByText("Don't track for releases"), + ).toBeInTheDocument(); }); - await user.click(screen.getByText("Mark as Untracked")); + await user.click(screen.getByText("Don't track for releases")); await waitFor(() => { - expect(trackingApi.updateTracking).toHaveBeenCalledWith("series-1", { - tracked: false, - }); + expect(seriesApi.bulkUntrackForReleases).toHaveBeenCalledWith([ + "series-1", + ]); }); }); }); diff --git a/web/src/components/library/BulkSelectionToolbar.tsx b/web/src/components/library/BulkSelectionToolbar.tsx index 51598183..b66aaafc 100644 --- a/web/src/components/library/BulkSelectionToolbar.tsx +++ b/web/src/components/library/BulkSelectionToolbar.tsx @@ -29,9 +29,9 @@ import { useEffect, useMemo, useState } from "react"; import { booksApi } from "@/api/books"; import { pluginActionsApi, pluginsApi } from "@/api/plugins"; import { seriesApi } from "@/api/series"; -import { trackingApi } from "@/api/tracking"; import { BulkMetadataEditModal } from "@/components/library/BulkMetadataEditModal"; import { usePermissions } from "@/hooks/usePermissions"; +import { useReleaseTrackingApplicability } from 
"@/hooks/useReleaseTrackingApplicability"; import { selectPageItems, selectSelectionCount, @@ -98,6 +98,16 @@ export function BulkSelectionToolbar() { enabled: selectionType === "book" && count > 0, }); + // Whether any enabled release-source plugin exists in the install at all. + // Bulk selections may span libraries, so we use the global (no library + // filter) applicability — it just hides the "Mark as Tracked" / "Mark as + // Untracked" entries when no plugin is configured anywhere. Per-library + // plugin scopes still apply at poll time. + const { data: releaseTrackingApplicability } = + useReleaseTrackingApplicability(); + const showReleaseTrackingMenu = + releaseTrackingApplicability?.applicable === true; + // Helper to refetch all related queries const refetchAll = () => { queryClient.refetchQueries({ @@ -392,6 +402,10 @@ export function BulkSelectionToolbar() { // Bulk set release-tracking flag. No dedicated bulk endpoint exists yet — // fan out per-series PATCH calls. Acceptable scale for a hand-managed library // (hundreds of series, low-frequency action). + // Single-call bulk track/untrack via the dedicated endpoints. The host + // runs the seed pass per series on track-on transitions (auto-derives + // aliases, latest_known_*, track_chapters/volumes) so users get + // notification-ready tracking without touching the per-series panel. const bulkSetTrackedMutation = useMutation({ mutationFn: async ({ seriesIds, @@ -400,23 +414,26 @@ export function BulkSelectionToolbar() { seriesIds: string[]; tracked: boolean; }) => { - const results = await Promise.allSettled( - seriesIds.map((id) => trackingApi.updateTracking(id, { tracked })), - ); - const failed = results.filter((r) => r.status === "rejected").length; - return { total: seriesIds.length, failed }; + const response = tracked + ? 
await seriesApi.bulkTrackForReleases(seriesIds) + : await seriesApi.bulkUntrackForReleases(seriesIds); + return { total: seriesIds.length, response }; }, - onSuccess: ({ total, failed }, { tracked }) => { - if (failed === 0) { + onSuccess: ({ total, response }, { tracked }) => { + const errored = response.errored; + if (errored === 0) { notifications.show({ title: tracked ? "Tracking enabled" : "Tracking disabled", - message: `Updated ${total} series.`, + message: + response.alreadyInState > 0 + ? `Updated ${response.changed} series (${response.alreadyInState} already in this state).` + : `Updated ${response.changed} of ${total} series.`, color: tracked ? "green" : "blue", }); } else { notifications.show({ title: "Some updates failed", - message: `${total - failed} of ${total} series updated; ${failed} failed.`, + message: `${response.changed} updated, ${response.alreadyInState} unchanged, ${errored} failed.`, color: "yellow", }); } @@ -846,32 +863,36 @@ export function BulkSelectionToolbar() { Reprocess Titles </Menu.Item> - <Menu.Divider /> - <Menu.Label>Release Tracking</Menu.Label> - <Menu.Item - leftSection={<IconBell size={16} />} - onClick={() => - bulkSetTrackedMutation.mutate({ - seriesIds: selectedIds, - tracked: true, - }) - } - disabled={isAnyPending} - > - Mark as Tracked - </Menu.Item> - <Menu.Item - leftSection={<IconBellOff size={16} />} - onClick={() => - bulkSetTrackedMutation.mutate({ - seriesIds: selectedIds, - tracked: false, - }) - } - disabled={isAnyPending} - > - Mark as Untracked - </Menu.Item> + {showReleaseTrackingMenu && ( + <> + <Menu.Divider /> + <Menu.Label>Release Tracking</Menu.Label> + <Menu.Item + leftSection={<IconBell size={16} />} + onClick={() => + bulkSetTrackedMutation.mutate({ + seriesIds: selectedIds, + tracked: true, + }) + } + disabled={isAnyPending} + > + Track for releases + </Menu.Item> + <Menu.Item + leftSection={<IconBellOff size={16} />} + onClick={() => + bulkSetTrackedMutation.mutate({ + seriesIds: selectedIds, + 
tracked: false, + }) + } + disabled={isAnyPending} + > + Don't track for releases + </Menu.Item> + </> + )} <Menu.Divider /> <Menu.Label>Metadata</Menu.Label> diff --git a/web/src/components/series/TrackingPanel.test.tsx b/web/src/components/series/TrackingPanel.test.tsx index 3ee9962e..65fd868f 100644 --- a/web/src/components/series/TrackingPanel.test.tsx +++ b/web/src/components/series/TrackingPanel.test.tsx @@ -99,7 +99,7 @@ describe("TrackingPanel", () => { }); }); - it("renders aliases and supports add", async () => { + it("renders aliases and supports add (after expanding the collapsed panel)", async () => { const user = userEvent.setup(); get.mockResolvedValue({ ...baseTracking, tracked: true }); list.mockResolvedValue([baseAlias("Existing")]); @@ -107,6 +107,11 @@ describe("TrackingPanel", () => { renderWithProviders(<TrackingPanel seriesId={SERIES_ID} canEdit={true} />); + // The panel is collapsed by default — expand to reach the alias UI. + await user.click( + await screen.findByRole("button", { name: /Expand release tracking/i }), + ); + await screen.findByText("Existing"); const input = screen.getByPlaceholderText(/Add an alias/i); @@ -146,9 +151,14 @@ describe("TrackingPanel", () => { renderWithProviders(<TrackingPanel seriesId={SERIES_ID} canEdit={true} />); - await screen.findByText("Delete Me"); + // Expand to reveal the alias list. + await user.click( + await screen.findByRole("button", { name: /Expand release tracking/i }), + ); - const removeButton = screen.getByRole("button", { + // findByRole waits past Mantine's Collapse animation into the + // accessibility tree; getByRole here would race against it. 
+ const removeButton = await screen.findByRole("button", { name: /Remove alias Delete Me/i, }); await user.click(removeButton); diff --git a/web/src/components/series/TrackingPanel.tsx b/web/src/components/series/TrackingPanel.tsx index e4414bbb..aea2ede7 100644 --- a/web/src/components/series/TrackingPanel.tsx +++ b/web/src/components/series/TrackingPanel.tsx @@ -4,6 +4,7 @@ import { Box, Button, Card, + Collapse, Divider, Group, NumberInput, @@ -13,8 +14,15 @@ import { Text, TextInput, Tooltip, + UnstyledButton, } from "@mantine/core"; -import { IconBellRinging, IconPlus, IconTrash } from "@tabler/icons-react"; +import { + IconBellRinging, + IconChevronDown, + IconChevronRight, + IconPlus, + IconTrash, +} from "@tabler/icons-react"; import { type FormEvent, useState } from "react"; import { useCreateSeriesAlias, @@ -53,6 +61,10 @@ export function TrackingPanel({ seriesId, canEdit }: TrackingPanelProps) { const deleteAlias = useDeleteSeriesAlias(seriesId); const [aliasDraft, setAliasDraft] = useState(""); + // Default collapsed so the panel is a thin one-liner unless the user + // explicitly wants to fiddle. The summary in the header carries the + // load-bearing info (tracking on/off, last-known marks, alias count). + const [expanded, setExpanded] = useState(false); const tracking = trackingQuery.data; const aliases = aliasesQuery.data ?? []; @@ -69,19 +81,56 @@ export function TrackingPanel({ seriesId, canEdit }: TrackingPanelProps) { } }; + // Build a compact one-line summary that conveys "what is this series's + // tracking state right now" without expanding. Examples: + // "Tracking · ch 142 · vol 15 · 3 aliases" + // "Tracking · ch 142 · 0 aliases" + // "Not tracked" + // Untracked summary keeps the panel minimal — the toggle is the only + // actionable control until tracking is on. 
+ const summary = (() => { + if (!tracking?.tracked) return "Not tracked"; + const parts: string[] = ["Tracking"]; + if (tracking.trackChapters && tracking.latestKnownChapter != null) { + parts.push(`ch ${tracking.latestKnownChapter}`); + } + if (tracking.trackVolumes && tracking.latestKnownVolume != null) { + parts.push(`vol ${tracking.latestKnownVolume}`); + } + parts.push(`${aliases.length} alias${aliases.length === 1 ? "" : "es"}`); + return parts.join(" · "); + })(); + return ( <Card withBorder padding="md" radius="md"> <Stack gap="sm"> <Group justify="space-between" wrap="nowrap"> - <Group gap="xs"> - <IconBellRinging size={18} /> - <Text fw={600}>Release tracking</Text> - {tracking?.tracked && ( - <Badge color="green" variant="light" size="sm"> - Tracking - </Badge> - )} - </Group> + <UnstyledButton + onClick={() => setExpanded((v) => !v)} + aria-expanded={expanded} + aria-label={ + expanded ? "Collapse release tracking" : "Expand release tracking" + } + style={{ flex: 1, minWidth: 0 }} + > + <Group gap="xs" wrap="nowrap"> + {expanded ? ( + <IconChevronDown size={16} /> + ) : ( + <IconChevronRight size={16} /> + )} + <IconBellRinging size={18} /> + <Text fw={600}>Release tracking</Text> + {tracking?.tracked && ( + <Badge color="green" variant="light" size="sm"> + TRACKING + </Badge> + )} + <Text size="sm" c="dimmed" truncate> + {summary} + </Text> + </Group> + </UnstyledButton> <Switch checked={tracking?.tracked ?? 
false} onChange={(event) => @@ -92,171 +141,181 @@ export function TrackingPanel({ seriesId, canEdit }: TrackingPanelProps) { /> </Group> - {tracking?.tracked && ( - <> - <Group grow align="flex-start"> - <Select - label="Status" - value={tracking.trackingStatus} - onChange={(value) => { - if (value) updateTracking.mutate({ trackingStatus: value }); - }} - data={STATUS_OPTIONS} - disabled={!canEdit} - /> - <Stack gap={4}> - <Text size="sm" fw={500}> - Announce - </Text> - <Group gap="md"> - <Switch - label="Chapters" - checked={tracking.trackChapters} - onChange={(e) => - updateTracking.mutate({ - trackChapters: e.currentTarget.checked, - }) - } + <Collapse in={expanded}> + <Stack gap="sm" mt="xs"> + {tracking?.tracked && ( + <> + <Group grow align="flex-start"> + <Select + label="Status" + value={tracking.trackingStatus} + onChange={(value) => { + if (value) + updateTracking.mutate({ trackingStatus: value }); + }} + data={STATUS_OPTIONS} + disabled={!canEdit} + /> + <Stack gap={4}> + <Text size="sm" fw={500}> + Announce + </Text> + <Group gap="md"> + <Switch + label="Chapters" + checked={tracking.trackChapters} + onChange={(e) => + updateTracking.mutate({ + trackChapters: e.currentTarget.checked, + }) + } + disabled={!canEdit} + /> + <Switch + label="Volumes" + checked={tracking.trackVolumes} + onChange={(e) => + updateTracking.mutate({ + trackVolumes: e.currentTarget.checked, + }) + } + disabled={!canEdit} + /> + </Group> + </Stack> + </Group> + + <Group grow> + <NumberInput + label="Latest known chapter" + placeholder="—" + value={tracking.latestKnownChapter ?? ""} + onChange={(value) => { + const next = + typeof value === "number" && Number.isFinite(value) + ? 
value + : null; + updateTracking.mutate({ latestKnownChapter: next }); + }} + allowDecimal + decimalScale={2} + step={0.1} disabled={!canEdit} /> - <Switch - label="Volumes" - checked={tracking.trackVolumes} - onChange={(e) => - updateTracking.mutate({ - trackVolumes: e.currentTarget.checked, - }) - } + <NumberInput + label="Latest known volume" + placeholder="—" + value={tracking.latestKnownVolume ?? ""} + onChange={(value) => { + const next = + typeof value === "number" && + Number.isFinite(value) && + Number.isInteger(value) + ? value + : null; + updateTracking.mutate({ latestKnownVolume: next }); + }} + allowDecimal={false} + step={1} disabled={!canEdit} /> </Group> - </Stack> - </Group> - - <Group grow> - <NumberInput - label="Latest known chapter" - placeholder="—" - value={tracking.latestKnownChapter ?? ""} - onChange={(value) => { - const next = - typeof value === "number" && Number.isFinite(value) - ? value - : null; - updateTracking.mutate({ latestKnownChapter: next }); - }} - allowDecimal - decimalScale={2} - step={0.1} - disabled={!canEdit} - /> - <NumberInput - label="Latest known volume" - placeholder="—" - value={tracking.latestKnownVolume ?? ""} - onChange={(value) => { - const next = - typeof value === "number" && - Number.isFinite(value) && - Number.isInteger(value) - ? value - : null; - updateTracking.mutate({ latestKnownVolume: next }); - }} - allowDecimal={false} - step={1} - disabled={!canEdit} - /> - </Group> - </> - )} + </> + )} - <Divider my="xs" /> + <Divider my="xs" /> - <Box> - <Group justify="space-between" mb="xs"> - <Text size="sm" fw={500}> - Matcher aliases - </Text> - <Text size="xs" c="dimmed"> - {aliases.length} alias{aliases.length === 1 ? "" : "es"} - </Text> - </Group> - <Text size="xs" c="dimmed" mb="xs"> - Used by sources that match by title (Nyaa, MangaUpdates without an - ID). 
- </Text> + <Box> + <Group justify="space-between" mb="xs"> + <Text size="sm" fw={500}> + Matcher aliases + </Text> + <Text size="xs" c="dimmed"> + {aliases.length} alias{aliases.length === 1 ? "" : "es"} + </Text> + </Group> + <Text size="xs" c="dimmed" mb="xs"> + Used by sources that match by title (Nyaa, MangaUpdates without + an ID). + </Text> - {aliases.length === 0 && ( - <Text size="sm" c="dimmed" fs="italic" mb="xs"> - No aliases yet. Add one below or run the metadata backfill task. - </Text> - )} + {aliases.length === 0 && ( + <Text size="sm" c="dimmed" fs="italic" mb="xs"> + No aliases yet. Add one below or run the metadata backfill + task. + </Text> + )} - <Stack gap={4} mb="xs"> - {aliases.map((alias) => ( - <Group - key={alias.id} - justify="space-between" - wrap="nowrap" - gap="xs" - > - <Group gap="xs" wrap="nowrap" style={{ minWidth: 0, flex: 1 }}> - <Text size="sm" truncate> - {alias.alias} - </Text> - <Badge - color={alias.source === "manual" ? "violet" : "gray"} - variant="light" - size="xs" + <Stack gap={4} mb="xs"> + {aliases.map((alias) => ( + <Group + key={alias.id} + justify="space-between" + wrap="nowrap" + gap="xs" > - {alias.source} - </Badge> - </Group> - {canEdit && ( - <Tooltip label="Remove alias"> - <ActionIcon + <Group + gap="xs" + wrap="nowrap" + style={{ minWidth: 0, flex: 1 }} + > + <Text size="sm" truncate> + {alias.alias} + </Text> + <Badge + color={alias.source === "manual" ? 
"violet" : "gray"} + variant="light" + size="xs" + > + {alias.source} + </Badge> + </Group> + {canEdit && ( + <Tooltip label="Remove alias"> + <ActionIcon + size="sm" + color="red" + variant="subtle" + onClick={() => deleteAlias.mutate(alias.id)} + loading={ + deleteAlias.isPending && + deleteAlias.variables === alias.id + } + aria-label={`Remove alias ${alias.alias}`} + > + <IconTrash size={14} /> + </ActionIcon> + </Tooltip> + )} + </Group> + ))} + </Stack> + + {canEdit && ( + <form onSubmit={handleAddAlias}> + <Group gap="xs" align="flex-end"> + <TextInput + placeholder="Add an alias…" + value={aliasDraft} + onChange={(e) => setAliasDraft(e.currentTarget.value)} + style={{ flex: 1 }} + disabled={createAlias.isPending} + /> + <Button + type="submit" size="sm" - color="red" - variant="subtle" - onClick={() => deleteAlias.mutate(alias.id)} - loading={ - deleteAlias.isPending && - deleteAlias.variables === alias.id - } - aria-label={`Remove alias ${alias.alias}`} + leftSection={<IconPlus size={14} />} + loading={createAlias.isPending} + disabled={!aliasDraft.trim()} > - <IconTrash size={14} /> - </ActionIcon> - </Tooltip> - )} - </Group> - ))} + Add + </Button> + </Group> + </form> + )} + </Box> </Stack> - - {canEdit && ( - <form onSubmit={handleAddAlias}> - <Group gap="xs" align="flex-end"> - <TextInput - placeholder="Add an alias…" - value={aliasDraft} - onChange={(e) => setAliasDraft(e.currentTarget.value)} - style={{ flex: 1 }} - disabled={createAlias.isPending} - /> - <Button - type="submit" - size="sm" - leftSection={<IconPlus size={14} />} - loading={createAlias.isPending} - disabled={!aliasDraft.trim()} - > - Add - </Button> - </Group> - </form> - )} - </Box> + </Collapse> </Stack> </Card> ); diff --git a/web/src/hooks/useReleaseTrackingApplicability.ts b/web/src/hooks/useReleaseTrackingApplicability.ts new file mode 100644 index 00000000..f4f27307 --- /dev/null +++ b/web/src/hooks/useReleaseTrackingApplicability.ts @@ -0,0 +1,39 @@ +import { useQuery } from 
"@tanstack/react-query"; +import { releaseSourcesApi } from "@/api/releases"; + +/** + * Whether release tracking is available in the user's current scope. + * + * Backed by `GET /api/v1/release-sources/applicability`, which returns + * `applicable: true` when at least one enabled `release_source` plugin + * applies to `libraryId` (or, with `libraryId` omitted, applies to *any* + * library — useful for the global navigation Releases entry). + * + * Single source of truth for three UI gates: + * + * 1. **Per-series Tracking panel + Releases tab**: hide entirely on + * libraries with no covering plugin. Avoids dead-end UI like "click to + * track this series" on a library that has no plugin to actually do + * anything with the tracked state. + * + * 2. **Bulk-selection menu Track / Don't track entries**: only show when + * at least one selected series's library is covered. Mirrors how + * `getActions("series:bulk")` gates other plugin-driven entries. + * + * 3. **Top-level "Releases" navigation**: hidden when no plugin is + * installed at all (no `libraryId` argument). + * + * The query is cheap (one DB hit, no joins) and stale-cached for 5 minutes + * because the answer only flips when an admin enables/disables a plugin + * or changes its library scope — both rare operations. + */ +export function useReleaseTrackingApplicability(libraryId?: string) { + return useQuery({ + queryKey: ["release-tracking-applicability", libraryId ?? null], + queryFn: () => releaseSourcesApi.applicability(libraryId), + // Plugin install/disable is rare; treat the answer as essentially static + // for the life of a normal session. Mutations on the plugin admin page + // can invalidate this key explicitly if we ever want instant updates. 
+ staleTime: 5 * 60 * 1000, + }); +} diff --git a/web/src/pages/SeriesDetail.tsx b/web/src/pages/SeriesDetail.tsx index 0caf9020..d1640894 100644 --- a/web/src/pages/SeriesDetail.tsx +++ b/web/src/pages/SeriesDetail.tsx @@ -72,6 +72,7 @@ import { import { formatSeriesCounts } from "@/components/series/seriesCounts"; import { useDynamicDocumentTitle } from "@/hooks/useDocumentTitle"; import { usePermissions } from "@/hooks/usePermissions"; +import { useReleaseTrackingApplicability } from "@/hooks/useReleaseTrackingApplicability"; import { useSeriesTracking } from "@/hooks/useSeriesTracking"; import { useCoverUpdatesStore } from "@/store/coverUpdatesStore"; import { PERMISSIONS } from "@/types/permissions"; @@ -160,6 +161,15 @@ export function SeriesDetail() { // the TrackingPanel below. const { data: tracking } = useSeriesTracking(seriesId ?? "", !!seriesId); + // Whether any enabled release-source plugin applies to this series's + // library. Drives whether the TrackingPanel + SeriesReleasesPanel render + // at all — on libraries with no covering plugin the panels would be a + // dead-end (you can flip `tracked: true` but nothing would ever poll). + const { data: releaseTrackingApplicability } = + useReleaseTrackingApplicability(series?.libraryId); + const releaseTrackingAvailable = + releaseTrackingApplicability?.applicable === true; + // Fetch available plugin actions for series:detail scope, filtered by library const { data: pluginActions } = useQuery({ queryKey: ["plugin-actions", "series:detail", series?.libraryId], @@ -1019,15 +1029,19 @@ export function SeriesDetail() { </Group> )} - {/* Release tracking (admin/editor surface; query stays cheap when collapsed) */} - {canEditSeries && ( + {/* Release tracking (admin/editor surface; query stays cheap when collapsed). + Hidden on libraries with no covering release-source plugin. 
*/} + {canEditSeries && releaseTrackingAvailable && ( <TrackingPanel seriesId={series.id} canEdit={canEditSeries} /> )} - {/* Releases panel: ledger entries grouped by chapter/volume. - Shows whenever the series has tracking enabled — the panel - renders an empty-state message if no entries exist yet. */} - {tracking?.tracked && <SeriesReleasesPanel seriesId={series.id} />} + {/* Releases panel: ledger entries grouped by chapter/volume. Shows + whenever the series has tracking enabled and a plugin can + actually deliver releases — otherwise the panel would render + an empty inbox with no path to ever populate. */} + {tracking?.tracked && releaseTrackingAvailable && ( + <SeriesReleasesPanel seriesId={series.id} /> + )} {/* External Links */} {series.externalLinks && series.externalLinks.length > 0 && ( diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx index 9724ada6..9e3f4eaa 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -398,7 +398,14 @@ function ReleaseSourceRow({ </Group> </Table.Td> <Table.Td> - <Text size="xs">{lastPolled}</Text> + <Stack gap={2}> + <Text size="xs">{lastPolled}</Text> + {source.lastSummary && ( + <Text size="xs" c="dimmed" lineClamp={2}> + {source.lastSummary} + </Text> + )} + </Stack> </Table.Td> <Table.Td> {source.lastError ? ( @@ -414,9 +421,20 @@ function ReleaseSourceRow({ </Badge> </Tooltip> ) : source.lastPolledAt ? ( - <Badge color="green" variant="light" size="sm"> - OK - </Badge> + // Wrap the OK badge in a tooltip carrying `lastSummary` so users + // can see *why* a poll returned nothing (no tracked series, 304, + // dropped below threshold, etc.) without grepping logs. + <Tooltip + label={source.lastSummary ?? 
"Last poll completed successfully."} + multiline + w={300} + withArrow + position="top" + > + <Badge color="green" variant="light" size="sm"> + OK + </Badge> + </Tooltip> ) : ( <Badge color="gray" variant="light" size="sm"> Never polled diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index 5523d804..cb320e3e 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -2441,6 +2441,34 @@ export interface paths { patch?: never; trace?: never; }; + "/api/v1/release-sources/applicability": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Whether release tracking is available for a given library. + * @description Read-only, requires only `SeriesRead`: the response carries no + * admin-sensitive data (no plugin IDs, no configs, no library + * allowlists), just the boolean and friendly display names. Used by the + * frontend to: + * + * - hide the per-series Tracking panel + Releases tab on libraries with + * no applicable plugin (cleaner UX); + * - decide whether to show the "Track for releases" / "Don't track for + * releases" entries in the bulk-selection menu. + */ + get: operations["get_release_tracking_applicability"]; + put?: never; + post?: never; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/v1/release-sources/{source_id}": { parameters: { query?: never; @@ -2840,6 +2868,35 @@ export interface paths { patch?: never; trace?: never; }; + "/api/v1/series/bulk/track-for-releases": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Bulk-enable release tracking for multiple series. + * @description For each `series_id` in the request, flips `series_tracking.tracked` to + * `true` and runs the seed pass (auto-derives aliases, `latest_known_*`, + * `track_chapters` / `track_volumes` from existing data). 
Series that don't + * exist are reported as `outcome: skipped`. Series already tracked are + * reported as `outcome: skipped, detail: "already tracked"` and the seed is + * not re-run (idempotent — a re-run would simply re-derive identical + * values, but we skip the work). + * + * Mirrors the per-series PATCH `false -> true` transition: same seed + * function, same idempotency guarantees. + */ + post: operations["bulk_track_series_for_releases"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/v1/series/bulk/unread": { parameters: { query?: never; @@ -2861,6 +2918,29 @@ export interface paths { patch?: never; trace?: never; }; + "/api/v1/series/bulk/untrack-for-releases": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + /** + * Bulk-disable release tracking for multiple series. + * @description Flips `series_tracking.tracked` to `false`. Does not delete aliases, + * `latest_known_*`, or other tracking config — the user can re-track + * without losing customizations, and the seed will re-derive any + * auto-derived fields on the next track-on transition. + */ + post: operations["bulk_untrack_series_for_releases"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; "/api/v1/series/in-progress": { parameters: { query?: never; @@ -6433,6 +6513,24 @@ export interface components { */ version: string; }; + /** @description Response shape for `GET /api/v1/release-sources/applicability`. */ + ApplicabilityResponse: { + /** + * @description `true` when at least one enabled `release_source` plugin applies to + * the requested library (or, if no `libraryId` was supplied, to *any* + * library). The frontend uses this to decide whether to render the + * per-series Tracking panel and Releases tab, or to show the + * bulk-track menu entry. 
+ */ + applicable: boolean; + /** + * @description Plugin display names (or fallback to `name` when no manifest cached + * yet) of the enabled release-source plugins covering this library. + * Empty when `applicable` is `false`. Useful for surfacing "Powered by + * MangaUpdates, Nyaa" hints in the UI. + */ + pluginDisplayNames: string[]; + }; /** @description Author context for template evaluation. */ AuthorContextDto: { /** @@ -8603,6 +8701,41 @@ export interface components { */ taskId: string; }; + /** + * @description Per-series outcome of a bulk track / untrack operation. + * + * Returned in `BulkTrackForReleasesResponse.results` so the UI can show a + * per-row status (e.g. "tracked", "skipped: not found", "errored: …") without + * re-querying the tracking config endpoint per series. + */ + BulkTrackForReleasesItem: { + /** + * @description Free-form detail (error message for `errored`, reason for `skipped`). + * `None` for the success cases. + */ + detail?: string | null; + /** @description `tracked` | `untracked` | `skipped` | `errored`. */ + outcome: string; + /** Format: uuid */ + seriesId: string; + }; + /** + * @description Aggregate result of `POST /series/bulk/track-for-releases` and its untrack + * counterpart. Counts and per-series outcomes for client-side display. + */ + BulkTrackForReleasesResponse: { + /** @description Series whose `tracked` flag was already in the target state. No-ops. */ + alreadyInState: number; + /** + * @description Series successfully flipped to `tracked = true` (or `false` for the + * untrack endpoint). + */ + changed: number; + /** @description Series that could not be processed (missing, error, etc.). */ + errored: number; + /** @description Per-series outcomes in input order. 
*/ + results: components["schemas"]["BulkTrackForReleasesItem"][]; + }; /** @description Request to update metadata locks for multiple books */ BulkUpdateBookLocksRequest: components["schemas"]["UpdateBookMetadataLocksRequest"] & { /** @description Book IDs to update locks for (max 500) */ @@ -14603,6 +14736,13 @@ export interface components { lastErrorAt?: string | null; /** Format: date-time */ lastPolledAt?: string | null; + /** + * @description One-line summary of the most recent successful poll. Surfaced under + * the row's status badge so users can see *why* a poll returned no + * announcements without grepping logs. NULL until the first successful + * poll on the source. + */ + lastSummary?: string | null; /** * @description Owning plugin id, or `core` for in-core synthetic sources. * @example release-nyaa @@ -23240,6 +23380,40 @@ export interface operations { }; }; }; + get_release_tracking_applicability: { + parameters: { + query?: { + /** + * @description Optional library scope. When provided, only plugins that apply to + * this library are considered (a plugin's `library_ids` field is + * either empty = all, or contains this UUID). 
+ */ + libraryId?: string | null; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Applicability info */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["ApplicabilityResponse"]; + }; + }; + /** @description SeriesRead permission required */ + 403: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + }; + }; update_release_source: { parameters: { query?: never; @@ -24032,6 +24206,44 @@ export interface operations { }; }; }; + bulk_track_series_for_releases: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["BulkSeriesRequest"]; + }; + }; + responses: { + /** @description Bulk-tracked series */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["BulkTrackForReleasesResponse"]; + }; + }; + /** @description Unauthorized */ + 401: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Forbidden */ + 403: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + }; + }; bulk_mark_series_as_unread: { parameters: { query?: never; @@ -24070,6 +24282,44 @@ export interface operations { }; }; }; + bulk_untrack_series_for_releases: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["BulkSeriesRequest"]; + }; + }; + responses: { + /** @description Bulk-untracked series */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["BulkTrackForReleasesResponse"]; + }; + }; + /** @description Unauthorized */ + 401: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Forbidden */ + 403: { + headers: { + 
[name: string]: unknown; + }; + content?: never; + }; + }; + }; list_in_progress_series: { parameters: { query?: { From 1f6f7e8ed753c24560c13dabe73cd686a4b991e9 Mon Sep 17 00:00:00 2001 From: Sylvain Cau <ashdevfr@gmail.com> Date: Mon, 4 May 2026 22:01:30 -0700 Subject: [PATCH 13/29] fix(release-tracking): match api: external IDs, dedup polls, emit polled SSE event Several gaps in the release-tracking polling pipeline surfaced once it was exercised in real use: - The host's releases/list_tracked filter only stripped `plugin:` prefixes from external-ID source strings, but metadata plugins (MangaBaka, OpenLibrary, etc.) write IDs with `api:<service>` per the SDK convention. As a result, MangaUpdates received zero IDs and reported "Fetched 0 items" even on series that had been cross-referenced. Strip both `api:` and `plugin:` namespaces in strip_external_id_namespace. - ReleasePollResponse gained parsed/matched/recorded/deduped counters. Streaming plugins (Nyaa, MangaUpdates) record candidates via the releases/record reverse-RPC mid-poll and return an empty `candidates` array, so the host's `last_summary` always read "Fetched 0 items" regardless of activity. Both plugins now report counters; the host folds them into PollReleaseSourceResult via fold_streaming_counters, with a `matched - recorded` fallback for older plugins that omit `deduped`. - Concurrent "Poll now" requests no longer stack. enqueue_poll_now checks for an in-flight pending or processing task on the same source_id and coalesces onto it; the handler returns status="already_running" so the user sees the dedup explicitly. Backed by a generic TaskRepository::find_pending_or_processing_by_param helper that works on both SQLite and Postgres. - Added a POST /api/v1/release-sources/{id}/reset endpoint (admin-only) that drops every release_ledger row for the source and clears its transient poll state (etag, last_polled_at, last_error, last_summary). 
User-managed fields (enabled, pollIntervalS, displayName, config) are preserved. Surfaced as a red restore icon in the Release tracking settings table behind a confirm dialog. - ReleaseLedgerEntryDto now carries series_title joined from the series row, so the inbox UI renders human labels instead of sliced UUIDs. The Nyaa plugin also surfaces the post-page URL (from <guid isPermaLink>) as payloadUrl instead of the .torrent download URL. - New EntityEvent::ReleaseSourcePolled emitted at the end of every poll task run (success and error paths). The Release tracking settings page subscribes to it and invalidates the sources query, so last_polled_at / last_summary / status badges refresh in real time without a manual reload. Also drops the unused `tracking_status` column from series_tracking and its DTO/handler/UI surface; publication status is sourced from upstream metadata, not stored on the tracking sidecar. Tests cover external-ID prefix matching, streaming-counter folding, poll-now dedup, source reset (with user-field preservation), and the new SSE event variant. 
--- docs/api/openapi.json | 121 ++++++++++-- ...20260503_000072_create_release_tracking.rs | 8 - plugins/release-mangaupdates/src/index.ts | 55 +++++- plugins/release-nyaa/src/index.ts | 26 ++- plugins/release-nyaa/src/parser.test.ts | 13 ++ plugins/release-nyaa/src/parser.ts | 23 ++- plugins/sdk-typescript/src/types/releases.ts | 29 +++ src/api/docs.rs | 2 + src/api/routes/v1/dto/release.rs | 27 ++- src/api/routes/v1/dto/tracking.rs | 6 - src/api/routes/v1/handlers/releases.rs | 145 ++++++++++++-- src/api/routes/v1/handlers/tracking.rs | 11 +- src/api/routes/v1/routes/releases.rs | 4 + src/db/entities/series_tracking.rs | 31 --- src/db/repositories/release_ledger.rs | 67 +++++++ src/db/repositories/release_sources.rs | 72 +++++++ src/db/repositories/series_tracking.rs | 44 +---- src/db/repositories/task.rs | 69 +++++++ src/events/types.rs | 37 ++++ src/scheduler/release_sources.rs | 45 ++++- src/services/plugin/protocol.rs | 27 +++ src/services/plugin/releases_handler.rs | 106 +++++++++- src/services/release/upstream_gap.rs | 1 - src/tasks/handlers/poll_release_source.rs | 167 +++++++++++++++- tests/api/releases.rs | 187 +++++++++++++++++- tests/api/tracking.rs | 25 --- web/openapi.json | 121 ++++++++++-- web/src/api/releases.ts | 18 ++ .../series/SeriesReleasesPanel.test.tsx | 1 + .../components/series/TrackingPanel.test.tsx | 11 +- web/src/components/series/TrackingPanel.tsx | 75 +++---- web/src/hooks/useEntityEvents.ts | 8 + web/src/hooks/useReleases.ts | 22 +++ web/src/pages/ReleasesInbox.test.tsx | 11 ++ web/src/pages/ReleasesInbox.tsx | 5 +- .../settings/ReleaseTrackingSettings.tsx | 52 +++-- web/src/types/api.generated.ts | 126 ++++++++++-- 37 files changed, 1512 insertions(+), 286 deletions(-) diff --git a/docs/api/openapi.json b/docs/api/openapi.json index dda4e3ec..c1cd25fb 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -7299,6 +7299,54 @@ ] } }, + "/api/v1/release-sources/{source_id}/reset": { + "post": { + "tags": [ + "Releases" + ], 
+ "summary": "Reset a release source to a clean slate.", + "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). User-managed fields (`enabled`,\n`poll_interval_s`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", + "operationId": "reset_release_source", + "parameters": [ + { + "name": "source_id", + "in": "path", + "description": "Source ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Source reset", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ResetReleaseSourceResponse" + } + } + } + }, + "403": { + "description": "PluginsManage permission required" + }, + "404": { + "description": "Source not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/releases": { "get": { "tags": [ @@ -12332,9 +12380,6 @@ } } }, - "400": { - "description": "Invalid tracking_status" - }, "403": { "description": "Forbidden" }, @@ -23554,6 +23599,36 @@ "description": "Volume announced (if the source emits volumes)." } } + }, + { + "type": "object", + "description": "A release source's poll task completed.\n\nEmitted at the end of every `poll_release_source` task run, after\n`release_sources.last_summary` / `last_polled_at` / `etag` have been\npersisted. The frontend uses this to refresh the Release tracking\nsettings page in real time so users don't have to reload to see a\n\"Poll now\" finish. 
Carries no diff details — receivers should\ninvalidate the source query and re-read the row.", + "required": [ + "sourceId", + "pluginId", + "hadError", + "type" + ], + "properties": { + "hadError": { + "type": "boolean", + "description": "`true` if the poll wrote a `last_error`. Cheap \"did it fail\"\nhint without forcing the client to refetch." + }, + "pluginId": { + "type": "string", + "description": "Plugin that owns the source (`release_sources.plugin_id`).\nCheap filter for clients only watching certain plugins." + }, + "sourceId": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "release_source_polled" + ] + } + } } ], "description": "Specific event types for entity changes" @@ -29427,6 +29502,7 @@ "required": [ "id", "seriesId", + "seriesTitle", "sourceId", "externalReleaseId", "payloadUrl", @@ -29501,6 +29577,11 @@ "format": "uuid", "example": "550e8400-e29b-41d4-a716-446655440002" }, + "seriesTitle": { + "type": "string", + "description": "Series title at the time of the response. Joined from the `series`\ntable so the inbox UI can render a human-readable label without a\nfollow-up fetch. Falls back to the empty string only if the series\nrow was hard-deleted between the join and the read.", + "example": "Chainsaw Man" + }, "sourceId": { "type": "string", "format": "uuid", @@ -32778,6 +32859,7 @@ "required": [ "id", "seriesId", + "seriesTitle", "sourceId", "externalReleaseId", "payloadUrl", @@ -32852,6 +32934,11 @@ "format": "uuid", "example": "550e8400-e29b-41d4-a716-446655440002" }, + "seriesTitle": { + "type": "string", + "description": "Series title at the time of the response. Joined from the `series`\ntable so the inbox UI can render a human-readable label without a\nfollow-up fetch. 
Falls back to the empty string only if the series\nrow was hard-deleted between the join and the read.", + "example": "Chainsaw Man" + }, "sourceId": { "type": "string", "format": "uuid", @@ -33602,6 +33689,21 @@ } } }, + "ResetReleaseSourceResponse": { + "type": "object", + "description": "Response shape from the `reset` endpoint.\n\nReturns the number of ledger rows removed so callers can show a\nconfirmation toast. The source's transient poll state (etag,\nlast_polled_at, last_error, last_summary) is also cleared, but those\nare not counted here.", + "required": [ + "deletedLedgerEntries" + ], + "properties": { + "deletedLedgerEntries": { + "type": "integer", + "format": "int64", + "description": "Number of `release_ledger` rows deleted for this source.", + "minimum": 0 + } + } + }, "RetryAllErrorsRequest": { "type": "object", "description": "Request body for bulk retrying all book errors", @@ -35082,7 +35184,6 @@ "required": [ "seriesId", "tracked", - "trackingStatus", "trackChapters", "trackVolumes", "createdAt", @@ -35154,11 +35255,6 @@ "type": "boolean", "description": "Whether release tracking is enabled." }, - "trackingStatus": { - "type": "string", - "description": "Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`.", - "example": "ongoing" - }, "updatedAt": { "type": "string", "format": "date-time", @@ -38294,13 +38390,6 @@ "null" ] }, - "trackingStatus": { - "type": [ - "string", - "null" - ], - "description": "`ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`." 
- }, "volumeChapterMap": {} } }, diff --git a/migration/src/m20260503_000072_create_release_tracking.rs b/migration/src/m20260503_000072_create_release_tracking.rs index 0eab5b06..49b98d08 100644 --- a/migration/src/m20260503_000072_create_release_tracking.rs +++ b/migration/src/m20260503_000072_create_release_tracking.rs @@ -46,13 +46,6 @@ impl MigrationTrait for Migration { .not_null() .default(false), ) - // 'ongoing' | 'complete' | 'hiatus' | 'cancelled' | 'unknown' - .col( - ColumnDef::new(SeriesTracking::TrackingStatus) - .string_len(20) - .not_null() - .default("unknown"), - ) .col( ColumnDef::new(SeriesTracking::TrackChapters) .boolean() @@ -232,7 +225,6 @@ pub enum SeriesTracking { Table, SeriesId, Tracked, - TrackingStatus, TrackChapters, TrackVolumes, LatestKnownChapter, diff --git a/plugins/release-mangaupdates/src/index.ts b/plugins/release-mangaupdates/src/index.ts index a360b7f2..01153413 100644 --- a/plugins/release-mangaupdates/src/index.ts +++ b/plugins/release-mangaupdates/src/index.ts @@ -204,7 +204,11 @@ export interface SeriesPollOutcome { fetched: boolean; notModified: boolean; parsed: number; + /** Of those parsed, how many passed client-side filters and were sent to record. */ + matched: number; recorded: number; + /** Of those sent to record, how many the host deduped onto an existing row. */ + deduped: number; upstreamStatus: number; /** New ETag returned by upstream (only set when fetched=true). 
*/ etag: string | null; @@ -236,7 +240,9 @@ export async function pollSeries( fetched: false, notModified: false, parsed: 0, + matched: 0, recorded: 0, + deduped: 0, upstreamStatus: 0, etag: null, error: "missing mangaupdates external ID", @@ -260,7 +266,9 @@ export async function pollSeries( fetched: true, notModified: true, parsed: 0, + matched: 0, recorded: 0, + deduped: 0, upstreamStatus: 304, etag: null, error: "", @@ -273,7 +281,9 @@ export async function pollSeries( fetched: false, notModified: false, parsed: 0, + matched: 0, recorded: 0, + deduped: 0, upstreamStatus: result.status, etag: null, error: result.message, @@ -286,19 +296,29 @@ export async function pollSeries( languages: effectiveLanguagesForSeries(entry), blockedGroups: options.blockedGroups, }); + let matched = 0; let recorded = 0; + let deduped = 0; for (const item of items) { if (!passesFilters(item, filters)) continue; + matched++; const candidate = toCandidate(entry, item); const outcome = await recordCandidate(rpc, sourceId, candidate); - if (outcome && !outcome.deduped) recorded++; + if (!outcome) continue; + if (outcome.deduped) { + deduped++; + } else { + recorded++; + } } return { seriesId: entry.seriesId, fetched: true, notModified: false, parsed: items.length, + matched, recorded, + deduped, upstreamStatus: 200, etag: result.etag, error: "", @@ -314,10 +334,17 @@ async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<Rel const blockedGroups = parseCommaList(state.blockedGroupsCsv); let parsed = 0; + let matched = 0; let recorded = 0; + let deduped = 0; let worstStatus = 200; let lastEtag: string | null = null; let seenSeries = 0; + // Series the host returned that lack a MangaUpdates external ID. A high + // count here is the most common cause of an "empty" poll: the plugin + // can't fetch a feed without an MU ID, so the user needs to populate + // those (manual paste or metadata refresh from MangaBaka). 
+ let skippedNoMuId = 0; for await (const entry of iterateTrackedSeries(rpc, sourceId)) { seenSeries++; @@ -326,29 +353,45 @@ async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<Rel timeoutMs: state.requestTimeoutMs, }); parsed += outcome.parsed; + matched += outcome.matched; recorded += outcome.recorded; + deduped += outcome.deduped; if (outcome.upstreamStatus > worstStatus) { worstStatus = outcome.upstreamStatus; } if (outcome.etag) lastEtag = outcome.etag; - if (outcome.error) { + if (outcome.error === "missing mangaupdates external ID") { + skippedNoMuId++; + } else if (outcome.error) { logger.warn(`series ${entry.seriesId}: ${outcome.error} (status ${outcome.upstreamStatus})`); } } + if (skippedNoMuId > 0) { + logger.info( + `skipped ${skippedNoMuId} of ${seenSeries} tracked series for source=${sourceId}: no mangaupdates external ID. Add one in the Tracking panel or run a metadata refresh.`, + ); + } + logger.info( - `poll complete: source=${sourceId} series=${seenSeries} parsed=${parsed} recorded=${recorded} worst_status=${worstStatus}`, + `poll complete: source=${sourceId} series=${seenSeries} skipped=${skippedNoMuId} parsed=${parsed} matched=${matched} recorded=${recorded} deduped=${deduped} worst_status=${worstStatus}`, ); - // The plugin streamed candidates already (no `candidates` payload). Pass - // through the worst upstream status the host should consider for backoff. + // Report counters back to the host so the source's `last_summary` is + // accurate. Without these the host only sees the (empty) `candidates` + // payload — we record via reverse-RPC mid-poll — and the badge reads + // "Fetched 0 items" no matter what actually happened. // Per-series ETags don't align with the per-source state slot, so we - // intentionally leave `etag` undefined here unless we actually saw one + // intentionally leave `etag` undefined unless we actually saw one // (which today we won't, since we don't pass If-None-Match per series). 
return { notModified: false, upstreamStatus: worstStatus, + parsed, + matched, + recorded, + deduped, ...(lastEtag !== null ? { etag: lastEtag } : {}), }; } diff --git a/plugins/release-nyaa/src/index.ts b/plugins/release-nyaa/src/index.ts index 38dea6a0..5a8a1e87 100644 --- a/plugins/release-nyaa/src/index.ts +++ b/plugins/release-nyaa/src/index.ts @@ -185,6 +185,8 @@ export interface SubscriptionPollOutcome { parsed: number; matched: number; recorded: number; + /** Of those sent to record, how many the host deduped onto an existing row. */ + deduped: number; upstreamStatus: number; /** New ETag returned by upstream (only set when fetched=true). */ etag: string | null; @@ -226,7 +228,8 @@ function toCandidate( volume: item.volume, language: "en", groupOrUploader: item.group ?? (subscription.kind === "user" ? subscription.identifier : null), - payloadUrl: item.link.length > 0 ? item.link : `urn:nyaa:${item.externalReleaseId}`, + payloadUrl: + item.pageUrl ?? (item.link.length > 0 ? item.link : `urn:nyaa:${item.externalReleaseId}`), infoHash: item.infoHash, formatHints, observedAt: item.observedAt, @@ -263,6 +266,7 @@ export async function pollSubscription( parsed: 0, matched: 0, recorded: 0, + deduped: 0, upstreamStatus: 304, etag: null, error: "", @@ -277,6 +281,7 @@ export async function pollSubscription( parsed: 0, matched: 0, recorded: 0, + deduped: 0, upstreamStatus: result.status, etag: null, error: result.message, @@ -287,6 +292,7 @@ export async function pollSubscription( const items = parseFeed(result.body); let matched = 0; let recorded = 0; + let deduped = 0; for (const item of items) { const m = matchSeries(item.seriesGuess, candidates, { fuzzyFloor: options.minConfidence, @@ -295,7 +301,12 @@ export async function pollSubscription( matched++; const candidate = toCandidate(m, item, subscription); const outcome = await recordCandidate(rpc, sourceId, candidate); - if (outcome && !outcome.deduped) recorded++; + if (!outcome) continue; + if 
(outcome.deduped) { + deduped++; + } else { + recorded++; + } } return { subscription, @@ -304,6 +315,7 @@ export async function pollSubscription( parsed: items.length, matched, recorded, + deduped, upstreamStatus: 200, etag: result.etag, error: "", @@ -371,12 +383,20 @@ async function poll(params: ReleasePollRequest, rpc: HostRpcClient): Promise<Rel } logger.info( - `poll complete: source=${sourceId} subscription=${subscription.kind}:${subscription.identifier} tracked=${tracked.length} parsed=${outcome.parsed} matched=${outcome.matched} recorded=${outcome.recorded} status=${outcome.upstreamStatus}${outcome.notModified ? " (304)" : ""}`, + `poll complete: source=${sourceId} subscription=${subscription.kind}:${subscription.identifier} tracked=${tracked.length} parsed=${outcome.parsed} matched=${outcome.matched} recorded=${outcome.recorded} deduped=${outcome.deduped} status=${outcome.upstreamStatus}${outcome.notModified ? " (304)" : ""}`, ); + // Report counters back to the host so it can build a meaningful + // `last_summary` for the source. Without these, the host only sees the + // (empty) `candidates` payload — we record via reverse-RPC mid-poll — + // and the status badge reads "Fetched 0 items" even on a busy poll. return { notModified: outcome.notModified, upstreamStatus: outcome.upstreamStatus, + parsed: outcome.parsed, + matched: outcome.matched, + recorded: outcome.recorded, + deduped: outcome.deduped, ...(outcome.etag !== null ? 
{ etag: outcome.etag } : {}), }; } diff --git a/plugins/release-nyaa/src/parser.test.ts b/plugins/release-nyaa/src/parser.test.ts index 6332c694..725378d7 100644 --- a/plugins/release-nyaa/src/parser.test.ts +++ b/plugins/release-nyaa/src/parser.test.ts @@ -147,6 +147,7 @@ describe("parseItem", () => { if (item === null) return; expect(item.title).toBe("[1r0n] Chainsaw Man - Chapter 142 (Digital)"); expect(item.link).toBe("https://nyaa.si/download/12345.torrent"); + expect(item.pageUrl).toBe("https://nyaa.si/view/12345"); expect(item.externalReleaseId).toBe("https://nyaa.si/view/12345"); // guid wins expect(item.infoHash).toBe("abc123def456"); // lowercased expect(item.chapter).toBe(142); @@ -158,6 +159,18 @@ describe("parseItem", () => { expect(parseItem("<item><link>x</link></item>")).toBeNull(); }); + it("returns null pageUrl when guid is not a /view/ permalink", () => { + const xml = `<item> + <title><![CDATA[[1r0n] Foo c.1 (Digital)]]></title> + <link>https://nyaa.si/download/9.torrent</link> + <guid isPermaLink="false">tag:nyaa.si,2026:9</guid> + </item>`; + const item = parseItem(xml); + expect(item).not.toBeNull(); + if (item === null) return; + expect(item.pageUrl).toBeNull(); + }); + it("derives a deterministic externalReleaseId from infoHash when guid+link missing", () => { const xml = `<item> <title><![CDATA[[1r0n] Foo c.1 (Digital)]]></title> diff --git a/plugins/release-nyaa/src/parser.ts b/plugins/release-nyaa/src/parser.ts index 635fc843..1479a66d 100644 --- a/plugins/release-nyaa/src/parser.ts +++ b/plugins/release-nyaa/src/parser.ts @@ -47,8 +47,14 @@ export interface ParsedRssItem { group: string | null; /** Format hints as a small dictionary (digital, jxl, ...). */ formatHints: Record<string, boolean>; - /** Magnet/torrent link or release page URL. */ + /** RSS `<link>` value. On Nyaa this is the `.torrent` download URL. */ link: string; + /** + * Permalink to the release post page (e.g. `https://nyaa.si/view/12345`), + * derived from the `<guid isPermaLink>` tag. Null when the guid is + * missing or doesn't look like a post URL. 
The `<link>` tag in Nyaa feeds is the `.torrent`
*/ export interface ReleasePollResponse { /** Optional batch of candidates the host should evaluate and ledger. */ @@ -249,4 +255,27 @@ export interface ReleasePollResponse { notModified?: boolean; /** HTTP status code observed (used by host's per-host backoff). */ upstreamStatus?: number; + /** + * Items the plugin parsed from the upstream feed before any matching + * or threshold filtering. Streaming plugins should set this so the + * host's `last_summary` reflects upstream activity, not just the shape + * of the response payload. + */ + parsed?: number; + /** + * Of those parsed, the count that matched a tracked-series alias (i.e. + * became candidates the plugin then evaluated/streamed). + */ + matched?: number; + /** + * Of those matched, the count actually inserted into the ledger + * (excludes dedupes). For plugins that stream via `releases/record`, + * this is the count of non-deduped record outcomes. + */ + recorded?: number; + /** + * Of those matched, the count the host deduped onto an existing ledger + * row. Optional; when omitted the host infers `matched - recorded`. 
+ */ + deduped?: number; } diff --git a/src/api/docs.rs b/src/api/docs.rs index 0f649a38..162f4952 100644 --- a/src/api/docs.rs +++ b/src/api/docs.rs @@ -272,6 +272,7 @@ The following paths are exempt from rate limiting: v1::handlers::releases::list_release_sources, v1::handlers::releases::update_release_source, v1::handlers::releases::poll_release_source_now, + v1::handlers::releases::reset_release_source, v1::handlers::releases::get_release_tracking_applicability, // Cover management endpoints @@ -713,6 +714,7 @@ The following paths are exempt from rate limiting: v1::dto::release::ReleaseSourceListResponse, v1::dto::release::UpdateReleaseSourceRequest, v1::dto::release::PollNowResponse, + v1::dto::release::ResetReleaseSourceResponse, v1::handlers::releases::ApplicabilityResponse, v1::dto::PaginatedResponse, diff --git a/src/api/routes/v1/dto/release.rs b/src/api/routes/v1/dto/release.rs index 6a84d55f..3521874d 100644 --- a/src/api/routes/v1/dto/release.rs +++ b/src/api/routes/v1/dto/release.rs @@ -29,6 +29,12 @@ pub struct ReleaseLedgerEntryDto { pub id: Uuid, #[schema(example = "550e8400-e29b-41d4-a716-446655440002")] pub series_id: Uuid, + /// Series title at the time of the response. Joined from the `series` + /// table so the inbox UI can render a human-readable label without a + /// follow-up fetch. Falls back to the empty string only if the series + /// row was hard-deleted between the join and the read. + #[schema(example = "Chainsaw Man")] + pub series_title: String, #[schema(example = "550e8400-e29b-41d4-a716-446655440b00")] pub source_id: Uuid, /// Plugin-stable identity for the release (used for dedup). @@ -62,11 +68,15 @@ pub struct ReleaseLedgerEntryDto { pub created_at: DateTime, } -impl From for ReleaseLedgerEntryDto { - fn from(m: release_ledger::Model) -> Self { +impl ReleaseLedgerEntryDto { + /// Build a DTO from a ledger row plus the joined series title. 
The title + /// must be looked up by the caller (typically a batch query in the + /// handler) since `From` alone can't carry it. + pub fn from_model_with_series_title(m: release_ledger::Model, series_title: String) -> Self { Self { id: m.id, series_id: m.series_id, + series_title, source_id: m.source_id, external_release_id: m.external_release_id, info_hash: m.info_hash, @@ -184,6 +194,19 @@ pub struct UpdateReleaseSourceRequest { pub poll_interval_s: Option, } +/// Response shape from the `reset` endpoint. +/// +/// Returns the number of ledger rows removed so callers can show a +/// confirmation toast. The source's transient poll state (etag, +/// last_polled_at, last_error, last_summary) is also cleared, but those +/// are not counted here. +#[derive(Debug, Serialize, Deserialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct ResetReleaseSourceResponse { + /// Number of `release_ledger` rows deleted for this source. + pub deleted_ledger_entries: u64, +} + /// Response shape from the `poll-now` endpoint. /// /// `status` is `enqueued` after a successful enqueue. The `message` carries diff --git a/src/api/routes/v1/dto/tracking.rs b/src/api/routes/v1/dto/tracking.rs index 1609fe27..72c2defe 100644 --- a/src/api/routes/v1/dto/tracking.rs +++ b/src/api/routes/v1/dto/tracking.rs @@ -28,9 +28,6 @@ pub struct SeriesTrackingDto { pub series_id: Uuid, /// Whether release tracking is enabled. pub tracked: bool, - /// Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`. - #[schema(example = "ongoing")] - pub tracking_status: String, /// Whether to announce new chapters. pub track_chapters: bool, /// Whether to announce new volumes. 
@@ -66,7 +63,6 @@ impl From for SeriesTrackingDto { Self { series_id: m.series_id, tracked: m.tracked, - tracking_status: m.tracking_status, track_chapters: m.track_chapters, track_volumes: m.track_volumes, latest_known_chapter: m.latest_known_chapter, @@ -88,8 +84,6 @@ impl From for SeriesTrackingDto { #[serde(rename_all = "camelCase")] pub struct UpdateSeriesTrackingRequest { pub tracked: Option, - /// `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`. - pub tracking_status: Option, pub track_chapters: Option, pub track_volumes: Option, /// Use `Some(null)` to clear, `Some()` to set, omit to leave alone. diff --git a/src/api/routes/v1/handlers/releases.rs b/src/api/routes/v1/handlers/releases.rs index d5862fec..4b5e837f 100644 --- a/src/api/routes/v1/handlers/releases.rs +++ b/src/api/routes/v1/handlers/releases.rs @@ -28,7 +28,8 @@ use super::super::dto::common::{ }; use super::super::dto::release::{ PollNowResponse, ReleaseLedgerEntryDto, ReleaseLedgerListResponse, ReleaseSourceDto, - ReleaseSourceListResponse, UpdateReleaseLedgerEntryRequest, UpdateReleaseSourceRequest, + ReleaseSourceListResponse, ResetReleaseSourceResponse, UpdateReleaseLedgerEntryRequest, + UpdateReleaseSourceRequest, }; use super::paginated_response; use crate::api::{ @@ -43,6 +44,40 @@ use crate::db::repositories::{ }; use crate::events::{EntityChangeEvent, EntityEvent}; +/// Hydrate ledger rows with series titles via a single batched lookup. +/// +/// The DTO carries `series_title` so the inbox UI can render a human label +/// without a follow-up call. We do this in the handler (rather than a SQL +/// JOIN in the repo) to keep the repository surface narrow and reuse the +/// existing `SeriesRepository::get_by_ids` batch query. 
+async fn hydrate_ledger_dtos( + db: &sea_orm::DatabaseConnection, + rows: Vec, +) -> Result, ApiError> { + let mut series_ids: Vec = rows.iter().map(|r| r.series_id).collect(); + series_ids.sort_unstable(); + series_ids.dedup(); + + let title_by_id: std::collections::HashMap = if series_ids.is_empty() { + std::collections::HashMap::new() + } else { + SeriesRepository::get_by_ids(db, &series_ids) + .await + .map_err(|e| ApiError::Internal(format!("Failed to load series titles: {}", e)))? + .into_iter() + .map(|s| (s.id, s.name)) + .collect() + }; + + Ok(rows + .into_iter() + .map(|row| { + let title = title_by_id.get(&row.series_id).cloned().unwrap_or_default(); + ReleaseLedgerEntryDto::from_model_with_series_title(row, title) + }) + .collect()) +} + // ============================================================================= // Per-series ledger // ============================================================================= @@ -99,7 +134,7 @@ pub async fn list_series_releases( ) -> Result { auth.require_permission(&Permission::SeriesRead)?; - SeriesRepository::get_by_id(&state.db, series_id) + let series = SeriesRepository::get_by_id(&state.db, series_id) .await .map_err(|e| ApiError::Internal(format!("Failed to fetch series: {}", e)))? .ok_or_else(|| ApiError::NotFound("Series not found".to_string()))?; @@ -152,7 +187,12 @@ pub async fn list_series_releases( total.div_ceil(page_size) }; - let dtos: Vec = rows.into_iter().map(Into::into).collect(); + // All rows belong to the same series, so we can reuse the title we + // already loaded for the existence check rather than re-fetching it. 
+ let dtos: Vec = rows + .into_iter() + .map(|row| ReleaseLedgerEntryDto::from_model_with_series_title(row, series.name.clone())) + .collect(); let base_path = format!("/api/v1/series/{}/releases", series_id); let mut builder = PaginationLinkBuilder::new(&base_path, page, page_size, total_pages); if let Some(ref s) = params.state { @@ -236,7 +276,7 @@ pub async fn list_release_inbox( total.div_ceil(page_size) }; - let dtos: Vec = rows.into_iter().map(Into::into).collect(); + let dtos = hydrate_ledger_dtos(&state.db, rows).await?; let mut builder = PaginationLinkBuilder::new("/api/v1/releases", page, page_size, total_pages); if let Some(ref s) = params.state { builder = builder.with_param("state", s); @@ -379,14 +419,19 @@ async fn update_state_internal( } })?; - // Look up the series to get library_id for the SSE event payload. If the - // series was deleted concurrently we still return the updated row - - // dropping the event is safe. - if let Ok(Some(series)) = SeriesRepository::get_by_id(&state.db, series_id).await { + // Look up the series for both the SSE event (library_id) and the DTO + // (series_title). If the series was deleted concurrently we still return + // the updated row, dropping the event and using an empty title — the + // ledger row's series_id remains valid for navigation. 
+ let series = SeriesRepository::get_by_id(&state.db, series_id) + .await + .ok() + .flatten(); + if let Some(ref s) = series { let event = EntityChangeEvent { event: EntityEvent::SeriesUpdated { series_id, - library_id: series.library_id, + library_id: s.library_id, fields: Some(vec!["releases".to_string()]), }, timestamp: Utc::now(), @@ -395,7 +440,10 @@ async fn update_state_internal( let _ = state.event_broadcaster.emit(event); } - Ok(Json(updated.into())) + let title = series.map(|s| s.name).unwrap_or_default(); + Ok(Json(ReleaseLedgerEntryDto::from_model_with_series_title( + updated, title, + ))) } // ============================================================================= @@ -543,19 +591,86 @@ pub async fn poll_release_source_now( ))); } - let task_id = crate::scheduler::release_sources::enqueue_poll_now(&state.db, source_id) + let outcome = crate::scheduler::release_sources::enqueue_poll_now(&state.db, source_id) .await .map_err(|e| ApiError::Internal(format!("Failed to enqueue poll task: {}", e)))?; + let (status, message) = if outcome.coalesced { + ( + "already_running".to_string(), + format!( + "A poll for this source is already running (task_id={}); coalesced", + outcome.task_id + ), + ) + } else { + ( + "enqueued".to_string(), + format!("Poll task enqueued (task_id={})", outcome.task_id), + ) + }; + Ok(( StatusCode::ACCEPTED, - Json(PollNowResponse { - status: "enqueued".to_string(), - message: format!("Poll task enqueued (task_id={})", task_id), - }), + Json(PollNowResponse { status, message }), )) } +/// Reset a release source to a clean slate. +/// +/// Deletes every `release_ledger` row owned by the source and clears the +/// source's transient poll state (`etag`, `last_polled_at`, `last_error`, +/// `last_error_at`, `last_summary`). User-managed fields (`enabled`, +/// `poll_interval_s`, `display_name`, `config`) are preserved. 
+/// +/// Intended for testing/troubleshooting: after a reset, the next poll +/// fetches the upstream feed without an `If-None-Match` header (so no 304 +/// short-circuit) and re-records every release as `announced`. Does NOT +/// auto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after +/// resetting if you want immediate re-fetch. +#[utoipa::path( + post, + path = "/api/v1/release-sources/{source_id}/reset", + params( + ("source_id" = Uuid, Path, description = "Source ID") + ), + responses( + (status = 200, description = "Source reset", body = ResetReleaseSourceResponse), + (status = 404, description = "Source not found"), + (status = 403, description = "PluginsManage permission required"), + ), + security( + ("jwt_bearer" = []), + ("api_key" = []) + ), + tag = "Releases" +)] +pub async fn reset_release_source( + State(state): State>, + auth: AuthContext, + Path(source_id): Path, +) -> Result, ApiError> { + auth.require_permission(&Permission::PluginsManage)?; + + // Confirm existence to return a clean 404. + ReleaseSourceRepository::get_by_id(&state.db, source_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to fetch source: {}", e)))? 
+ .ok_or_else(|| ApiError::NotFound("Release source not found".to_string()))?; + + let deleted = ReleaseLedgerRepository::delete_by_source(&state.db, source_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to clear ledger: {}", e)))?; + + ReleaseSourceRepository::clear_poll_state(&state.db, source_id) + .await + .map_err(|e| ApiError::Internal(format!("Failed to reset source state: {}", e)))?; + + Ok(Json(ResetReleaseSourceResponse { + deleted_ledger_entries: deleted, + })) +} + // ============================================================================= // OpenAPI placeholder // ============================================================================= diff --git a/src/api/routes/v1/handlers/tracking.rs b/src/api/routes/v1/handlers/tracking.rs index 1b35a3cf..d7ad2be2 100644 --- a/src/api/routes/v1/handlers/tracking.rs +++ b/src/api/routes/v1/handlers/tracking.rs @@ -90,7 +90,6 @@ pub async fn get_series_tracking( request_body = UpdateSeriesTrackingRequest, responses( (status = 200, description = "Tracking config updated", body = SeriesTrackingDto), - (status = 400, description = "Invalid tracking_status"), (status = 404, description = "Series not found"), (status = 403, description = "Forbidden"), ), @@ -138,7 +137,6 @@ pub async fn update_series_tracking( let update = TrackingUpdate { tracked: request.tracked, - tracking_status: request.tracking_status, track_chapters: request.track_chapters, track_volumes: request.track_volumes, latest_known_chapter: request.latest_known_chapter, @@ -153,14 +151,7 @@ pub async fn update_series_tracking( let row = SeriesTrackingRepository::upsert(&state.db, series_id, update) .await - .map_err(|e| { - // Surface validation errors (e.g., invalid tracking_status) as 400. 
- if e.to_string().contains("invalid tracking_status") { - ApiError::BadRequest(e.to_string()) - } else { - ApiError::Internal(format!("Failed to update tracking: {}", e)) - } - })?; + .map_err(|e| ApiError::Internal(format!("Failed to update tracking: {}", e)))?; let event = EntityChangeEvent { event: EntityEvent::SeriesUpdated { diff --git a/src/api/routes/v1/routes/releases.rs b/src/api/routes/v1/routes/releases.rs index f770f6fa..24aa0ade 100644 --- a/src/api/routes/v1/routes/releases.rs +++ b/src/api/routes/v1/routes/releases.rs @@ -47,4 +47,8 @@ pub fn routes(_state: Arc) -> Router> { "/release-sources/{source_id}/poll-now", post(handlers::releases::poll_release_source_now), ) + .route( + "/release-sources/{source_id}/reset", + post(handlers::releases::reset_release_source), + ) } diff --git a/src/db/entities/series_tracking.rs b/src/db/entities/series_tracking.rs index c210e2cb..76aab71f 100644 --- a/src/db/entities/series_tracking.rs +++ b/src/db/entities/series_tracking.rs @@ -18,8 +18,6 @@ pub struct Model { pub series_id: Uuid, /// Whether release tracking is enabled for this series. pub tracked: bool, - /// 'ongoing' | 'complete' | 'hiatus' | 'cancelled' | 'unknown'. - pub tracking_status: String, pub track_chapters: bool, pub track_volumes: bool, /// Latest external chapter (decimal handles 12.5, 110.1, etc.). @@ -59,32 +57,3 @@ impl Related for Entity { } impl ActiveModelBehavior for ActiveModel {} - -/// Canonical strings for `tracking_status`. 
-pub mod tracking_status { - pub const ONGOING: &str = "ongoing"; - pub const COMPLETE: &str = "complete"; - pub const HIATUS: &str = "hiatus"; - pub const CANCELLED: &str = "cancelled"; - pub const UNKNOWN: &str = "unknown"; - - pub fn is_valid(s: &str) -> bool { - matches!(s, ONGOING | COMPLETE | HIATUS | CANCELLED | UNKNOWN) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn tracking_status_validates_known_values() { - assert!(tracking_status::is_valid("ongoing")); - assert!(tracking_status::is_valid("complete")); - assert!(tracking_status::is_valid("hiatus")); - assert!(tracking_status::is_valid("cancelled")); - assert!(tracking_status::is_valid("unknown")); - assert!(!tracking_status::is_valid("paused")); - assert!(!tracking_status::is_valid("")); - } -} diff --git a/src/db/repositories/release_ledger.rs b/src/db/repositories/release_ledger.rs index f4b55126..80386326 100644 --- a/src/db/repositories/release_ledger.rs +++ b/src/db/repositories/release_ledger.rs @@ -222,6 +222,17 @@ impl ReleaseLedgerRepository { let result = ReleaseLedger::delete_by_id(id).exec(db).await?; Ok(result.rows_affected > 0) } + + /// Delete all ledger rows for a source. Returns the number of rows + /// removed. Used by the source-reset admin endpoint to give testers a + /// clean slate without dropping the source itself. + pub async fn delete_by_source(db: &DatabaseConnection, source_id: Uuid) -> Result { + let result = ReleaseLedger::delete_many() + .filter(release_ledger::Column::SourceId.eq(source_id)) + .exec(db) + .await?; + Ok(result.rows_affected) + } } #[cfg(test)] @@ -500,6 +511,62 @@ mod tests { assert!(rows.is_empty(), "ledger rows cascaded with series"); } + #[tokio::test] + async fn delete_by_source_removes_only_that_sources_rows() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_a) = setup_world(conn).await; + + // Add a second source so we can prove scoping. 
+ let source_b = ReleaseSourceRepository::create( + conn, + NewReleaseSource { + plugin_id: "release-nyaa".to_string(), + source_key: "nyaa:user:other".to_string(), + display_name: "Nyaa - other".to_string(), + kind: kind::RSS_UPLOADER.to_string(), + poll_interval_s: 3600, + enabled: None, + config: None, + }, + ) + .await + .unwrap(); + + ReleaseLedgerRepository::record(conn, entry(series_id, source_a, "rel-1")) + .await + .unwrap(); + ReleaseLedgerRepository::record(conn, entry(series_id, source_a, "rel-2")) + .await + .unwrap(); + ReleaseLedgerRepository::record(conn, entry(series_id, source_b.id, "rel-3")) + .await + .unwrap(); + + let removed = ReleaseLedgerRepository::delete_by_source(conn, source_a) + .await + .unwrap(); + assert_eq!(removed, 2); + + // Source A is empty; source B still has its row. + let after_a = + ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap() + .into_iter() + .filter(|r| r.source_id == source_a) + .count(); + assert_eq!(after_a, 0); + let after_b = + ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap() + .into_iter() + .filter(|r| r.source_id == source_b.id) + .count(); + assert_eq!(after_b, 1); + } + #[tokio::test] async fn cascade_deletes_ledger_when_source_deleted() { let (db, _temp) = create_test_db().await; diff --git a/src/db/repositories/release_sources.rs b/src/db/repositories/release_sources.rs index 1b58b97e..2f0b54cd 100644 --- a/src/db/repositories/release_sources.rs +++ b/src/db/repositories/release_sources.rs @@ -291,6 +291,30 @@ impl ReleaseSourceRepository { let result = ReleaseSources::delete_by_id(id).exec(db).await?; Ok(result.rows_affected > 0) } + + /// Reset all transient poll state on a source: clears `etag`, + /// `last_polled_at`, `last_error`, `last_error_at`, and `last_summary`. + /// Leaves user-managed fields (`enabled`, `poll_interval_s`, + /// `display_name`, `config`) untouched. 
+ /// + /// Used by the source-reset admin endpoint so a forced re-poll fetches + /// the upstream feed afresh (no `If-None-Match` 304) and re-records + /// every release as `announced`. + pub async fn clear_poll_state(db: &DatabaseConnection, id: Uuid) -> Result<()> { + let existing = ReleaseSources::find_by_id(id) + .one(db) + .await? + .ok_or_else(|| anyhow::anyhow!("release source {} not found", id))?; + let mut active: release_sources::ActiveModel = existing.into(); + active.last_polled_at = Set(None); + active.last_error = Set(None); + active.last_error_at = Set(None); + active.etag = Set(None); + active.last_summary = Set(None); + active.updated_at = Set(Utc::now()); + active.update(db).await?; + Ok(()) + } } #[cfg(test)] @@ -635,6 +659,54 @@ mod tests { assert_eq!(mu.len(), 1); } + #[tokio::test] + async fn clear_poll_state_resets_transient_fields_only() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let s = ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + + // Seed some poll state and a user override. + ReleaseSourceRepository::record_poll_success( + conn, + s.id, + Utc::now(), + Some("\"etag-1\"".to_string()), + Some("Fetched 3 items".to_string()), + ) + .await + .unwrap(); + ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + enabled: Some(false), + poll_interval_s: Some(900), + ..Default::default() + }, + ) + .await + .unwrap(); + + ReleaseSourceRepository::clear_poll_state(conn, s.id) + .await + .unwrap(); + + let after = ReleaseSourceRepository::get_by_id(conn, s.id) + .await + .unwrap() + .unwrap(); + assert!(after.etag.is_none()); + assert!(after.last_polled_at.is_none()); + assert!(after.last_error.is_none()); + assert!(after.last_error_at.is_none()); + assert!(after.last_summary.is_none()); + // User-managed fields preserved. 
+ assert!(!after.enabled); + assert_eq!(after.poll_interval_s, 900); + } + #[tokio::test] async fn delete_removes_row() { let (db, _temp) = create_test_db().await; diff --git a/src/db/repositories/series_tracking.rs b/src/db/repositories/series_tracking.rs index 2dab7486..450c03e1 100644 --- a/src/db/repositories/series_tracking.rs +++ b/src/db/repositories/series_tracking.rs @@ -14,7 +14,7 @@ use sea_orm::{ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, Qu use uuid::Uuid; use crate::db::entities::series_tracking::{ - self, Entity as SeriesTracking, Model as SeriesTrackingRow, tracking_status, + self, Entity as SeriesTracking, Model as SeriesTrackingRow, }; /// Parameters for upserting a tracking row. Each `Option>` distinguishes @@ -22,7 +22,6 @@ use crate::db::entities::series_tracking::{ #[derive(Debug, Default, Clone)] pub struct TrackingUpdate { pub tracked: Option, - pub tracking_status: Option, pub track_chapters: Option, pub track_volumes: Option, /// Outer `None` = leave alone; inner `None` = clear. @@ -61,7 +60,6 @@ impl SeriesTrackingRepository { Ok(SeriesTrackingRow { series_id, tracked: false, - tracking_status: tracking_status::UNKNOWN.to_string(), track_chapters: true, track_volumes: true, latest_known_chapter: None, @@ -82,13 +80,6 @@ impl SeriesTrackingRepository { series_id: Uuid, update: TrackingUpdate, ) -> Result { - // Validate tracking_status before doing any DB work. 
- if let Some(ref status) = update.tracking_status - && !tracking_status::is_valid(status) - { - anyhow::bail!("invalid tracking_status: {}", status); - } - let now = Utc::now(); let existing = SeriesTracking::find_by_id(series_id).one(db).await?; @@ -98,9 +89,6 @@ impl SeriesTrackingRepository { if let Some(v) = update.tracked { active.tracked = Set(v); } - if let Some(v) = update.tracking_status { - active.tracking_status = Set(v); - } if let Some(v) = update.track_chapters { active.track_chapters = Set(v); } @@ -133,9 +121,6 @@ impl SeriesTrackingRepository { let active = series_tracking::ActiveModel { series_id: Set(series_id), tracked: Set(update.tracked.unwrap_or(false)), - tracking_status: Set(update - .tracking_status - .unwrap_or_else(|| tracking_status::UNKNOWN.to_string())), track_chapters: Set(update.track_chapters.unwrap_or(true)), track_volumes: Set(update.track_volumes.unwrap_or(true)), latest_known_chapter: Set(update.latest_known_chapter.unwrap_or(None)), @@ -267,7 +252,6 @@ mod tests { .unwrap(); assert_eq!(row.series_id, series_id); assert!(!row.tracked); - assert_eq!(row.tracking_status, "unknown"); assert!(row.track_chapters); assert!(row.track_volumes); } @@ -283,7 +267,6 @@ mod tests { series_id, TrackingUpdate { tracked: Some(true), - tracking_status: Some("ongoing".to_string()), latest_known_chapter: Some(Some(142.0)), ..Default::default() }, @@ -291,7 +274,6 @@ mod tests { .await .unwrap(); assert!(row.tracked); - assert_eq!(row.tracking_status, "ongoing"); assert_eq!(row.latest_known_chapter, Some(142.0)); // Second upsert updates only specified fields. 
@@ -306,10 +288,6 @@ mod tests { .await .unwrap(); assert!(row2.tracked, "tracked should be preserved"); - assert_eq!( - row2.tracking_status, "ongoing", - "status should be preserved" - ); assert_eq!(row2.latest_known_chapter, Some(143.0)); } @@ -343,24 +321,6 @@ mod tests { assert_eq!(cleared.latest_known_chapter, None); } - #[tokio::test] - async fn upsert_rejects_invalid_status() { - let (db, _temp) = create_test_db().await; - let series_id = make_series(db.sea_orm_connection()).await; - - let err = SeriesTrackingRepository::upsert( - db.sea_orm_connection(), - series_id, - TrackingUpdate { - tracking_status: Some("paused".to_string()), - ..Default::default() - }, - ) - .await - .unwrap_err(); - assert!(err.to_string().contains("invalid tracking_status")); - } - #[tokio::test] async fn set_tracked_toggles_flag() { let (db, _temp) = create_test_db().await; @@ -516,7 +476,7 @@ mod tests { conn, series_id, TrackingUpdate { - tracking_status: Some("ongoing".to_string()), + tracked: Some(true), ..Default::default() }, ) diff --git a/src/db/repositories/task.rs b/src/db/repositories/task.rs index 65e3ab8e..3e984943 100644 --- a/src/db/repositories/task.rs +++ b/src/db/repositories/task.rs @@ -452,6 +452,75 @@ impl TaskRepository { Ok(result.is_some()) } + /// Find a pending or processing task by `task_type` and a single + /// JSON-param key/value match. Returns the first matching task ID, if + /// any. Used by enqueue paths that want to coalesce concurrent + /// requests onto an in-flight task instead of stacking duplicates. + /// + /// `param_value` is matched as a string against `params->>key`. UUIDs + /// should be passed as their canonical hyphenated form. 
+ pub async fn find_pending_or_processing_by_param( + db: &DatabaseConnection, + task_type: &str, + param_key: &str, + param_value: &str, + ) -> Result> { + let backend = db.get_database_backend(); + let stmt = match backend { + DbBackend::Postgres => Statement::from_sql_and_values( + DbBackend::Postgres, + r#"SELECT id FROM tasks + WHERE task_type = $1 + AND status IN ('pending', 'processing') + AND params->>$2 = $3 + ORDER BY created_at ASC + LIMIT 1"#, + vec![task_type.into(), param_key.into(), param_value.into()], + ), + _ => { + // SQLite's json_extract path needs a string literal, not a + // bind parameter, so we splice the key into the JSON path. + // Reject anything that isn't a simple identifier to avoid + // injection — callers pass static keys (`source_id`, etc.). + if !param_key + .chars() + .all(|c| c.is_ascii_alphanumeric() || c == '_') + { + anyhow::bail!("invalid param_key: {}", param_key); + } + let path = format!("$.{}", param_key); + Statement::from_sql_and_values( + DbBackend::Sqlite, + format!( + r#"SELECT id FROM tasks + WHERE task_type = ? + AND status IN ('pending', 'processing') + AND json_extract(params, '{}') = ? + ORDER BY created_at ASC + LIMIT 1"#, + path + ), + vec![task_type.into(), param_value.into()], + ) + } + }; + + let result = db + .query_one(stmt) + .await + .context("Failed to query for in-flight task")?; + match result { + Some(row) => { + let task_id: Uuid = row.try_get::("", "id").or_else(|_| { + let id_str: String = row.try_get("", "id")?; + Uuid::parse_str(&id_str).map_err(|e| sea_orm::DbErr::Type(e.to_string())) + })?; + Ok(Some(task_id)) + } + None => Ok(None), + } + } + /// Find a pending or processing task with matching params, returning its ID and status. 
/// /// Like `has_pending_or_processing` but returns the task ID and status string diff --git a/src/events/types.rs b/src/events/types.rs index 54a06ec8..9b7e547a 100644 --- a/src/events/types.rs +++ b/src/events/types.rs @@ -195,6 +195,26 @@ pub enum EntityEvent { /// preference filters. language: String, }, + /// A release source's poll task completed. + /// + /// Emitted at the end of every `poll_release_source` task run, after + /// `release_sources.last_summary` / `last_polled_at` / `etag` have been + /// persisted. The frontend uses this to refresh the Release tracking + /// settings page in real time so users don't have to reload to see a + /// "Poll now" finish. Carries no diff details — receivers should + /// invalidate the source query and re-read the row. + ReleaseSourcePolled { + #[serde(rename = "sourceId")] + source_id: Uuid, + /// Plugin that owns the source (`release_sources.plugin_id`). + /// Cheap filter for clients only watching certain plugins. + #[serde(rename = "pluginId")] + plugin_id: String, + /// `true` if the poll wrote a `last_error`. Cheap "did it fail" + /// hint without forcing the client to refetch. + #[serde(rename = "hadError")] + had_error: bool, + }, /// Internal signal to indicate shutdown (not sent to clients) #[serde(skip)] Shutdown, @@ -244,6 +264,7 @@ impl EntityChangeEvent { | EntityEvent::PluginDisabled { .. } | EntityEvent::PluginDeleted { .. } | EntityEvent::ReleaseAnnounced { .. } + | EntityEvent::ReleaseSourcePolled { .. } | EntityEvent::Shutdown => None, } } @@ -283,6 +304,22 @@ impl EntityChangeEvent { None, ) } + + /// Build a `ReleaseSourcePolled` event for the end of a poll task run. + /// + /// Carries only IDs and a single boolean error hint; receivers should + /// invalidate any cached `release_sources` query and re-read the row + /// for fresh `last_summary` / `last_polled_at` / etc. 
+ pub fn release_source_polled(source_id: Uuid, plugin_id: &str, had_error: bool) -> Self { + Self::new( + EntityEvent::ReleaseSourcePolled { + source_id, + plugin_id: plugin_id.to_string(), + had_error, + }, + None, + ) + } } /// Task progress event for background operations diff --git a/src/scheduler/release_sources.rs b/src/scheduler/release_sources.rs index dc8f106b..8daa132d 100644 --- a/src/scheduler/release_sources.rs +++ b/src/scheduler/release_sources.rs @@ -244,14 +244,55 @@ fn derive_url_hint(source: &crate::db::entities::release_sources::Model) -> Stri source.plugin_id.clone() } +/// Outcome of an `enqueue_poll_now` call. +#[derive(Debug, Clone, Copy)] +pub struct EnqueuePollOutcome { + /// The ID of the task — either the freshly enqueued one or the + /// in-flight task we coalesced onto. + pub task_id: Uuid, + /// `true` when a pending/processing task already existed for this + /// source and we returned its ID instead of enqueuing a new one. + pub coalesced: bool, +} + /// Wrapper for callers (e.g., HTTP handlers) that want to enqueue a poll /// directly instead of waiting for the scheduler tick. -pub async fn enqueue_poll_now(db: &DatabaseConnection, source_id: Uuid) -> Result { +/// +/// **Dedup**: if a `poll_release_source` task for the same `source_id` is +/// already pending or processing, returns that task's ID instead of +/// enqueuing another one. This guards against the "click Poll now twice +/// and only one finishes" footgun: with a worker pool size > 1, two +/// independent tasks for the same source would race on `last_summary` / +/// `last_polled_at` writes and overlap upstream fetches. Coalescing onto +/// the in-flight task gives the user the same UX (their click acks) and +/// keeps the source's state coherent. 
+pub async fn enqueue_poll_now( + db: &DatabaseConnection, + source_id: Uuid, +) -> Result { + if let Some(existing) = TaskRepository::find_pending_or_processing_by_param( + db, + "poll_release_source", + "source_id", + &source_id.to_string(), + ) + .await + .context("Failed to check for in-flight poll task")? + { + return Ok(EnqueuePollOutcome { + task_id: existing, + coalesced: true, + }); + } + let task_type = TaskType::PollReleaseSource { source_id }; let task_id = TaskRepository::enqueue(db, task_type, None) .await .context("Failed to enqueue PollReleaseSource task")?; - Ok(task_id) + Ok(EnqueuePollOutcome { + task_id, + coalesced: false, + }) } #[cfg(test)] diff --git a/src/services/plugin/protocol.rs b/src/services/plugin/protocol.rs index e388158a..488b8ecd 100644 --- a/src/services/plugin/protocol.rs +++ b/src/services/plugin/protocol.rs @@ -1448,6 +1448,12 @@ pub struct ReleasePollRequest { /// reverse-RPC channel is open). The `candidates` field is convenience for /// plugins that prefer to return everything at once; both styles are /// supported and the host treats them identically. +/// +/// Plugins that stream via `releases/record` should also populate the +/// counter fields (`parsed`, `matched`, `recorded`, `deduped`) so the host +/// can build an accurate `last_summary` for the source. Without those, the +/// host can only see what came back in `candidates` and a streaming +/// plugin's status badge will read "Fetched 0 items" no matter what. #[derive(Debug, Clone, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ReleasePollResponse { @@ -1470,6 +1476,27 @@ pub struct ReleasePollResponse { /// the host's per-host backoff layer to detect 429 / 503. #[serde(default, skip_serializing_if = "Option::is_none")] pub upstream_status: Option, + /// Items the plugin parsed from the upstream feed before any matching + /// or threshold filtering. 
Streaming plugins should populate this so + /// the host's `last_summary` reflects upstream activity, not just the + /// shape of the response payload. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub parsed: Option, + /// Of those parsed, the count that matched a tracked series alias + /// (i.e. that became candidates the plugin then evaluated/streamed). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub matched: Option, + /// Of those matched, the count actually inserted into the ledger + /// (excludes dedupes). For plugins that stream via `releases/record`, + /// this is the count of non-deduped record outcomes. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub recorded: Option, + /// Of those matched, the count that the host deduped onto an existing + /// ledger row. Optional; when omitted the host infers `matched - + /// recorded`. Provided explicitly by streaming plugins that already + /// know. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub deduped: Option, } // ============================================================================= diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index 402e9423..f1099f90 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -199,23 +199,31 @@ impl ReleasesRequestHandler { match SeriesExternalIdRepository::get_for_series(&self.db, entry.series_id).await { Ok(rows) => { // Filter: only sources the plugin asked for. - // Source naming convention: `plugin:` for - // plugin-provided IDs; we accept either bare source - // names (e.g. "mangaupdates") or the prefixed form. + // + // Two namespace conventions exist in stored + // `series_external_ids.source` strings: + // + // - `api:` (used by metadata plugins + // like MangaBaka, OpenLibrary, AniList — this is + // the dominant convention and the SDK docs). + // - `plugin:` (legacy / plugin-private). 
+ // + // Plugin manifests declare `requiresExternalIds` + // with the bare service name (e.g. "mangaupdates"), + // so we strip both prefixes before matching. The + // returned map is keyed by the bare name so plugins + // can read `externalIds["mangaupdates"]` regardless + // of how the row was stored. let mut by_source: HashMap = HashMap::new(); for row in rows { - let normalized = row - .source - .strip_prefix("plugin:") - .unwrap_or(&row.source) - .to_string(); + let normalized = strip_external_id_namespace(&row.source); if self .capability .requires_external_ids .iter() - .any(|req| req == &normalized) + .any(|req| req == normalized) { - by_source.insert(normalized, row.external_id); + by_source.insert(normalized.to_string(), row.external_id); } } if !by_source.is_empty() { @@ -827,6 +835,28 @@ struct SourceStateView { last_error_at: Option>, } +/// Strip a leading namespace prefix (`api:`, `plugin:`) from an external-ID +/// `source` string and return the bare service name. +/// +/// Stored `series_external_ids.source` values use one of: +/// - `api:` (dominant; written by metadata plugins like +/// MangaBaka, OpenLibrary, AniList). +/// - `plugin:` (legacy plugin-private form). +/// - `` (bare; older rows). +/// +/// Plugin manifests declare `requiresExternalIds` with the bare service +/// name, so we normalize on read. Anything else (`urn:...`, `mal:`, etc.) +/// passes through unchanged. 
/// Strip a leading namespace prefix (`api:`, `plugin:`) from an
/// external-ID `source` string and return the bare service name.
///
/// Stored `series_external_ids.source` values use one of:
/// - `api:<service>` (dominant; written by metadata plugins).
/// - `plugin:<service>` (legacy plugin-private form).
/// - `<service>` (bare; older rows).
///
/// Plugin manifests declare `requiresExternalIds` with the bare service
/// name, so we normalize on read. Any other shape (`urn:...`, `mal:`,
/// etc.) passes through unchanged.
pub(crate) fn strip_external_id_namespace(source: &str) -> &str {
    // Only these two prefixes are recognized namespaces; check each in
    // turn and return the remainder of the first that matches.
    for prefix in ["api:", "plugin:"] {
        if let Some(bare) = source.strip_prefix(prefix) {
            return bare;
        }
    }
    source
}
assert_eq!(strip_external_id_namespace("mangadex"), "mangadex"); + // Unknown prefixes pass through — we'd rather fail closed than guess. + assert_eq!( + strip_external_id_namespace("urn:isbn:1234"), + "urn:isbn:1234" + ); + assert_eq!(strip_external_id_namespace(""), ""); + } + #[tokio::test] async fn list_tracked_rejects_source_owned_by_other_plugin() { let (db, _t) = create_test_db().await; diff --git a/src/services/release/upstream_gap.rs b/src/services/release/upstream_gap.rs index 5d2eb525..e9bf97dd 100644 --- a/src/services/release/upstream_gap.rs +++ b/src/services/release/upstream_gap.rs @@ -167,7 +167,6 @@ mod tests { SeriesTrackingRow { series_id: Uuid::new_v4(), tracked, - tracking_status: "ongoing".to_string(), track_chapters, track_volumes, latest_known_chapter: None, diff --git a/src/tasks/handlers/poll_release_source.rs b/src/tasks/handlers/poll_release_source.rs index 327b838d..55f88728 100644 --- a/src/tasks/handlers/poll_release_source.rs +++ b/src/tasks/handlers/poll_release_source.rs @@ -58,14 +58,21 @@ const DEFAULT_TASK_TIMEOUT_SECS: u64 = 300; #[serde(rename_all = "camelCase")] pub struct PollReleaseSourceResult { pub source_id: Uuid, - /// Number of candidates the plugin returned in its response payload. + /// Number of upstream items the plugin produced for this poll. Counts + /// both candidates returned inline in the response payload AND any + /// items the plugin streamed via `releases/record` (reported back via + /// `ReleasePollResponse.parsed`). Used to drive the `last_summary` + /// "Fetched N items" line. pub candidates_returned: u32, /// Number of candidates accepted by the matcher and recorded. + /// Includes both host-side records (from inline `response.candidates`) + /// and plugin-streamed records (from `ReleasePollResponse.recorded`). pub candidates_recorded: u32, /// Number of candidates dropped before the ledger (validation failures /// or below-threshold). 
pub candidates_rejected: u32, - /// Number of accepted candidates that landed as a duplicate. + /// Number of accepted candidates that landed as a duplicate. Includes + /// host-side dedupes and plugin-reported dedupes. pub candidates_deduped: u32, /// Whether the upstream returned `304 Not Modified` (or the plugin's /// equivalent). @@ -201,13 +208,13 @@ impl TaskHandler for PollReleaseSourceHandler { Ok(None) => { let msg = format!("plugin {} not registered", source.plugin_id); warn!("Task {}: {}", task.id, msg); - record_error(db, source.id, &msg).await; + record_error(db, &source, event_broadcaster, &msg).await; return Ok(TaskResult::failure(msg)); } Err(e) => { let msg = format!("failed to lookup plugin: {}", e); error!("Task {}: {}", task.id, msg); - record_error(db, source.id, &msg).await; + record_error(db, &source, event_broadcaster, &msg).await; return Ok(TaskResult::failure(msg)); } }; @@ -231,7 +238,7 @@ impl TaskHandler for PollReleaseSourceHandler { Err(e) => { let msg = format!("failed to start plugin: {}", e); error!("Task {}: {}", task.id, msg); - record_error(db, source.id, &msg).await; + record_error(db, &source, event_broadcaster, &msg).await; return Ok(TaskResult::failure(msg)); } }; @@ -254,7 +261,7 @@ impl TaskHandler for PollReleaseSourceHandler { Err(_) => { let msg = format!("poll timed out after {:?}", t); warn!("Task {}: {}", task.id, msg); - record_error(db, source.id, &msg).await; + record_error(db, &source, event_broadcaster, &msg).await; return Ok(TaskResult::failure(msg)); } } @@ -279,7 +286,7 @@ impl TaskHandler for PollReleaseSourceHandler { ); } error!("Task {}: {}", task.id, msg); - record_error(db, source.id, &msg).await; + record_error(db, &source, event_broadcaster, &msg).await; return Ok(TaskResult::failure(msg)); } }; @@ -323,6 +330,14 @@ impl TaskHandler for PollReleaseSourceHandler { let response_etag = response.etag.clone(); let response_not_modified = response.not_modified; let response_upstream_status = 
response.upstream_status; + // Plugin-reported counters (populated by streaming plugins that + // record via `releases/record` mid-poll, since their response's + // `candidates` array is empty). When present, these win over the + // host-side count below. + let plugin_reported_parsed = response.parsed; + let plugin_reported_matched = response.matched; + let plugin_reported_recorded = response.recorded; + let plugin_reported_deduped = response.deduped; let mut result = PollReleaseSourceResult { source_id, @@ -379,6 +394,14 @@ impl TaskHandler for PollReleaseSourceHandler { } } + fold_streaming_counters( + &mut result, + plugin_reported_parsed, + plugin_reported_matched, + plugin_reported_recorded, + plugin_reported_deduped, + ); + // Persist source state. If we hit a successful 2xx upstream we // already noted it for backoff; clear `last_error` and stamp // `last_polled_at`. The one-line `summary` is surfaced in the @@ -421,6 +444,21 @@ impl TaskHandler for PollReleaseSourceHandler { self.backoff.record_success(&url_hint).await; } + // Emit a `ReleaseSourcePolled` event so the Release tracking + // settings page refreshes the row in real time. Best-effort: + // missing subscribers are a benign noop, the persisted source + // state is the source of truth. + let had_error = response_upstream_status + .map(is_backoff_status) + .unwrap_or(false); + if let Some(b) = event_broadcaster { + let _ = b.emit(EntityChangeEvent::release_source_polled( + source.id, + &source.plugin_id, + had_error, + )); + } + let message = format!( "Polled {}: returned {}, recorded {}, deduped {}, rejected {}", source.display_name, @@ -435,6 +473,40 @@ impl TaskHandler for PollReleaseSourceHandler { } } +/// Merge plugin-reported counters from `ReleasePollResponse` into the +/// host's running `PollReleaseSourceResult`. +/// +/// Streaming plugins (Nyaa, MangaUpdates) record via `releases/record` +/// mid-poll and return an empty `candidates` array. 
They report what they +/// saw via the response's optional counter fields; the host's `result` +/// already counts whatever came back inline in `candidates`, so we +/// **additively merge** the two so a plugin that mixes both modes gets a +/// correct summary. +/// +/// `deduped` falls back to `matched - recorded` when the plugin only sent +/// the latter two — older plugins won't know about the dedicated field. +pub(crate) fn fold_streaming_counters( + result: &mut PollReleaseSourceResult, + parsed: Option, + matched: Option, + recorded: Option, + deduped: Option, +) { + if let Some(p) = parsed { + result.candidates_returned = result.candidates_returned.saturating_add(p); + } + if let Some(r) = recorded { + result.candidates_recorded = result.candidates_recorded.saturating_add(r); + } + if let Some(d) = deduped { + result.candidates_deduped = result.candidates_deduped.saturating_add(d); + } else if let (Some(m), Some(r)) = (matched, recorded) + && m >= r + { + result.candidates_deduped = result.candidates_deduped.saturating_add(m - r); + } +} + /// Build the one-line `last_summary` string written to `release_sources` /// after a successful poll, intended for direct display under the Release /// tracking row's status badge. 
@@ -514,15 +586,30 @@ fn derive_url_hint(source: &crate::db::entities::release_sources::Model) -> Stri source.plugin_id.clone() } -async fn record_error(db: &DatabaseConnection, source_id: Uuid, message: &str) { +async fn record_error( + db: &DatabaseConnection, + source: &crate::db::entities::release_sources::Model, + event_broadcaster: Option<&Arc>, + message: &str, +) { if let Err(e) = - ReleaseSourceRepository::record_poll_error(db, source_id, message, Utc::now()).await + ReleaseSourceRepository::record_poll_error(db, source.id, message, Utc::now()).await { warn!( "Failed to persist poll error on source {}: {}", - source_id, e + source.id, e ); } + // Emit a `ReleaseSourcePolled` event so the Release tracking settings + // page refreshes in real time. Best-effort: missing subscribers are a + // benign noop, the persisted state is the source of truth. + if let Some(b) = event_broadcaster { + let _ = b.emit(EntityChangeEvent::release_source_polled( + source.id, + &source.plugin_id, + true, // had_error + )); + } } #[cfg(test)] @@ -886,4 +973,64 @@ mod tests { let s = build_poll_summary(None, Some(200), &r); assert_eq!(s, "Fetched 2 items, recorded 2"); } + + // ------------------------------------------------------------------------- + // fold_streaming_counters — protects against the regression where a + // streaming plugin (Nyaa, MangaUpdates) records via reverse-RPC and the + // host's summary always reads "Fetched 0 items" because the response's + // `candidates` array was empty. 
+ // ------------------------------------------------------------------------- + + #[test] + fn fold_streaming_counters_adds_plugin_reported_values() { + let mut r = empty_result(); + fold_streaming_counters(&mut r, Some(12), Some(3), Some(1), Some(2)); + assert_eq!(r.candidates_returned, 12); + assert_eq!(r.candidates_recorded, 1); + assert_eq!(r.candidates_deduped, 2); + } + + #[test] + fn fold_streaming_counters_infers_deduped_when_only_matched_and_recorded() { + let mut r = empty_result(); + fold_streaming_counters(&mut r, Some(10), Some(8), Some(3), None); + assert_eq!(r.candidates_returned, 10); + assert_eq!(r.candidates_recorded, 3); + assert_eq!(r.candidates_deduped, 5, "matched - recorded fallback"); + } + + #[test] + fn fold_streaming_counters_handles_absent_fields_for_older_plugins() { + let mut r = empty_result(); + r.candidates_returned = 4; + r.candidates_recorded = 4; + fold_streaming_counters(&mut r, None, None, None, None); + assert_eq!(r.candidates_returned, 4, "host counts preserved"); + assert_eq!(r.candidates_recorded, 4); + assert_eq!(r.candidates_deduped, 0); + } + + #[test] + fn fold_streaming_counters_additively_merges_with_inline_candidates() { + let mut r = empty_result(); + // Host counted some inline candidates already. + r.candidates_returned = 2; + r.candidates_recorded = 2; + // Plugin also streamed a few. + fold_streaming_counters(&mut r, Some(3), Some(3), Some(2), Some(1)); + assert_eq!(r.candidates_returned, 5); + assert_eq!(r.candidates_recorded, 4); + assert_eq!(r.candidates_deduped, 1); + } + + #[test] + fn build_poll_summary_uses_streaming_counters_via_fold() { + // Pin the end-to-end shape: a streaming plugin returns no inline + // candidates but reports it parsed 5 and recorded 5 — the badge + // must say "Fetched 5 items, recorded 5", not "Fetched 0 items". 
+ let mut r = empty_result(); + fold_streaming_counters(&mut r, Some(5), Some(5), Some(5), Some(0)); + let s = build_poll_summary(None, Some(200), &r); + assert_eq!(s, "Fetched 5 items, recorded 5"); + } } diff --git a/tests/api/releases.rs b/tests/api/releases.rs index 41c1b6c4..6e09c367 100644 --- a/tests/api/releases.rs +++ b/tests/api/releases.rs @@ -6,7 +6,7 @@ mod common; use codex::api::error::ErrorResponse; use codex::api::routes::v1::dto::release::{ PollNowResponse, ReleaseLedgerEntryDto, ReleaseSourceDto, ReleaseSourceListResponse, - UpdateReleaseLedgerEntryRequest, UpdateReleaseSourceRequest, + ResetReleaseSourceResponse, UpdateReleaseLedgerEntryRequest, UpdateReleaseSourceRequest, }; use codex::db::ScanningStrategy; use codex::db::entities::release_sources::kind; @@ -145,6 +145,10 @@ async fn list_series_releases_returns_entries_for_series() { assert_eq!(body.data.len(), 2); for entry in &body.data { assert_eq!(entry.series_id, series); + assert_eq!( + entry.series_title, "Series", + "DTO should carry the series title joined from the series row" + ); } } @@ -274,6 +278,10 @@ async fn inbox_filters_by_series() { let body = body.unwrap(); assert_eq!(body.total, 1); assert_eq!(body.data[0].external_release_id, "rel-1"); + assert_eq!( + body.data[0].series_title, "Series", + "inbox DTO should carry the series title for cross-series rendering" + ); } // ============================================================================= @@ -550,6 +558,60 @@ async fn poll_now_enqueues_task_when_source_exists() { ); } +#[tokio::test] +async fn poll_now_dedupes_concurrent_requests_onto_in_flight_task() { + // Regression: clicking "Poll now" twice quickly previously enqueued + // two independent tasks. With worker_count >= 2 they'd race on + // last_summary / last_polled_at writes and overlap upstream fetches. + // We now coalesce onto the existing pending/processing task. 
+ use codex::db::repositories::TaskRepository; + + let (db, _temp) = setup_test_db().await; + let id = make_source(&db, "nyaa:user:tsuna69").await; + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app1 = create_test_router(state.clone()).await; + let app2 = create_test_router(state).await; + + // First click: enqueues a fresh task. + let req = post_request_with_auth(&format!("/api/v1/release-sources/{}/poll-now", id), &token); + let (s1, b1): (StatusCode, Option) = make_json_request(app1, req).await; + assert_eq!(s1, StatusCode::ACCEPTED); + let b1 = b1.unwrap(); + assert_eq!(b1.status, "enqueued"); + + // Second click while the first is still pending: coalesce. + let req = post_request_with_auth(&format!("/api/v1/release-sources/{}/poll-now", id), &token); + let (s2, b2): (StatusCode, Option) = make_json_request(app2, req).await; + assert_eq!(s2, StatusCode::ACCEPTED); + let b2 = b2.unwrap(); + assert_eq!( + b2.status, "already_running", + "second poll-now must coalesce onto the in-flight task" + ); + assert!( + b2.message.contains("coalesced"), + "human-readable message should explain the coalesce" + ); + + // Only one task should sit on the queue, not two. 
+ let pending = TaskRepository::list( + &db, + Some("pending".to_string()), + Some("poll_release_source".to_string()), + Some(10), + ) + .await + .unwrap(); + assert_eq!( + pending.len(), + 1, + "duplicate poll-now must not stack tasks; got {} pending", + pending.len() + ); +} + #[tokio::test] async fn poll_now_conflicts_when_source_disabled() { use codex::db::repositories::{ReleaseSourceRepository, ReleaseSourceUpdate}; @@ -605,3 +667,126 @@ async fn poll_now_requires_plugins_manage() { let (status, _): (StatusCode, Option) = make_json_request(app, req).await; assert_eq!(status, StatusCode::FORBIDDEN); } + +// ============================================================================= +// POST /release-sources/{id}/reset +// ============================================================================= + +#[tokio::test] +async fn reset_clears_ledger_rows_and_poll_state() { + let (db, _temp) = setup_test_db().await; + let series = make_series(&db).await; + let source = make_source(&db, "nyaa:user:tsuna69").await; + let other_source = make_source(&db, "nyaa:user:other").await; + + record_announced(&db, series, source, "rel-1").await; + record_announced(&db, series, source, "rel-2").await; + // A row on a different source must NOT be touched. + record_announced(&db, series, other_source, "rel-keep").await; + + // Seed poll state on the target source so we can prove it's cleared. 
+ ReleaseSourceRepository::record_poll_success( + &db, + source, + chrono::Utc::now(), + Some("\"etag-1\"".to_string()), + Some("Fetched 2 items".to_string()), + ) + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = post_request_with_auth(&format!("/api/v1/release-sources/{}/reset", source), &token); + let (status, body): (StatusCode, Option) = + make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(body.unwrap().deleted_ledger_entries, 2); + + // Target source: ledger rows gone, poll state cleared. + let after = ReleaseSourceRepository::get_by_id(&db, source) + .await + .unwrap() + .unwrap(); + assert!(after.etag.is_none()); + assert!(after.last_polled_at.is_none()); + assert!(after.last_summary.is_none()); + + // Other source's row survives. + let surviving = ReleaseLedgerRepository::list_for_series(&db, series, None, 100, 0) + .await + .unwrap(); + assert_eq!(surviving.len(), 1); + assert_eq!(surviving[0].source_id, other_source); + assert_eq!(surviving[0].external_release_id, "rel-keep"); +} + +#[tokio::test] +async fn reset_preserves_user_managed_source_fields() { + use codex::db::repositories::ReleaseSourceUpdate; + + let (db, _temp) = setup_test_db().await; + let source = make_source(&db, "nyaa:user:tsuna69").await; + + // Admin disables the source and overrides the interval. 
+ ReleaseSourceRepository::update( + &db, + source, + ReleaseSourceUpdate { + enabled: Some(false), + poll_interval_s: Some(900), + display_name: Some("Custom Name".to_string()), + ..Default::default() + }, + ) + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = post_request_with_auth(&format!("/api/v1/release-sources/{}/reset", source), &token); + let (status, _): (StatusCode, Option) = + make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + + let after = ReleaseSourceRepository::get_by_id(&db, source) + .await + .unwrap() + .unwrap(); + assert!(!after.enabled, "user-set enabled flag must survive a reset"); + assert_eq!(after.poll_interval_s, 900, "interval override survives"); + assert_eq!(after.display_name, "Custom Name", "display name preserved"); +} + +#[tokio::test] +async fn reset_404_when_source_missing() { + let (db, _temp) = setup_test_db().await; + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = post_request_with_auth( + &format!("/api/v1/release-sources/{}/reset", Uuid::new_v4()), + &token, + ); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::NOT_FOUND); +} + +#[tokio::test] +async fn reset_requires_plugins_manage() { + let (db, _temp) = setup_test_db().await; + let id = make_source(&db, "nyaa:user:tsuna69").await; + + let state = create_test_auth_state(db.clone()).await; + let token = create_reader_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let req = post_request_with_auth(&format!("/api/v1/release-sources/{}/reset", id), &token); + let (status, _): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::FORBIDDEN); +} diff --git 
a/tests/api/tracking.rs b/tests/api/tracking.rs index 2aa9adcd..bd0e4c53 100644 --- a/tests/api/tracking.rs +++ b/tests/api/tracking.rs @@ -72,7 +72,6 @@ async fn get_tracking_returns_virtual_default_when_no_row() { let dto = dto.unwrap(); assert_eq!(dto.series_id, series_id); assert!(!dto.tracked); - assert_eq!(dto.tracking_status, "unknown"); assert!(dto.track_chapters); assert!(dto.track_volumes); } @@ -105,7 +104,6 @@ async fn patch_tracking_creates_then_updates() { let app1 = create_test_router(state.clone()).await; let body = UpdateSeriesTrackingRequest { tracked: Some(true), - tracking_status: Some("ongoing".to_string()), latest_known_chapter: Some(Some(142.5)), ..Default::default() }; @@ -118,7 +116,6 @@ async fn patch_tracking_creates_then_updates() { assert_eq!(status, StatusCode::OK); let dto = dto.unwrap(); assert!(dto.tracked); - assert_eq!(dto.tracking_status, "ongoing"); assert_eq!(dto.latest_known_chapter, Some(142.5)); // Second PATCH: only update one field; others persist. 
@@ -136,31 +133,9 @@ async fn patch_tracking_creates_then_updates() { assert_eq!(status, StatusCode::OK); let dto = dto.unwrap(); assert!(dto.tracked, "tracked should persist"); - assert_eq!(dto.tracking_status, "ongoing", "status should persist"); assert_eq!(dto.latest_known_chapter, Some(143.0)); } -#[tokio::test] -async fn patch_tracking_rejects_invalid_status() { - let (db, _temp) = setup_test_db().await; - let (_lib, series_id) = create_test_series(&db).await; - let state = create_test_auth_state(db.clone()).await; - let token = create_admin_and_token(&db, &state).await; - let app = create_test_router(state).await; - - let body = UpdateSeriesTrackingRequest { - tracking_status: Some("paused".to_string()), - ..Default::default() - }; - let req = patch_json_request_with_auth( - &format!("/api/v1/series/{}/tracking", series_id), - &body, - &token, - ); - let (status, _): (StatusCode, Option) = make_json_request(app, req).await; - assert_eq!(status, StatusCode::BAD_REQUEST); -} - #[tokio::test] async fn patch_tracking_requires_auth() { let (db, _temp) = setup_test_db().await; diff --git a/web/openapi.json b/web/openapi.json index dda4e3ec..c1cd25fb 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -7299,6 +7299,54 @@ ] } }, + "/api/v1/release-sources/{source_id}/reset": { + "post": { + "tags": [ + "Releases" + ], + "summary": "Reset a release source to a clean slate.", + "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). User-managed fields (`enabled`,\n`poll_interval_s`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. 
Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", + "operationId": "reset_release_source", + "parameters": [ + { + "name": "source_id", + "in": "path", + "description": "Source ID", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "200": { + "description": "Source reset", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ResetReleaseSourceResponse" + } + } + } + }, + "403": { + "description": "PluginsManage permission required" + }, + "404": { + "description": "Source not found" + } + }, + "security": [ + { + "jwt_bearer": [] + }, + { + "api_key": [] + } + ] + } + }, "/api/v1/releases": { "get": { "tags": [ @@ -12332,9 +12380,6 @@ } } }, - "400": { - "description": "Invalid tracking_status" - }, "403": { "description": "Forbidden" }, @@ -23554,6 +23599,36 @@ "description": "Volume announced (if the source emits volumes)." } } + }, + { + "type": "object", + "description": "A release source's poll task completed.\n\nEmitted at the end of every `poll_release_source` task run, after\n`release_sources.last_summary` / `last_polled_at` / `etag` have been\npersisted. The frontend uses this to refresh the Release tracking\nsettings page in real time so users don't have to reload to see a\n\"Poll now\" finish. Carries no diff details — receivers should\ninvalidate the source query and re-read the row.", + "required": [ + "sourceId", + "pluginId", + "hadError", + "type" + ], + "properties": { + "hadError": { + "type": "boolean", + "description": "`true` if the poll wrote a `last_error`. Cheap \"did it fail\"\nhint without forcing the client to refetch." + }, + "pluginId": { + "type": "string", + "description": "Plugin that owns the source (`release_sources.plugin_id`).\nCheap filter for clients only watching certain plugins." 
+ }, + "sourceId": { + "type": "string", + "format": "uuid" + }, + "type": { + "type": "string", + "enum": [ + "release_source_polled" + ] + } + } } ], "description": "Specific event types for entity changes" @@ -29427,6 +29502,7 @@ "required": [ "id", "seriesId", + "seriesTitle", "sourceId", "externalReleaseId", "payloadUrl", @@ -29501,6 +29577,11 @@ "format": "uuid", "example": "550e8400-e29b-41d4-a716-446655440002" }, + "seriesTitle": { + "type": "string", + "description": "Series title at the time of the response. Joined from the `series`\ntable so the inbox UI can render a human-readable label without a\nfollow-up fetch. Falls back to the empty string only if the series\nrow was hard-deleted between the join and the read.", + "example": "Chainsaw Man" + }, "sourceId": { "type": "string", "format": "uuid", @@ -32778,6 +32859,7 @@ "required": [ "id", "seriesId", + "seriesTitle", "sourceId", "externalReleaseId", "payloadUrl", @@ -32852,6 +32934,11 @@ "format": "uuid", "example": "550e8400-e29b-41d4-a716-446655440002" }, + "seriesTitle": { + "type": "string", + "description": "Series title at the time of the response. Joined from the `series`\ntable so the inbox UI can render a human-readable label without a\nfollow-up fetch. Falls back to the empty string only if the series\nrow was hard-deleted between the join and the read.", + "example": "Chainsaw Man" + }, "sourceId": { "type": "string", "format": "uuid", @@ -33602,6 +33689,21 @@ } } }, + "ResetReleaseSourceResponse": { + "type": "object", + "description": "Response shape from the `reset` endpoint.\n\nReturns the number of ledger rows removed so callers can show a\nconfirmation toast. 
The source's transient poll state (etag,\nlast_polled_at, last_error, last_summary) is also cleared, but those\nare not counted here.", + "required": [ + "deletedLedgerEntries" + ], + "properties": { + "deletedLedgerEntries": { + "type": "integer", + "format": "int64", + "description": "Number of `release_ledger` rows deleted for this source.", + "minimum": 0 + } + } + }, "RetryAllErrorsRequest": { "type": "object", "description": "Request body for bulk retrying all book errors", @@ -35082,7 +35184,6 @@ "required": [ "seriesId", "tracked", - "trackingStatus", "trackChapters", "trackVolumes", "createdAt", @@ -35154,11 +35255,6 @@ "type": "boolean", "description": "Whether release tracking is enabled." }, - "trackingStatus": { - "type": "string", - "description": "Publication status: `ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`.", - "example": "ongoing" - }, "updatedAt": { "type": "string", "format": "date-time", @@ -38294,13 +38390,6 @@ "null" ] }, - "trackingStatus": { - "type": [ - "string", - "null" - ], - "description": "`ongoing` | `complete` | `hiatus` | `cancelled` | `unknown`." - }, "volumeChapterMap": {} } }, diff --git a/web/src/api/releases.ts b/web/src/api/releases.ts index 042ceb11..f2c807b8 100644 --- a/web/src/api/releases.ts +++ b/web/src/api/releases.ts @@ -11,6 +11,8 @@ export type PaginatedReleases = components["schemas"]["PaginatedResponse_ReleaseLedgerEntryDto"]; export type ReleaseTrackingApplicability = components["schemas"]["ApplicabilityResponse"]; +export type ResetReleaseSourceResponse = + components["schemas"]["ResetReleaseSourceResponse"]; export interface ReleaseInboxParams { state?: string; @@ -112,6 +114,22 @@ export const releaseSourcesApi = { return response.data; }, + /** + * Drop every ledger row for this source and clear its transient poll + * state (etag, last_polled_at, last_error, last_summary). User-managed + * fields (enabled, pollIntervalS, displayName, config) are preserved. 
+ * + * Used as a "force re-emit" lever for testing: after a reset, the next + * poll fetches the upstream feed without `If-None-Match` (no 304 + * short-circuit) and re-records every release as `announced`. + */ + reset: async (sourceId: string): Promise => { + const response = await api.post( + `/release-sources/${sourceId}/reset`, + ); + return response.data; + }, + /** * Whether release tracking is available for a given library scope. * diff --git a/web/src/components/series/SeriesReleasesPanel.test.tsx b/web/src/components/series/SeriesReleasesPanel.test.tsx index 886b88f4..9084928d 100644 --- a/web/src/components/series/SeriesReleasesPanel.test.tsx +++ b/web/src/components/series/SeriesReleasesPanel.test.tsx @@ -29,6 +29,7 @@ function entry(over: Partial = {}): ReleaseLedgerEntry { return { id: "ent-1", seriesId: SERIES_ID, + seriesTitle: "Series", sourceId: "11111111-1111-1111-1111-111111111111", externalReleaseId: "ext-1", payloadUrl: "https://example.com/r/1", diff --git a/web/src/components/series/TrackingPanel.test.tsx b/web/src/components/series/TrackingPanel.test.tsx index 65fd868f..a1b72d0d 100644 --- a/web/src/components/series/TrackingPanel.test.tsx +++ b/web/src/components/series/TrackingPanel.test.tsx @@ -24,7 +24,6 @@ const SERIES_ID = "00000000-0000-0000-0000-000000000001"; const baseTracking = { seriesId: SERIES_ID, tracked: false, - trackingStatus: "unknown", trackChapters: true, trackVolumes: true, createdAt: "2024-01-01T00:00:00Z", @@ -60,24 +59,22 @@ describe("TrackingPanel", () => { ).not.toBeChecked(); }); - // Status select is hidden when not tracked. - expect(screen.queryByText("Status")).not.toBeInTheDocument(); + // Announce switches are hidden when not tracked. 
+ expect(screen.queryByText("Announce")).not.toBeInTheDocument(); }); - it("shows status and announce flags when tracked", async () => { + it("shows announce flags when tracked", async () => { get.mockResolvedValue({ ...baseTracking, tracked: true, - trackingStatus: "ongoing", latestKnownChapter: 142.5, }); renderWithProviders(); await waitFor(() => { - expect(screen.getByText("Status")).toBeInTheDocument(); + expect(screen.getByText("Announce")).toBeInTheDocument(); }); - expect(screen.getByText(/Ongoing/i)).toBeInTheDocument(); expect(screen.getByLabelText("Chapters")).toBeChecked(); expect(screen.getByLabelText("Volumes")).toBeChecked(); }); diff --git a/web/src/components/series/TrackingPanel.tsx b/web/src/components/series/TrackingPanel.tsx index aea2ede7..7a418bcb 100644 --- a/web/src/components/series/TrackingPanel.tsx +++ b/web/src/components/series/TrackingPanel.tsx @@ -8,7 +8,6 @@ import { Divider, Group, NumberInput, - Select, Stack, Switch, Text, @@ -38,14 +37,6 @@ interface TrackingPanelProps { canEdit: boolean; } -const STATUS_OPTIONS = [ - { value: "unknown", label: "Unknown" }, - { value: "ongoing", label: "Ongoing" }, - { value: "complete", label: "Complete" }, - { value: "hiatus", label: "Hiatus" }, - { value: "cancelled", label: "Cancelled" }, -]; - /** * Inline panel on the series detail page for release-tracking config. * @@ -145,45 +136,33 @@ export function TrackingPanel({ seriesId, canEdit }: TrackingPanelProps) { {tracking?.tracked && ( <> - - - { + setLibraryId(value ?? ALL_VALUE); + setPage(1); + }} + w={220} + allowDeselect={false} + searchable + comboboxProps={{ withinPortal: true }} + /> + { + setSeriesId(value ?? 
ALL_VALUE); setPage(1); }} w={320} + allowDeselect={false} + searchable + nothingFoundMessage="No series with releases" + comboboxProps={{ withinPortal: true }} /> + {selected.size > 0 && ( + + + + {selected.size} selected + + + + + + + + + + )} + {error && ( @@ -153,6 +372,14 @@ export function ReleasesInbox() { + + + Series Ch / Vol Source / Group @@ -168,8 +395,23 @@ export function ReleasesInbox() { color: "gray", label: entry.state, }; + const isSelected = selected.has(entry.id); return ( - + + + toggleOne(entry.id)} + /> + dismiss.mutate(entry.id)} aria-label="Dismiss" @@ -272,6 +514,18 @@ export function ReleasesInbox() { )} + + deleteRelease.mutate(entry.id)} + aria-label="Delete" + > + + + @@ -293,6 +547,38 @@ export function ReleasesInbox() { )} + + + + + This will hard-delete {selected.size}{" "} + {selected.size === 1 ? "release" : "releases"} from the ledger and + clear the affected sources' cache so they re-fetch on the next poll. + The releases will reappear if the upstream still lists them. + + + + + + + ); } diff --git a/web/src/test/setup.ts b/web/src/test/setup.ts index a5b95c10..58bdc660 100644 --- a/web/src/test/setup.ts +++ b/web/src/test/setup.ts @@ -120,6 +120,13 @@ global.ResizeObserver = class ResizeObserver { unobserve() {} } as any; +// jsdom doesn't implement Element.scrollIntoView, but Mantine's Combobox +// calls it on the active option after clicks. Stubbing here prevents +// "scrollIntoView is not a function" unhandled errors in dropdown tests. 
+if (!Element.prototype.scrollIntoView) { + Element.prototype.scrollIntoView = vi.fn(); +} + // Mock EventSource for SSE tests global.EventSource = class EventSource { url: string; diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index 87c49e24..c6868081 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -2557,7 +2557,7 @@ export interface paths { patch?: never; trace?: never; }; - "/api/v1/releases/{release_id}": { + "/api/v1/releases/bulk": { parameters: { query?: never; header?: never; @@ -2566,10 +2566,64 @@ export interface paths { }; get?: never; put?: never; + /** + * Apply an action to a batch of ledger rows. + * @description `dismiss` and `mark-acquired` set state in-place. `delete` removes + * the rows and clears the affected sources' etags so the next poll + * re-fetches without `If-None-Match`. All three run as bulk SQL — no + * per-row round trips — so this scales to deleting thousands of rows in + * one call. + */ + post: operations["bulk_release_action"]; + delete?: never; + options?: never; + head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/releases/facets": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + /** + * Distinct values present in the inbox under the given filters. + * @description Returns the languages, libraries, and series that have at least one + * matching ledger row. The frontend uses this to populate cascading + * Select dropdowns so users never have to type a UUID and never see + * dropdown options that would yield zero results. + */ + get: operations["list_release_facets"]; + put?: never; post?: never; delete?: never; options?: never; head?: never; + patch?: never; + trace?: never; + }; + "/api/v1/releases/{release_id}": { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + get?: never; + put?: never; + post?: never; + /** + * Hard-delete a single ledger row. 
+ * @description Also clears the source's `etag` so the next poll bypasses + * `If-None-Match` and re-records the deleted row in `announced` state + * (assuming the upstream still lists it). This is the lever users want + * when they marked something incorrectly and need to "get it back". + */ + delete: operations["delete_release"]; + options?: never; + head?: never; /** PATCH a ledger entry's state (general-purpose state transition). */ patch: operations["update_release_entry"]; trace?: never; @@ -8657,6 +8711,27 @@ export interface components { */ year?: number | null; }; + /** + * @description Action requested by `POST /api/v1/releases/bulk`. + * @enum {string} + */ + BulkReleaseAction: "dismiss" | "mark-acquired" | "delete"; + /** @description Request body for `POST /api/v1/releases/bulk`. */ + BulkReleaseActionRequest: { + action: components["schemas"]["BulkReleaseAction"]; + ids: string[]; + }; + /** @description Response from `POST /api/v1/releases/bulk`. */ + BulkReleaseActionResponse: { + /** @description Action that ran (echoed back for client-side confirmation toasts). */ + action: components["schemas"]["BulkReleaseAction"]; + /** + * Format: int64 + * @description Number of ledger rows actually affected. Less than `ids.len()` when + * some IDs were already deleted concurrently. + */ + affected: number; + }; /** @description Request for bulk renumber operations on multiple series */ BulkRenumberSeriesRequest: { /** @@ -9316,6 +9391,18 @@ export interface components { */ message: string; }; + /** + * @description Response from `DELETE /api/v1/releases/{id}`. + * + * Single-row delete returns a small confirmation rather than 204 so the + * frontend can surface a toast that mentions the etag clear ("the next + * poll will re-fetch this release"). Mirrors the bulk-delete shape with + * `affected = 1`. + */ + DeleteReleaseResponse: { + /** @description `true` if the row was deleted, `false` if it didn't exist. 
*/ + deleted: boolean; + }; /** @description Detected series information for preview */ DetectedSeriesDto: { /** @description Number of books detected */ @@ -14728,6 +14815,26 @@ export interface components { /** @description User information */ user: components["schemas"]["UserInfo"]; }; + /** + * @description Response shape for `GET /api/v1/releases/facets`. + * + * Each list reflects the distinct values present in the ledger under the + * **other** active filters (Solr-style facet exclusion), so dropdowns + * never offer combinations that would yield zero results. The frontend + * uses these to populate cascading filter Select inputs without forcing + * the user to type UUIDs. + */ + ReleaseFacetsResponse: { + languages: components["schemas"]["ReleaseLanguageFacetDto"][]; + libraries: components["schemas"]["ReleaseLibraryFacetDto"][]; + series: components["schemas"]["ReleaseSeriesFacetDto"][]; + }; + /** @description One language option in the inbox facets response. */ + ReleaseLanguageFacetDto: { + /** Format: int64 */ + count: number; + language: string; + }; /** @description A single release announcement. Sources write these; the inbox reads them. */ ReleaseLedgerEntryDto: { /** @@ -14802,6 +14909,32 @@ export interface components { ReleaseLedgerListResponse: { entries: components["schemas"]["ReleaseLedgerEntryDto"][]; }; + /** @description One library option in the inbox facets response. */ + ReleaseLibraryFacetDto: { + /** Format: int64 */ + count: number; + /** Format: uuid */ + libraryId: string; + libraryName: string; + }; + /** + * @description One series option in the inbox facets response. Carries the joined + * `library_id` and `library_name` so the frontend can group the dropdown + * by library without a follow-up call. + */ + ReleaseSeriesFacetDto: { + /** + * Format: int64 + * @description Number of ledger rows matching the active filter for this series. 
+ */ + count: number; + /** Format: uuid */ + libraryId: string; + libraryName: string; + /** Format: uuid */ + seriesId: string; + seriesTitle: string; + }; /** @description A configured release source (one row per logical feed). */ ReleaseSourceDto: { /** @description Source-specific configuration (free-form). */ @@ -23642,11 +23775,16 @@ export interface operations { list_release_inbox: { parameters: { query?: { - /** @description Filter by state. Defaults to `announced`. */ + /** + * @description Filter by state. Defaults to `announced`. Pass `all` to disable + * state filtering entirely (returns rows in every state). + */ state?: string | null; seriesId?: string | null; sourceId?: string | null; language?: string | null; + /** @description Restrict to series belonging to this library. */ + libraryId?: string | null; page?: number; pageSize?: number; }; @@ -23674,6 +23812,121 @@ export interface operations { }; }; }; + bulk_release_action: { + parameters: { + query?: never; + header?: never; + path?: never; + cookie?: never; + }; + requestBody: { + content: { + "application/json": components["schemas"]["BulkReleaseActionRequest"]; + }; + }; + responses: { + /** @description Bulk action applied */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["BulkReleaseActionResponse"]; + }; + }; + /** @description Empty ID list or invalid action */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Forbidden */ + 403: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + }; + }; + list_release_facets: { + parameters: { + query?: { + state?: string | null; + seriesId?: string | null; + sourceId?: string | null; + language?: string | null; + libraryId?: string | null; + }; + header?: never; + path?: never; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Facets for the inbox view */ + 200: { + headers: { + [name: 
string]: unknown; + }; + content: { + "application/json": components["schemas"]["ReleaseFacetsResponse"]; + }; + }; + /** @description Invalid state filter */ + 400: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description SeriesRead permission required */ + 403: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + }; + }; + delete_release: { + parameters: { + query?: never; + header?: never; + path: { + /** @description Ledger entry ID */ + release_id: string; + }; + cookie?: never; + }; + requestBody?: never; + responses: { + /** @description Release deleted */ + 200: { + headers: { + [name: string]: unknown; + }; + content: { + "application/json": components["schemas"]["DeleteReleaseResponse"]; + }; + }; + /** @description Forbidden */ + 403: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + /** @description Ledger entry not found */ + 404: { + headers: { + [name: string]: unknown; + }; + content?: never; + }; + }; + }; update_release_entry: { parameters: { query?: never; From d387d218d25d4774956f8d43e8f983187bb19340 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 17:17:42 -0700 Subject: [PATCH 21/29] feat(release-tracking): unify inbox + series releases UI and fix bulk-poll dedup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Inbox and the series-detail Releases panel were diverging in subtle ways (sort order, source label, missing actions, duplicated table JSX). Extract the shared surface and make both views identical where it matters. UI: - New shared components: ReleasesTable, ReleasesBulkActionBar, ReleasesBulkDeleteModal. Both pages compose them; row markup, action buttons, and bulk-delete confirm flow are now identical. 
- Series detail panel: collapsed by default, joins ReleaseSources for the display label (no more "source: 11111111…"), gains per-row delete and the full bulk action bar (Mark acquired / Dismiss / Delete + confirm). - Series detail layout: TrackingPanel and Releases panel moved below Custom Metadata so identifying data stays at the top. - Inbox + series panel rows render flat: each row carries its own Ch/Vol label, dropping the "blank cell on subsequent rows of the same chapter" grouping that made bulk selection ambiguous. Sort order: - Inbox: group every row of a series together (highest volume/chapter first), then break series ties by series.name ASC. Joins the series table so cross-series order is alphabetical instead of by UUID. Previously, a fresh poll batch split each series into "new" and "old" desc clusters by observed_at; now a series' chapter list reads as one contiguous descending sequence regardless of which poll surfaced each row. - Per-series ledger view (list_for_series) mirrors the same sort, so the series-detail panel matches the inbox. Backend: - Task dedup gains TaskType::dedup_params(): for task types whose identity lives in `params` (PollReleaseSource keyed on source_id), find_existing_task now filters by the JSON param in addition to task_type. Without this, two "Poll now" clicks for different sources were silently coalesced onto the first source's in-flight task. - ReleaseTrackingSettings tracks per-source poll/reset pending state so one row's spinner doesn't light up every other row. Tests added for: alphabetical cross-series sort, contiguous per-series chapters across observation batches, list_for_series chapter-desc ordering, panel bulk delete + confirm flow, per-source pending state. 
--- src/db/repositories/release_ledger.rs | 265 ++++++++- src/db/repositories/task.rs | 52 +- src/tasks/types.rs | 20 + tests/task_recovery_integration.rs | 70 +++ .../releases/ReleasesBulkActionBar.tsx | 78 +++ .../releases/ReleasesBulkDeleteModal.tsx | 41 ++ web/src/components/releases/ReleasesTable.tsx | 238 ++++++++ .../series/SeriesReleasesPanel.test.tsx | 218 +++++++- .../components/series/SeriesReleasesPanel.tsx | 381 ++++++------- web/src/pages/ReleasesInbox.test.tsx | 36 +- web/src/pages/ReleasesInbox.tsx | 521 +++++------------- web/src/pages/SeriesDetail.tsx | 30 +- .../settings/ReleaseTrackingSettings.test.tsx | 52 ++ .../settings/ReleaseTrackingSettings.tsx | 42 +- 14 files changed, 1399 insertions(+), 645 deletions(-) create mode 100644 web/src/components/releases/ReleasesBulkActionBar.tsx create mode 100644 web/src/components/releases/ReleasesBulkDeleteModal.tsx create mode 100644 web/src/components/releases/ReleasesTable.tsx diff --git a/src/db/repositories/release_ledger.rs b/src/db/repositories/release_ledger.rs index 02f00313..8be3f63d 100644 --- a/src/db/repositories/release_ledger.rs +++ b/src/db/repositories/release_ledger.rs @@ -10,8 +10,8 @@ use anyhow::Result; use chrono::Utc; use sea_orm::{ - ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, PaginatorTrait, QueryFilter, - QueryOrder, QuerySelect, Set, + ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, Order, PaginatorTrait, + QueryFilter, QueryOrder, QuerySelect, Set, sea_query::NullOrdering, }; use uuid::Uuid; @@ -163,8 +163,9 @@ impl ReleaseLedgerRepository { }) } - /// Per-series ledger view: ordered by `observed_at` desc, with optional - /// state filter. + /// Per-series ledger view: highest volume/chapter first, then most recent + /// observation as a tie-breaker. Matches the inbox ordering so the series + /// detail panel reads the same way as the cross-series list. 
pub async fn list_for_series( db: &DatabaseConnection, series_id: Uuid, @@ -174,7 +175,18 @@ impl ReleaseLedgerRepository { ) -> Result> { let mut query = ReleaseLedger::find() .filter(release_ledger::Column::SeriesId.eq(series_id)) - .order_by_desc(release_ledger::Column::ObservedAt); + .order_by_with_nulls( + release_ledger::Column::Volume, + Order::Desc, + NullOrdering::Last, + ) + .order_by_with_nulls( + release_ledger::Column::Chapter, + Order::Desc, + NullOrdering::Last, + ) + .order_by_desc(release_ledger::Column::ObservedAt) + .order_by_asc(release_ledger::Column::Id); if let Some(s) = state_filter { query = query.filter(release_ledger::Column::State.eq(s)); } @@ -188,14 +200,41 @@ impl ReleaseLedgerRepository { } /// Inbox view across all series, with filters. + /// + /// Sort order: group all rows of a series together (highest volume/chapter + /// on top), then break ties between series by the most recent observation. + /// Grouping by series first matches how users read the inbox: they want + /// every chapter of a series listed contiguously and descending, even when + /// rows come from multiple poll batches with different `observed_at`s. + /// + /// Inner-joins `series` so the cross-series order is by `series.name` + /// (alphabetical) rather than by `series_id` (a meaningless UUID order). 
pub async fn list_inbox( db: &DatabaseConnection, filter: LedgerInboxFilter, limit: u64, offset: u64, ) -> Result> { - let mut query = ReleaseLedger::find().order_by_desc(release_ledger::Column::ObservedAt); - query = apply_inbox_filter(query, &filter, false); + use sea_orm::{JoinType, RelationTrait}; + let mut query = ReleaseLedger::find() + .join(JoinType::InnerJoin, release_ledger::Relation::Series.def()) + .order_by_asc(crate::db::entities::series::Column::Name) + .order_by_asc(release_ledger::Column::SeriesId) + .order_by_with_nulls( + release_ledger::Column::Volume, + Order::Desc, + NullOrdering::Last, + ) + .order_by_with_nulls( + release_ledger::Column::Chapter, + Order::Desc, + NullOrdering::Last, + ) + .order_by_desc(release_ledger::Column::ObservedAt) + .order_by_asc(release_ledger::Column::Id); + // `series_already_joined: true` so apply_inbox_filter doesn't add + // a duplicate join when `library_id` is present in the filter. + query = apply_inbox_filter(query, &filter, true); if limit > 0 { query = query.limit(limit); } @@ -629,6 +668,36 @@ mod tests { assert!(err.to_string().contains("NaN")); } + #[tokio::test] + async fn list_for_series_sorts_chapter_desc_over_observed_at() { + // The series detail panel must mirror the inbox's per-series order: + // highest chapter wins, even if a lower chapter was observed later. 
+ let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_id) = setup_world(conn).await; + + let now = Utc::now(); + let mut high_old = entry(series_id, source_id, "rel-high"); + high_old.chapter = Some(200.0); + high_old.observed_at = now - chrono::Duration::hours(6); + let mut low_new = entry(series_id, source_id, "rel-low"); + low_new.chapter = Some(150.0); + low_new.observed_at = now; + ReleaseLedgerRepository::record(conn, high_old) + .await + .unwrap(); + ReleaseLedgerRepository::record(conn, low_new) + .await + .unwrap(); + + let rows = ReleaseLedgerRepository::list_for_series(conn, series_id, None, 10, 0) + .await + .unwrap(); + assert_eq!(rows.len(), 2); + assert_eq!(rows[0].chapter, Some(200.0)); + assert_eq!(rows[1].chapter, Some(150.0)); + } + #[tokio::test] async fn list_for_series_orders_by_observed_at_desc() { let (db, _temp) = create_test_db().await; @@ -691,6 +760,188 @@ mod tests { assert_eq!(dismissed[0].external_release_id, "rel-1"); } + #[tokio::test] + async fn list_inbox_orders_series_alphabetically_by_name() { + // Cross-series ordering used to be by `series_id` (UUID), which is + // deterministic but meaningless to users. Now the inbox joins `series` + // and orders by `name ASC`, so "A series" appears before "Z series". + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let library = LibraryRepository::create(conn, "Lib", "/lib", ScanningStrategy::Default) + .await + .unwrap(); + let source = ReleaseSourceRepository::create( + conn, + NewReleaseSource { + plugin_id: "release-nyaa".to_string(), + source_key: "nyaa:user:tsuna69".to_string(), + display_name: "Nyaa - tsuna69".to_string(), + kind: kind::RSS_UPLOADER.to_string(), + poll_interval_s: 3600, + enabled: None, + config: None, + }, + ) + .await + .unwrap(); + // Create series in reverse alphabetical order to prove the sort isn't + // just preserving insertion order. 
+ let zebra = SeriesRepository::create(conn, library.id, "Zebra", None) + .await + .unwrap(); + let middle = SeriesRepository::create(conn, library.id, "Middle", None) + .await + .unwrap(); + let alpha = SeriesRepository::create(conn, library.id, "Alpha", None) + .await + .unwrap(); + + for sid in [zebra.id, middle.id, alpha.id] { + ReleaseLedgerRepository::record(conn, entry(sid, source.id, &format!("rel-{}", sid))) + .await + .unwrap(); + } + + let rows = ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap(); + let series_order: Vec = rows.iter().map(|r| r.series_id).collect(); + assert_eq!( + series_order, + vec![alpha.id, middle.id, zebra.id], + "inbox should list series alphabetically by series.name" + ); + } + + #[tokio::test] + async fn list_inbox_groups_series_across_observation_batches() { + // Bug repro: when a series has rows from two separate poll batches + // (different `observed_at`s), the inbox must still list every chapter + // contiguously and descending — not split into two desc clusters by + // batch. A user reading the inbox doesn't care which poll surfaced a + // chapter; they want the series' chapter list, in order. + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_id) = setup_world(conn).await; + + let now = Utc::now(); + let earlier = now - chrono::Duration::hours(6); + // Earlier batch: lower chapters. Later batch: higher chapters. 
+ for ch in [122.0_f64, 123.0, 124.0, 125.0] { + let mut e = entry(series_id, source_id, &format!("rel-{}", ch)); + e.chapter = Some(ch); + e.observed_at = earlier; + ReleaseLedgerRepository::record(conn, e).await.unwrap(); + } + for ch in [150.0_f64, 151.0, 156.0] { + let mut e = entry(series_id, source_id, &format!("rel-{}", ch)); + e.chapter = Some(ch); + e.observed_at = now; + ReleaseLedgerRepository::record(conn, e).await.unwrap(); + } + + let rows = ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap(); + let chapters: Vec = rows.iter().filter_map(|r| r.chapter).collect(); + assert_eq!( + chapters, + vec![156.0, 151.0, 150.0, 125.0, 124.0, 123.0, 122.0], + "chapters of one series must be one contiguous desc list, regardless of observed_at batch" + ); + } + + #[tokio::test] + async fn list_inbox_orders_chapters_desc_within_series() { + // A poll batch records every release with the same `observed_at`. The + // inbox must still present the highest chapter first per series, not + // the arbitrary order rows happened to be inserted in. + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_id) = setup_world(conn).await; + + let now = Utc::now(); + // Insert in shuffled chapter order to prove the DB is doing the sort. 
+ for ch in [129.0_f64, 145.0, 122.0, 150.5, 137.0, 156.0, 138.0] { + let mut e = entry(series_id, source_id, &format!("rel-{}", ch)); + e.chapter = Some(ch); + e.observed_at = now; + ReleaseLedgerRepository::record(conn, e).await.unwrap(); + } + + let rows = ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap(); + let chapters: Vec = rows.iter().filter_map(|r| r.chapter).collect(); + assert_eq!( + chapters, + vec![156.0, 150.5, 145.0, 138.0, 137.0, 129.0, 122.0], + "rows of the same series must be sorted by chapter desc" + ); + } + + #[tokio::test] + async fn list_inbox_groups_series_with_chapters_desc_inside() { + // Two series in the same poll batch: the inbox must keep each series' + // rows contiguous and sort their chapters descending. The cross-series + // order is by series_id ASC (deterministic, but not user-meaningful). + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_a, src) = setup_world(conn).await; + let library = LibraryRepository::create(conn, "Lib2", "/lib2", ScanningStrategy::Default) + .await + .unwrap(); + let series_b = SeriesRepository::create(conn, library.id, "Series B", None) + .await + .unwrap(); + + let now = Utc::now(); + let mut a1 = entry(series_a, src, "a-1"); + a1.chapter = Some(10.0); + a1.observed_at = now; + let mut a2 = entry(series_a, src, "a-2"); + a2.chapter = Some(20.0); + a2.observed_at = now; + let mut b1 = entry(series_b.id, src, "b-1"); + b1.chapter = Some(5.0); + b1.observed_at = now; + let mut b2 = entry(series_b.id, src, "b-2"); + b2.chapter = Some(7.0); + b2.observed_at = now; + // Insert interleaved to prove ordering doesn't leak from insertion order. 
+ ReleaseLedgerRepository::record(conn, a1).await.unwrap(); + ReleaseLedgerRepository::record(conn, b1).await.unwrap(); + ReleaseLedgerRepository::record(conn, a2).await.unwrap(); + ReleaseLedgerRepository::record(conn, b2).await.unwrap(); + + let rows = ReleaseLedgerRepository::list_inbox(conn, LedgerInboxFilter::default(), 100, 0) + .await + .unwrap(); + // Each series' rows must be contiguous and chapter-desc internally. + let series_groups: Vec> = rows + .iter() + .map(|r| (r.series_id, r.chapter.unwrap())) + .fold(Vec::new(), |mut acc, (sid, ch)| { + if acc.last().is_some_and(|g: &Vec<_>| g[0].0 == sid) { + acc.last_mut().unwrap().push((sid, ch)); + } else { + acc.push(vec![(sid, ch)]); + } + acc + }); + assert_eq!( + series_groups.len(), + 2, + "rows of each series must be contiguous" + ); + for group in &series_groups { + let chs: Vec = group.iter().map(|(_, c)| *c).collect(); + let mut sorted = chs.clone(); + sorted.sort_by(|a, b| b.partial_cmp(a).unwrap()); + assert_eq!(chs, sorted, "chapters within a series must be desc"); + } + } + #[tokio::test] async fn list_inbox_supports_combined_filters() { let (db, _temp) = create_test_db().await; diff --git a/src/db/repositories/task.rs b/src/db/repositories/task.rs index 3e984943..21b8b1a5 100644 --- a/src/db/repositories/task.rs +++ b/src/db/repositories/task.rs @@ -118,9 +118,7 @@ impl TaskRepository { let params = task_type.params(); // Check if a task already exists for this entity - if let Some(existing_task) = - Self::find_existing_task(db, type_str, library_id, series_id, book_id).await? - { + if let Some(existing_task) = Self::find_existing_task(db, &task_type).await? 
{ info!( "Task already exists: {} ({}) - skipping duplicate", existing_task.id, type_str @@ -166,10 +164,7 @@ impl TaskRepository { if err_str.contains("unique") || err_str.contains("duplicate") { // Race condition: another task was inserted between our check and insert // Find and return the existing task - if let Some(existing_task) = - Self::find_existing_task(db, type_str, library_id, series_id, book_id) - .await? - { + if let Some(existing_task) = Self::find_existing_task(db, &task_type).await? { info!( "Task was created concurrently: {} ({}) - using existing task", existing_task.id, type_str @@ -202,9 +197,7 @@ impl TaskRepository { let params = task_type.params(); // Check if a task already exists for this entity - if let Some(existing_task) = - Self::find_existing_task(db, type_str, library_id, series_id, book_id).await? - { + if let Some(existing_task) = Self::find_existing_task(db, &task_type).await? { info!( "Task already exists: {} ({}) - skipping duplicate", existing_task.id, type_str @@ -249,10 +242,7 @@ impl TaskRepository { Err(e) => { let err_str = e.to_string().to_lowercase(); if err_str.contains("unique") || err_str.contains("duplicate") { - if let Some(existing_task) = - Self::find_existing_task(db, type_str, library_id, series_id, book_id) - .await? - { + if let Some(existing_task) = Self::find_existing_task(db, &task_type).await? { info!( "Task was created concurrently: {} ({}) - using existing task", existing_task.id, type_str @@ -382,14 +372,26 @@ impl TaskRepository { Ok(enqueued) } - /// Find an existing pending/processing task for the given entity + /// Find an existing pending/processing task for the given task. + /// + /// Dedup key, in order of preference: + /// 1. The most specific FK column set on the task (`book_id` > + /// `series_id` > `library_id`). + /// 2. The JSON-param pair returned by `TaskType::dedup_params()`, for + /// task types whose identity lives in `params` (e.g. + /// `PollReleaseSource`). 
Without this, two such tasks differing only + /// in `params` would falsely collide on `task_type` alone. + /// 3. None — only `task_type` and status are matched. This is the + /// desired behavior for singleton task types like `FindDuplicates`. async fn find_existing_task( db: &DatabaseConnection, - task_type: &str, - library_id: Option, - series_id: Option, - book_id: Option, + task: &TaskType, ) -> Result> { + let task_type = task.type_string(); + let library_id = task.library_id(); + let series_id = task.series_id(); + let book_id = task.book_id(); + let mut query = Tasks::find() .filter(tasks::Column::TaskType.eq(task_type)) .filter(tasks::Column::Status.is_in(["pending", "processing"])); @@ -401,6 +403,18 @@ impl TaskRepository { query = query.filter(tasks::Column::SeriesId.eq(ser_id)); } else if let Some(lib_id) = library_id { query = query.filter(tasks::Column::LibraryId.eq(lib_id)); + } else if let Some((key, value)) = task.dedup_params() { + // Params-based dedup: route through the helper that knows how + // to query JSON params portably across SQLite and Postgres. + return match Self::find_pending_or_processing_by_param(db, task_type, key, &value) + .await? + { + Some(id) => Tasks::find_by_id(id) + .one(db) + .await + .context("Failed to load existing task by id"), + None => Ok(None), + }; } query.one(db).await.context("Failed to find existing task") diff --git a/src/tasks/types.rs b/src/tasks/types.rs index ce916434..dfd8573b 100644 --- a/src/tasks/types.rs +++ b/src/tasks/types.rs @@ -481,6 +481,26 @@ impl TaskType { } } + /// JSON-param key/value pair to use as a dedup discriminator for task + /// types whose identity lives in `params` rather than in FK columns. + /// + /// Returning `Some((key, value))` tells the dedup path in + /// `TaskRepository::find_existing_task` to additionally filter by + /// `params->>key = value`. 
Without this, two `poll_release_source` tasks + /// for *different* `source_id`s would falsely collide because they share + /// the same `task_type` and have no FK columns set, causing the second + /// "Poll now" click to be silently coalesced onto the first source's + /// in-flight poll. + /// + /// `key` must be a simple identifier (alphanumeric + underscore) since + /// SQLite splices it into a JSON path string. + pub fn dedup_params(&self) -> Option<(&'static str, String)> { + match self { + TaskType::PollReleaseSource { source_id } => Some(("source_id", source_id.to_string())), + _ => None, + } + } + /// Extract all fields needed for database insertion /// Returns: (type_string, library_id, series_id, book_id, params) pub fn extract_fields( diff --git a/tests/task_recovery_integration.rs b/tests/task_recovery_integration.rs index 2bc2238a..2f639cf6 100644 --- a/tests/task_recovery_integration.rs +++ b/tests/task_recovery_integration.rs @@ -627,3 +627,73 @@ async fn test_completed_task_allows_new_task() { "New task should have different ID after previous task completed" ); } + +/// Regression test: enqueueing `poll_release_source` for two different +/// `source_id`s in quick succession must yield two distinct tasks. The dedup +/// path used to match by `task_type` alone for tasks whose identity lives in +/// JSON params (no FK columns), causing the second click on "Poll now" to be +/// silently coalesced onto the first source's in-flight poll. 
+#[tokio::test] +async fn test_poll_release_source_dedup_is_per_source() { + let (db, _temp_dir) = setup_test_db().await; + + let source_a = Uuid::new_v4(); + let source_b = Uuid::new_v4(); + + let task_a = TaskRepository::enqueue( + &db, + TaskType::PollReleaseSource { + source_id: source_a, + }, + None, + ) + .await + .expect("Failed to enqueue poll for source A"); + + let task_b = TaskRepository::enqueue( + &db, + TaskType::PollReleaseSource { + source_id: source_b, + }, + None, + ) + .await + .expect("Failed to enqueue poll for source B"); + + assert_ne!( + task_a, task_b, + "Polls for distinct release sources must not be deduplicated against each other" + ); + + let stats = TaskRepository::get_stats(&db) + .await + .expect("Failed to get stats"); + assert_eq!(stats.pending, 2, "Both polls should be pending"); +} + +/// Re-enqueueing a poll for the *same* source must still coalesce onto the +/// in-flight task. This is the inverse of the per-source guarantee above and +/// matches the documented `enqueue_poll_now` UX. 
+#[tokio::test] +async fn test_poll_release_source_dedup_same_source_coalesces() { + let (db, _temp_dir) = setup_test_db().await; + let source_id = Uuid::new_v4(); + + let first = TaskRepository::enqueue(&db, TaskType::PollReleaseSource { source_id }, None) + .await + .expect("Failed to enqueue first poll"); + + let second = TaskRepository::enqueue(&db, TaskType::PollReleaseSource { source_id }, None) + .await + .expect("Failed to enqueue duplicate poll"); + + assert_eq!( + first, second, + "Polls for the same source should coalesce onto the in-flight task" + ); + + let stats = TaskRepository::get_stats(&db) + .await + .expect("Failed to get stats"); + assert_eq!(stats.pending, 1, "Only one pending poll task should exist"); +} diff --git a/web/src/components/releases/ReleasesBulkActionBar.tsx b/web/src/components/releases/ReleasesBulkActionBar.tsx new file mode 100644 index 00000000..0925fd6d --- /dev/null +++ b/web/src/components/releases/ReleasesBulkActionBar.tsx @@ -0,0 +1,78 @@ +import { Button, Card, Group, Text } from "@mantine/core"; +import { IconCheck, IconTrash, IconX } from "@tabler/icons-react"; +import type { BulkReleaseAction } from "@/api/releases"; + +interface ReleasesBulkActionBarProps { + count: number; + isPending: boolean; + onAction: (action: BulkReleaseAction) => void; + onClear: () => void; + /** Show the Delete button. The inbox routes Delete through a confirm modal, + * which it wires up itself; the series panel currently doesn't expose + * bulk-delete (use per-row delete instead). */ + onDeleteClick?: () => void; + /** When true, render as a sticky banner (page-level inbox). Off for the + * embedded series panel where the parent card already provides framing. 
*/ + sticky?: boolean; +} + +export function ReleasesBulkActionBar({ + count, + isPending, + onAction, + onClear, + onDeleteClick, + sticky = false, +}: ReleasesBulkActionBarProps) { + return ( + + + + {count} selected + + + + + {onDeleteClick && ( + + )} + + + + + ); +} diff --git a/web/src/components/releases/ReleasesBulkDeleteModal.tsx b/web/src/components/releases/ReleasesBulkDeleteModal.tsx new file mode 100644 index 00000000..69ab031c --- /dev/null +++ b/web/src/components/releases/ReleasesBulkDeleteModal.tsx @@ -0,0 +1,41 @@ +import { Button, Group, Modal, Stack, Text } from "@mantine/core"; + +interface ReleasesBulkDeleteModalProps { + opened: boolean; + onClose: () => void; + onConfirm: () => void; + count: number; + isPending: boolean; +} + +/** Confirmation modal for bulk-deleting ledger entries. + * Hard-deletes are reversible by the upstream re-poll, so we surface that + * caveat in the body — users typically want Dismiss, not Delete. */ +export function ReleasesBulkDeleteModal({ + opened, + onClose, + onConfirm, + count, + isPending, +}: ReleasesBulkDeleteModalProps) { + const noun = count === 1 ? "release" : "releases"; + return ( + + + + This will hard-delete {count} {noun} from the ledger and clear the + affected sources' cache so they re-fetch on the next poll. The + releases will reappear if the upstream still lists them. 
+ + + + + + + + ); +} diff --git a/web/src/components/releases/ReleasesTable.tsx b/web/src/components/releases/ReleasesTable.tsx new file mode 100644 index 00000000..3a75e6ee --- /dev/null +++ b/web/src/components/releases/ReleasesTable.tsx @@ -0,0 +1,238 @@ +import { + ActionIcon, + Anchor, + Badge, + Checkbox, + Group, + Stack, + Table, + Text, + Tooltip, +} from "@mantine/core"; +import { + IconCheck, + IconExternalLink, + IconTrash, + IconX, +} from "@tabler/icons-react"; +import { format } from "date-fns"; +import { Link } from "react-router-dom"; +import type { ReleaseLedgerEntry, ReleaseSource } from "@/api/releases"; +import { MediaUrlIcon } from "./MediaUrlIcon"; + +const STATE_BADGE: Record = { + announced: { color: "blue", label: "New" }, + marked_acquired: { color: "green", label: "Acquired" }, + dismissed: { color: "gray", label: "Dismissed" }, + hidden: { color: "gray", label: "Hidden" }, +}; + +interface ReleasesTableProps { + entries: ReleaseLedgerEntry[]; + sourceById: Map; + selected: Set; + onToggleOne: (id: string) => void; + onToggleAll: () => void; + onDismiss: (id: string) => void; + onMarkAcquired: (id: string) => void; + onDelete: (id: string) => void; + /** When true, render a Series column linking to the series detail page. + * Off when the table is already scoped to a single series. */ + showSeriesColumn?: boolean; + /** Disable per-row action buttons while a mutation is in flight. */ + isDismissPending?: boolean; + isMarkAcquiredPending?: boolean; + isDeletePending?: boolean; + /** Visual density. The page-level inbox uses "sm"; the embedded panel + * uses "xs" so it doesn't dominate the surrounding card. */ + verticalSpacing?: "xs" | "sm"; +} + +function formatChapterVolume(entry: ReleaseLedgerEntry): string { + const hasChapter = entry.chapter !== null && entry.chapter !== undefined; + const hasVolume = entry.volume !== null && entry.volume !== undefined; + if (!hasChapter && !hasVolume) return "—"; + const chapter = hasChapter ? 
`Ch ${entry.chapter}` : ""; + const volume = hasVolume + ? hasChapter + ? ` · Vol ${entry.volume}` + : `Vol ${entry.volume}` + : ""; + return `${chapter}${volume}`; +} + +export function ReleasesTable({ + entries, + sourceById, + selected, + onToggleOne, + onToggleAll, + onDismiss, + onMarkAcquired, + onDelete, + showSeriesColumn = false, + isDismissPending = false, + isMarkAcquiredPending = false, + isDeletePending = false, + verticalSpacing = "sm", +}: ReleasesTableProps) { + const allSelected = + entries.length > 0 && entries.every((e) => selected.has(e.id)); + const someSelected = entries.some((e) => selected.has(e.id)) && !allSelected; + + return ( +
+ + + + + + {showSeriesColumn && Series} + Ch / Vol + Source / Group + Lang + State + Observed + + + + + {entries.map((entry) => { + const stateInfo = STATE_BADGE[entry.state] ?? { + color: "gray", + label: entry.state, + }; + const isSelected = selected.has(entry.id); + const source = sourceById.get(entry.sourceId); + const sourceLabel = + source?.displayName ?? `${entry.sourceId.slice(0, 8)}…`; + return ( + + + onToggleOne(entry.id)} + /> + + {showSeriesColumn && ( + + + {entry.seriesTitle.length > 0 + ? entry.seriesTitle + : `${entry.seriesId.slice(0, 8)}…`} + + + )} + + + {formatChapterVolume(entry)} + + + + + {entry.groupOrUploader && + entry.groupOrUploader !== sourceLabel && ( + {entry.groupOrUploader} + )} + + {sourceLabel} + + + + + {entry.language ?? "—"} + + + + {stateInfo.label} + + + + + {format(new Date(entry.observedAt), "yyyy-MM-dd")} + + + + + + + + + + {entry.mediaUrl && ( + + )} + {entry.state === "announced" && ( + <> + + onMarkAcquired(entry.id)} + aria-label="Mark acquired" + > + + + + + onDismiss(entry.id)} + aria-label="Dismiss" + > + + + + + )} + + onDelete(entry.id)} + aria-label="Delete" + > + + + + + + + ); + })} + +
+ ); +} diff --git a/web/src/components/series/SeriesReleasesPanel.test.tsx b/web/src/components/series/SeriesReleasesPanel.test.tsx index 9084928d..5a0604f8 100644 --- a/web/src/components/series/SeriesReleasesPanel.test.tsx +++ b/web/src/components/series/SeriesReleasesPanel.test.tsx @@ -1,11 +1,19 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { + type BulkReleaseActionResponse, type PaginatedReleases, type ReleaseLedgerEntry, + type ReleaseSource, releaseSourcesApi, releasesApi, } from "@/api/releases"; -import { renderWithProviders, screen, userEvent, waitFor } from "@/test/utils"; +import { + renderWithProviders, + screen, + userEvent, + waitFor, + within, +} from "@/test/utils"; import { SeriesReleasesPanel } from "./SeriesReleasesPanel"; vi.mock("@/api/releases", () => ({ @@ -15,6 +23,8 @@ vi.mock("@/api/releases", () => ({ patchEntry: vi.fn(), dismiss: vi.fn(), markAcquired: vi.fn(), + delete: vi.fn(), + bulk: vi.fn(), }, releaseSourcesApi: { list: vi.fn(), @@ -61,23 +71,51 @@ function paginated(entries: ReleaseLedgerEntry[]): PaginatedReleases { const list = vi.mocked(releasesApi.listForSeries); const dismiss = vi.mocked(releasesApi.dismiss); const markAcquired = vi.mocked(releasesApi.markAcquired); +const deleteRelease = vi.mocked(releasesApi.delete); +const bulk = vi.mocked(releasesApi.bulk); + +/** The panel collapses by default; tests expand it once before asserting. */ +async function expandPanel() { + const user = userEvent.setup(); + const toggle = await screen.findByRole("button", { + name: /expand releases/i, + }); + await user.click(toggle); +} describe("SeriesReleasesPanel", () => { beforeEach(() => { vi.clearAllMocks(); - // Avoid an unused-import warning while keeping the api mocked. 
- void releaseSourcesApi; + vi.mocked(releaseSourcesApi.list).mockResolvedValue([]); + }); + + it("collapses by default and only renders the body after the user expands", async () => { + list.mockResolvedValue( + paginated([entry({ id: "a", groupOrUploader: "Group-A" })]), + ); + renderWithProviders(); + // Header carries the expand affordance; while collapsed, body content sits + // in an aria-hidden subtree (Mantine's Collapse) so the toggle's a11y name + // is "Expand releases" and row buttons are hidden from the a11y tree. + await screen.findByRole("button", { name: /expand releases/i }); + expect( + screen.queryByRole("button", { name: /dismiss/i }), + ).not.toBeInTheDocument(); + await expandPanel(); + await screen.findByRole("button", { name: /collapse releases/i }); + await screen.findByRole("button", { name: /dismiss/i, hidden: true }); }); it("renders an empty-state message when no releases exist", async () => { list.mockResolvedValueOnce(paginated([])); renderWithProviders(); + await expandPanel(); await waitFor(() => { expect(screen.getByText(/no releases yet/i)).toBeInTheDocument(); }); }); - it("groups entries by chapter/volume and renders source rows", async () => { + it("renders one row per ledger entry with the chapter label repeated", async () => { list.mockResolvedValueOnce( paginated([ entry({ id: "a", chapter: 143, groupOrUploader: "Group-A" }), @@ -96,17 +134,51 @@ describe("SeriesReleasesPanel", () => { ]), ); renderWithProviders(); + await expandPanel(); await waitFor(() => { expect(screen.getByText("Group-A")).toBeInTheDocument(); }); expect(screen.getByText("Group-B")).toBeInTheDocument(); expect(screen.getByText("Group-C")).toBeInTheDocument(); - // Ch 143 is shared by Group-A and Group-B but only renders the cell label - // on the first row of the group (others get an empty cell). - expect(screen.getAllByText(/Ch 143/)).toHaveLength(1); + // Flat rows: each row carries its own chapter label. 
Two rows for Ch 143 + // (Group-A and Group-B), one row for Ch 142. + expect(screen.getAllByText(/Ch 143/)).toHaveLength(2); expect(screen.getAllByText(/Ch 142/)).toHaveLength(1); }); + it("renders the source display name from the sources list", async () => { + list.mockResolvedValueOnce( + paginated([ + entry({ + id: "a", + sourceId: "11111111-1111-1111-1111-111111111111", + groupOrUploader: "tsuna69", + }), + ]), + ); + vi.mocked(releaseSourcesApi.list).mockResolvedValue([ + { + id: "11111111-1111-1111-1111-111111111111", + pluginId: "release-nyaa", + sourceKey: "nyaa:user:tsuna69", + displayName: "Nyaa - tsuna69", + kind: "rss_uploader", + pollIntervalS: 3600, + enabled: true, + config: null, + createdAt: "2026-01-01T00:00:00Z", + updatedAt: "2026-01-01T00:00:00Z", + } as ReleaseSource, + ]); + renderWithProviders(); + await expandPanel(); + await waitFor(() => { + expect(screen.getByText("Nyaa - tsuna69")).toBeInTheDocument(); + }); + // The UUID-prefix fallback should not appear once the join resolves. 
+ expect(screen.queryByText(/source: 11111111…/)).not.toBeInTheDocument(); + }); + it("dismisses an announced entry via the dismiss action", async () => { list.mockResolvedValue( paginated([entry({ id: "a", groupOrUploader: "OnlyGroup" })]), @@ -114,8 +186,12 @@ describe("SeriesReleasesPanel", () => { dismiss.mockResolvedValueOnce(entry({ id: "a", state: "dismissed" })); const user = userEvent.setup(); renderWithProviders(); + await expandPanel(); await screen.findByText("OnlyGroup"); - const dismissButton = screen.getByRole("button", { name: /dismiss/i }); + const dismissButton = await screen.findByRole("button", { + name: /dismiss/i, + hidden: true, + }); await user.click(dismissButton); await waitFor(() => { expect(dismiss).toHaveBeenCalledWith("a"); @@ -131,13 +207,137 @@ describe("SeriesReleasesPanel", () => { ); const user = userEvent.setup(); renderWithProviders(); + await expandPanel(); await screen.findByText("OnlyGroup"); - const acquireButton = screen.getByRole("button", { + const acquireButton = await screen.findByRole("button", { name: /mark acquired/i, + hidden: true, }); await user.click(acquireButton); await waitFor(() => { expect(markAcquired).toHaveBeenCalledWith("a"); }); }); + + it("hard-deletes a row via the delete action", async () => { + list.mockResolvedValue( + paginated([entry({ id: "a", groupOrUploader: "OnlyGroup" })]), + ); + deleteRelease.mockResolvedValueOnce({ + affectedReleaseIds: ["a"], + affectedSeriesIds: [SERIES_ID], + affectedSourceIds: [], + } as BulkReleaseActionResponse); + const user = userEvent.setup(); + renderWithProviders(); + await expandPanel(); + await screen.findByText("OnlyGroup"); + const deleteButton = await screen.findByRole("button", { + name: /delete/i, + hidden: true, + }); + await user.click(deleteButton); + await waitFor(() => { + expect(deleteRelease).toHaveBeenCalledWith("a"); + }); + }); + + it("bulk-marks selected entries as acquired", async () => { + list.mockResolvedValue( + paginated([ + entry({ id: 
"a", chapter: 200, groupOrUploader: "Group-A" }), + entry({ id: "b", chapter: 199, groupOrUploader: "Group-B" }), + entry({ id: "c", chapter: 198, groupOrUploader: "Group-C" }), + ]), + ); + bulk.mockResolvedValueOnce({ + affectedReleaseIds: ["a", "b"], + affectedSeriesIds: [SERIES_ID], + affectedSourceIds: [], + } as BulkReleaseActionResponse); + const user = userEvent.setup(); + renderWithProviders(); + await expandPanel(); + await screen.findByText("Group-A"); + // Select rows a and b individually. + await user.click( + await screen.findByRole("checkbox", { + name: "Select release a", + hidden: true, + }), + ); + await user.click( + await screen.findByRole("checkbox", { + name: "Select release b", + hidden: true, + }), + ); + // Action bar appears with the count. Find the bulk action by walking up + // from the "2 selected" label — the per-row "Mark acquired" buttons share + // the same accessible name, so role-by-name returns multiple. + const banner = screen + .getByText("2 selected") + .closest("div.mantine-Card-root"); + if (!banner) throw new Error("bulk banner not found"); + const bulkButton = within(banner as HTMLElement).getByRole("button", { + name: /mark acquired/i, + }); + await user.click(bulkButton); + await waitFor(() => { + expect(bulk).toHaveBeenCalledWith({ + ids: ["a", "b"], + action: "mark-acquired", + }); + }); + }); + + it("bulk-deletes via the Delete button after the modal confirm", async () => { + list.mockResolvedValue( + paginated([ + entry({ id: "a", chapter: 200, groupOrUploader: "Group-A" }), + entry({ id: "b", chapter: 199, groupOrUploader: "Group-B" }), + ]), + ); + bulk.mockResolvedValueOnce({ + affectedReleaseIds: ["a", "b"], + affectedSeriesIds: [SERIES_ID], + affectedSourceIds: [], + } as BulkReleaseActionResponse); + const user = userEvent.setup(); + renderWithProviders(); + await expandPanel(); + await screen.findByText("Group-A"); + await user.click( + await screen.findByRole("checkbox", { + name: "Select release a", + 
hidden: true, + }), + ); + await user.click( + await screen.findByRole("checkbox", { + name: "Select release b", + hidden: true, + }), + ); + // Open the bulk-delete modal from the action bar (scoped to the banner + // because per-row Delete buttons share the accessible name). + const banner = screen + .getByText("2 selected") + .closest("div.mantine-Card-root"); + if (!banner) throw new Error("bulk banner not found"); + await user.click( + within(banner as HTMLElement).getByRole("button", { name: /^delete$/i }), + ); + // Confirm in the modal — its button label includes the count. + const confirmButton = await screen.findByRole("button", { + name: /delete 2 releases/i, + }); + await user.click(confirmButton); + await waitFor(() => { + expect(bulk).toHaveBeenCalledWith({ + ids: ["a", "b"], + action: "delete", + }); + }); + }); }); diff --git a/web/src/components/series/SeriesReleasesPanel.tsx b/web/src/components/series/SeriesReleasesPanel.tsx index 96e6c027..1603a436 100644 --- a/web/src/components/series/SeriesReleasesPanel.tsx +++ b/web/src/components/series/SeriesReleasesPanel.tsx @@ -2,29 +2,34 @@ import { ActionIcon, Anchor, Badge, + Box, Card, + Collapse, Group, Loader, Stack, - Table, Text, Tooltip, } from "@mantine/core"; +import { useDisclosure } from "@mantine/hooks"; import { IconBellOff, IconBellRinging, - IconCheck, - IconExternalLink, + IconChevronDown, + IconChevronRight, IconRss, - IconX, } from "@tabler/icons-react"; -import { format } from "date-fns"; -import { useMemo, useState } from "react"; -import type { ReleaseLedgerEntry } from "@/api/releases"; -import { MediaUrlIcon } from "@/components/releases/MediaUrlIcon"; +import { useEffect, useMemo, useState } from "react"; +import type { BulkReleaseAction, ReleaseSource } from "@/api/releases"; +import { ReleasesBulkActionBar } from "@/components/releases/ReleasesBulkActionBar"; +import { ReleasesBulkDeleteModal } from "@/components/releases/ReleasesBulkDeleteModal"; +import { ReleasesTable } 
from "@/components/releases/ReleasesTable"; import { + useBulkReleaseAction, + useDeleteRelease, useDismissRelease, useMarkReleaseAcquired, + useReleaseSources, useSeriesReleases, } from "@/hooks/useReleases"; import { useUserPreference } from "@/hooks/useUserPreference"; @@ -33,24 +38,11 @@ interface SeriesReleasesPanelProps { seriesId: string; } -const STATE_BADGE: Record = { - announced: { color: "blue", label: "New" }, - marked_acquired: { color: "green", label: "Acquired" }, - dismissed: { color: "gray", label: "Dismissed" }, - hidden: { color: "gray", label: "Hidden" }, -}; - -interface GroupedKey { - chapter: number | null | undefined; - volume: number | null | undefined; -} - -function groupKey(entry: ReleaseLedgerEntry): string { - return `${entry.chapter ?? "_"}::${entry.volume ?? "_"}`; -} - export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { const [showDismissed, setShowDismissed] = useState(false); + // Releases panel collapses by default — series detail is the user's main + // landing point and the panel can grow long. They open it deliberately. + const [opened, { toggle }] = useDisclosure(false); const stateFilter = showDismissed ? undefined : "announced"; // Per-user mute. Persisted via the user_preferences store with localStorage @@ -71,29 +63,56 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { state: stateFilter, pageSize: 100, }); + const { data: sources } = useReleaseSources(); const dismiss = useDismissRelease(); const markAcquired = useMarkReleaseAcquired(); + const deleteRelease = useDeleteRelease(); + const bulk = useBulkReleaseAction(); - const groups = useMemo(() => { - const entries = data?.data ?? []; - const map = new Map< - string, - { key: GroupedKey; entries: ReleaseLedgerEntry[] } - >(); - for (const entry of entries) { - const k = groupKey(entry); - const existing = map.get(k); - if (existing) { - existing.entries.push(entry); + const entries = data?.data ?? 
[]; + const [selected, setSelected] = useState>(new Set()); + const [confirmBulkDelete, { open: openBulkDelete, close: closeBulkDelete }] = + useDisclosure(false); + // Drop selections when the visible set changes — IDs that fell off screen + // shouldn't quietly remain selected for the next bulk action. + // biome-ignore lint/correctness/useExhaustiveDependencies: deps are change-triggers + useEffect(() => { + setSelected(new Set()); + }, [showDismissed, seriesId]); + const toggleAll = () => { + setSelected((prev) => { + const allSelected = + entries.length > 0 && entries.every((e) => prev.has(e.id)); + const next = new Set(prev); + if (allSelected) { + for (const e of entries) next.delete(e.id); } else { - map.set(k, { - key: { chapter: entry.chapter, volume: entry.volume }, - entries: [entry], - }); + for (const e of entries) next.add(e.id); } - } - return Array.from(map.values()); - }, [data?.data]); + return next; + }); + }; + const toggleOne = (id: string) => { + setSelected((prev) => { + const next = new Set(prev); + if (next.has(id)) next.delete(id); + else next.add(id); + return next; + }); + }; + const runBulk = (action: BulkReleaseAction) => { + const ids = Array.from(selected); + if (ids.length === 0) return; + bulk.mutate({ ids, action }, { onSuccess: () => setSelected(new Set()) }); + }; + + // Same client-side join the inbox uses: keep the ledger DTO lean while + // showing a human label instead of a UUID prefix. + const sourceById = useMemo(() => { + const map = new Map(); + for (const s of sources ?? []) map.set(s.id, s); + return map; + }, [sources]); if (isLoading) { return ( @@ -107,182 +126,114 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { } return ( - - - - - - Releases - - {data?.total ?? 0} - - {isMuted && ( - - Muted - - )} - - - + + + + - + ) : ( + + )} + + Releases + + {data?.total ?? 0} + + {isMuted && ( + + Muted + + )} + + + - {isMuted ? 
( - - ) : ( - - )} - - - setShowDismissed((prev) => !prev)} - > - {showDismissed ? "Hide dismissed" : "Show all states"} - + + {isMuted ? ( + + ) : ( + + )} + + + {opened && ( + setShowDismissed((prev) => !prev)} + > + {showDismissed ? "Hide dismissed" : "Show all states"} + + )} + -
- {groups.length === 0 ? ( - - No releases yet. Once a release source picks this series up, new - chapters/volumes will land here. - - ) : ( - - - - Ch / Vol - Source / Group - Lang - State - Observed - - - - - {groups.map(({ key, entries }) => - entries.map((entry, idx) => { - const stateInfo = STATE_BADGE[entry.state] ?? { - color: "gray", - label: entry.state, - }; - const isFirst = idx === 0; - return ( - - - {isFirst ? ( - - {key.chapter !== null && key.chapter !== undefined - ? `Ch ${key.chapter}` - : ""} - {key.volume !== null && key.volume !== undefined - ? key.chapter !== null && - key.chapter !== undefined - ? ` · Vol ${key.volume}` - : `Vol ${key.volume}` - : ""} - {!key.chapter && !key.volume ? "—" : ""} - - ) : null} - - - - {entry.groupOrUploader && ( - {entry.groupOrUploader} - )} - - source: {entry.sourceId.slice(0, 8)}… - - - - - {entry.language ?? "—"} - - - - {stateInfo.label} - - - - - {format(new Date(entry.observedAt), "yyyy-MM-dd")} - - - - - - - - - - {entry.mediaUrl && ( - - )} - {entry.state === "announced" && ( - <> - - markAcquired.mutate(entry.id)} - aria-label="Mark acquired" - > - - - - - dismiss.mutate(entry.id)} - aria-label="Dismiss" - > - - - - - )} - - - - ); - }), - )} - -
- )} - - + + {selected.size > 0 && ( + + setSelected(new Set())} + onDeleteClick={openBulkDelete} + /> + + )} + {entries.length === 0 ? ( + + No releases yet. Once a release source picks this series up, new + chapters/volumes will land here. + + ) : ( + dismiss.mutate(id)} + onMarkAcquired={(id) => markAcquired.mutate(id)} + onDelete={(id) => deleteRelease.mutate(id)} + isDismissPending={dismiss.isPending} + isMarkAcquiredPending={markAcquired.isPending} + isDeletePending={deleteRelease.isPending} + verticalSpacing="xs" + /> + )} + + + + { + runBulk("delete"); + closeBulkDelete(); + }} + count={selected.size} + isPending={bulk.isPending} + /> + ); } diff --git a/web/src/pages/ReleasesInbox.test.tsx b/web/src/pages/ReleasesInbox.test.tsx index e409ac3d..7626881f 100644 --- a/web/src/pages/ReleasesInbox.test.tsx +++ b/web/src/pages/ReleasesInbox.test.tsx @@ -3,6 +3,7 @@ import { type PaginatedReleases, type ReleaseFacets, type ReleaseLedgerEntry, + type ReleaseSource, releaseSourcesApi, releasesApi, } from "@/api/releases"; @@ -65,18 +66,34 @@ function emptyFacets(): ReleaseFacets { return { languages: [], libraries: [], series: [] }; } +function source(over: Partial = {}): ReleaseSource { + return { + id: "11111111-1111-1111-1111-111111111111", + displayName: "MangaUpdates Releases", + sourceKey: "default", + pluginId: "release-mangaupdates", + kind: "metadata-feed", + enabled: true, + pollIntervalS: 86400, + createdAt: "2026-05-01T00:00:00Z", + updatedAt: "2026-05-01T00:00:00Z", + ...over, + } as ReleaseSource; +} + const list = vi.mocked(releasesApi.listInbox); const facets = vi.mocked(releasesApi.facets); const bulk = vi.mocked(releasesApi.bulk); const remove = vi.mocked(releasesApi.delete); +const sourcesList = vi.mocked(releaseSourcesApi.list); describe("ReleasesInbox", () => { beforeEach(() => { vi.clearAllMocks(); useReleaseAnnouncementsStore.getState().reset(); useReleaseAnnouncementsStore.getState().bump(); - void releaseSourcesApi; 
facets.mockResolvedValue(emptyFacets()); + sourcesList.mockResolvedValue([source()]); }); it("renders releases and resets the unseen badge on mount", async () => { @@ -99,6 +116,23 @@ describe("ReleasesInbox", () => { }); }); + it("renders the source's display name instead of a UUID", async () => { + list.mockResolvedValueOnce(paginated([entry()])); + renderWithProviders(); + expect( + await screen.findByText("MangaUpdates Releases"), + ).toBeInTheDocument(); + // The bare UUID slice should no longer appear in the row. + expect(screen.queryByText(/^source: 11111111…$/)).not.toBeInTheDocument(); + }); + + it("falls back to a truncated source UUID when the source is unknown", async () => { + sourcesList.mockResolvedValue([]); + list.mockResolvedValueOnce(paginated([entry()])); + renderWithProviders(); + expect(await screen.findByText(/^11111111…$/)).toBeInTheDocument(); + }); + it("shows empty-state copy when no entries match", async () => { list.mockResolvedValueOnce(paginated([])); renderWithProviders(); diff --git a/web/src/pages/ReleasesInbox.tsx b/web/src/pages/ReleasesInbox.tsx index fff69941..ade8744b 100644 --- a/web/src/pages/ReleasesInbox.tsx +++ b/web/src/pages/ReleasesInbox.tsx @@ -1,41 +1,27 @@ import { - ActionIcon, - Anchor, Badge, - Box, - Button, Card, - Checkbox, Group, Loader, - Modal, Pagination, Select, Stack, - Table, Text, Title, - Tooltip, } from "@mantine/core"; import { useDisclosure } from "@mantine/hooks"; -import { - IconCheck, - IconExternalLink, - IconRss, - IconTrash, - IconX, -} from "@tabler/icons-react"; -import { format } from "date-fns"; +import { IconRss } from "@tabler/icons-react"; import { useEffect, useMemo, useState } from "react"; -import { Link } from "react-router-dom"; import type { BulkReleaseAction, ReleaseFacets, ReleaseFacetsParams, ReleaseInboxParams, - ReleaseLedgerEntry, + ReleaseSource, } from "@/api/releases"; -import { MediaUrlIcon } from "@/components/releases/MediaUrlIcon"; +import { ReleasesBulkActionBar } 
from "@/components/releases/ReleasesBulkActionBar"; +import { ReleasesBulkDeleteModal } from "@/components/releases/ReleasesBulkDeleteModal"; +import { ReleasesTable } from "@/components/releases/ReleasesTable"; import { useDocumentTitle } from "@/hooks/useDocumentTitle"; import { useBulkReleaseAction, @@ -44,6 +30,7 @@ import { useMarkReleaseAcquired, useReleaseFacets, useReleaseInbox, + useReleaseSources, } from "@/hooks/useReleases"; import { useReleaseAnnouncementsStore } from "@/store/releaseAnnouncementsStore"; @@ -54,13 +41,6 @@ const STATE_OPTIONS = [ { value: "dismissed", label: "Dismissed" }, ]; -const STATE_BADGE: Record = { - announced: { color: "blue", label: "New" }, - marked_acquired: { color: "green", label: "Acquired" }, - dismissed: { color: "gray", label: "Dismissed" }, - hidden: { color: "gray", label: "Hidden" }, -}; - const PAGE_SIZE = 50; const ALL_VALUE = "__all__"; @@ -160,6 +140,7 @@ export function ReleasesInbox() { const { data, isLoading, error } = useReleaseInbox(inboxParams); const { data: facets } = useReleaseFacets(facetsParams); + const { data: sources } = useReleaseSources(); const dismiss = useDismissRelease(); const markAcquired = useMarkReleaseAcquired(); const deleteRelease = useDeleteRelease(); @@ -181,11 +162,16 @@ export function ReleasesInbox() { const seriesOptions = useMemo(() => buildSeriesOptions(facets), [facets]); const libraryOptions = useMemo(() => buildLibraryOptions(facets), [facets]); const languageOptions = useMemo(() => buildLanguageOptions(facets), [facets]); + // Joining `sources` client-side keeps the inbox DTO lean: the source list + // is small and already cached, so a per-row label costs no extra fetch. + const sourceById = useMemo(() => { + const map = new Map(); + for (const s of sources ?? 
[]) map.set(s.id, s); + return map; + }, [sources]); const allOnPageSelected = entries.length > 0 && entries.every((e) => selected.has(e.id)); - const someOnPageSelected = - entries.some((e) => selected.has(e.id)) && !allOnPageSelected; const toggleAllOnPage = () => { setSelected((prev) => { @@ -224,361 +210,146 @@ export function ReleasesInbox() { }; return ( - - - - - - Releases - - {total} total - - + + + + + Releases + + {total} total + + - - - { - setLibraryId(value ?? ALL_VALUE); - setPage(1); - }} - w={220} - allowDeselect={false} - searchable - comboboxProps={{ withinPortal: true }} - /> - { - setSeriesId(value ?? ALL_VALUE); - setPage(1); - }} - w={320} - allowDeselect={false} - searchable - nothingFoundMessage="No series with releases" - comboboxProps={{ withinPortal: true }} - /> - - + + + { + setLibraryId(value ?? ALL_VALUE); + setPage(1); + }} + w={220} + allowDeselect={false} + searchable + comboboxProps={{ withinPortal: true }} + /> + { + setSeriesId(value ?? ALL_VALUE); + setPage(1); + }} + w={320} + allowDeselect={false} + searchable + nothingFoundMessage="No series with releases" + comboboxProps={{ withinPortal: true }} + /> + + - {selected.size > 0 && ( - - - - {selected.size} selected - - - - - - - - - - )} + {selected.size > 0 && ( + setSelected(new Set())} + onDeleteClick={openBulkDelete} + sticky + /> + )} - {error && ( - - - Failed to load releases:{" "} - {error instanceof Error ? error.message : String(error)} - - - )} + {error && ( + + + Failed to load releases:{" "} + {error instanceof Error ? error.message : String(error)} + + + )} - {isLoading ? ( - - - - ) : entries.length === 0 ? ( - - - No releases match these filters. New chapters and volumes show up - here once a release source picks them up. - - - ) : ( - - - - - - - - Series - Ch / Vol - Source / Group - Lang - State - Observed - - - - - {entries.map((entry: ReleaseLedgerEntry) => { - const stateInfo = STATE_BADGE[entry.state] ?? 
{ - color: "gray", - label: entry.state, - }; - const isSelected = selected.has(entry.id); - return ( - - - toggleOne(entry.id)} - /> - - - - {entry.seriesTitle.length > 0 - ? entry.seriesTitle - : `${entry.seriesId.slice(0, 8)}…`} - - - - - {entry.chapter !== null && entry.chapter !== undefined - ? `Ch ${entry.chapter}` - : ""} - {entry.volume !== null && entry.volume !== undefined - ? entry.chapter !== null && - entry.chapter !== undefined - ? ` · Vol ${entry.volume}` - : `Vol ${entry.volume}` - : ""} - {!entry.chapter && !entry.volume ? "—" : ""} - - - - - {entry.groupOrUploader && ( - {entry.groupOrUploader} - )} - - source: {entry.sourceId.slice(0, 8)}… - - - - - {entry.language ?? "—"} - - - - {stateInfo.label} - - - - - {format(new Date(entry.observedAt), "yyyy-MM-dd")} - - - - - - - - - - {entry.mediaUrl && ( - - )} - {entry.state === "announced" && ( - <> - - markAcquired.mutate(entry.id)} - aria-label="Mark acquired" - > - - - - - dismiss.mutate(entry.id)} - aria-label="Dismiss" - > - - - - - )} - - deleteRelease.mutate(entry.id)} - aria-label="Delete" - > - - - - - - - ); - })} - -
-
- )} + {isLoading ? ( + + + + ) : entries.length === 0 ? ( + + + No releases match these filters. New chapters and volumes show up + here once a release source picks them up. + + + ) : ( + + dismiss.mutate(id)} + onMarkAcquired={(id) => markAcquired.mutate(id)} + onDelete={(id) => deleteRelease.mutate(id)} + showSeriesColumn + isDismissPending={dismiss.isPending} + isMarkAcquiredPending={markAcquired.isPending} + isDeletePending={deleteRelease.isPending} + verticalSpacing="sm" + /> + + )} - {totalPages > 1 && ( - - - - )} -
+ {totalPages > 1 && ( + + + + )} - - - - This will hard-delete {selected.size}{" "} - {selected.size === 1 ? "release" : "releases"} from the ledger and - clear the affected sources' cache so they re-fetch on the next poll. - The releases will reappear if the upstream still lists them. - - - - - - - -
+ onConfirm={() => { + runBulk("delete"); + closeBulkDelete(); + }} + count={selected.size} + isPending={bulk.isPending} + /> + ); } diff --git a/web/src/pages/SeriesDetail.tsx b/web/src/pages/SeriesDetail.tsx index d1640894..542e66b7 100644 --- a/web/src/pages/SeriesDetail.tsx +++ b/web/src/pages/SeriesDetail.tsx @@ -1029,20 +1029,6 @@ export function SeriesDetail() { )} - {/* Release tracking (admin/editor surface; query stays cheap when collapsed). - Hidden on libraries with no covering release-source plugin. */} - {canEditSeries && releaseTrackingAvailable && ( - - )} - - {/* Releases panel: ledger entries grouped by chapter/volume. Shows - whenever the series has tracking enabled and a plugin can - actually deliver releases — otherwise the panel would render - an empty inbox with no path to ever populate. */} - {tracking?.tracked && releaseTrackingAvailable && ( - - )} - {/* External Links */} {series.externalLinks && series.externalLinks.length > 0 && ( @@ -1079,6 +1065,22 @@ export function SeriesDetail() { } /> )} + + {/* Release tracking (admin/editor surface; query stays cheap when collapsed). + Hidden on libraries with no covering release-source plugin. + Sits below the metadata panels: it's an action surface, not + identifying data. */} + {canEditSeries && releaseTrackingAvailable && ( + + )} + + {/* Releases panel: ledger entries grouped by chapter/volume. Shows + whenever the series has tracking enabled and a plugin can + actually deliver releases — otherwise the panel would render + an empty inbox with no path to ever populate. 
*/} + {tracking?.tracked && releaseTrackingAvailable && ( + + )} {/* Bulk Selection Toolbar - shows when items are selected */} diff --git a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx index e269abf6..af89b590 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx @@ -131,6 +131,58 @@ describe("ReleaseTrackingSettings", () => { }); }); + it("Poll now spinner is per-row, not shared across rows", async () => { + // Two sources: the first poll is held in flight while we click the + // second. Only the first row should show a loading spinner. + list.mockResolvedValue([ + source({ + id: "11111111-1111-1111-1111-111111111111", + displayName: "Source A", + }), + source({ + id: "22222222-2222-2222-2222-222222222222", + displayName: "Source B", + sourceKey: "mu:other", + }), + ]); + + let resolveFirst: + | ((v: { status: string; message: string }) => void) + | null = null; + pollNow.mockImplementationOnce( + () => + new Promise((resolve) => { + resolveFirst = resolve; + }), + ); + + const user = userEvent.setup(); + renderWithProviders(); + await waitFor(() => { + expect(screen.getByText("Source A")).toBeInTheDocument(); + expect(screen.getByText("Source B")).toBeInTheDocument(); + }); + + const pollButtons = screen.getAllByLabelText("Poll now"); + expect(pollButtons).toHaveLength(2); + + await user.click(pollButtons[0]); + + await waitFor(() => { + expect(pollButtons[0]).toHaveAttribute("data-loading", "true"); + }); + // Crucially, the other row's button must NOT be in a loading state while + // row A's poll is in flight. + expect(pollButtons[1]).not.toHaveAttribute("data-loading", "true"); + expect(pollButtons[1]).not.toBeDisabled(); + + // Resolve the first request and verify the spinner clears. 
+ resolveFirst?.({ status: "enqueued", message: "ok" }); + await waitFor(() => { + expect(pollButtons[0]).not.toHaveAttribute("data-loading", "true"); + }); + }); + it("plugin-sources dropdown lists release-source plugins by display name", async () => { list.mockResolvedValue([]); // One release-source plugin + one metadata plugin to confirm filtering. diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx index 814df911..188d57c5 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -27,7 +27,7 @@ import { } from "@tabler/icons-react"; import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; import { formatDistanceToNow } from "date-fns"; -import { useMemo, useState } from "react"; +import { type Dispatch, type SetStateAction, useMemo, useState } from "react"; import { pluginsApi } from "@/api/plugins"; import type { ReleaseSource } from "@/api/releases"; import { settingsApi } from "@/api/settings"; @@ -77,6 +77,30 @@ export function ReleaseTrackingSettings() { const pollNow = usePollReleaseSourceNow(); const reset = useResetReleaseSource(); + // The mutation hooks expose a single shared `isPending` flag, which would + // light up the spinner on every row whenever any one row's request was in + // flight. Track in-flight `sourceId`s explicitly so each row's spinner + // reflects only that row's own request, even when multiple are pending + // concurrently. 
+ const [pollingIds, setPollingIds] = useState>(new Set()); + const [resettingIds, setResettingIds] = useState>( + new Set(), + ); + + const addId = ( + setter: Dispatch>>, + id: string, + ) => setter((prev) => new Set(prev).add(id)); + const removeId = ( + setter: Dispatch>>, + id: string, + ) => + setter((prev) => { + const next = new Set(prev); + next.delete(id); + return next; + }); + return ( @@ -146,18 +170,26 @@ export function ReleaseTrackingSettings() { update: { pollIntervalS: seconds }, }) } - onPollNow={() => pollNow.mutate(source.id)} - pollNowPending={pollNow.isPending} + onPollNow={() => { + addId(setPollingIds, source.id); + pollNow.mutate(source.id, { + onSettled: () => removeId(setPollingIds, source.id), + }); + }} + pollNowPending={pollingIds.has(source.id)} onReset={() => { if ( window.confirm( `Reset "${source.displayName}"?\n\nThis deletes every release ledger row for this source and clears its poll state (etag, last poll time). User-managed settings (enabled, interval, name) are preserved. The next poll will re-record everything as new.\n\nThis cannot be undone.`, ) ) { - reset.mutate(source.id); + addId(setResettingIds, source.id); + reset.mutate(source.id, { + onSettled: () => removeId(setResettingIds, source.id), + }); } }} - resetPending={reset.isPending} + resetPending={resettingIds.has(source.id)} /> ))} From d769aa2325bd4172ad5b7e66e6702c8da9163ee4 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 17:24:02 -0700 Subject: [PATCH 22/29] feat(release-tracking): replace dismissed toggle with New/All segmented filter on series panel The "Hide dismissed / Show all states" anchor only switched between announced-only and all-states, which conflated two different things the user wanted to hide. For series with hundreds of chapters where most rows have been marked acquired, the default view was already clean (acquired is excluded by the `announced` filter), but the toggle labels suggested it was only about dismissed entries. 
Rename to a SegmentedControl with "New" (announced only, the default) and "All" (every state). "New" makes it explicit that acquired and dismissed are both hidden, which matches the use case where a long backlog of acquired chapters would otherwise drown out unhandled announcements. The cross-series Releases inbox page keeps its own richer state filter and is unchanged. --- .../components/series/SeriesReleasesPanel.tsx | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/web/src/components/series/SeriesReleasesPanel.tsx b/web/src/components/series/SeriesReleasesPanel.tsx index 1603a436..ea40ff20 100644 --- a/web/src/components/series/SeriesReleasesPanel.tsx +++ b/web/src/components/series/SeriesReleasesPanel.tsx @@ -1,12 +1,12 @@ import { ActionIcon, - Anchor, Badge, Box, Card, Collapse, Group, Loader, + SegmentedControl, Stack, Text, Tooltip, @@ -39,11 +39,11 @@ interface SeriesReleasesPanelProps { } export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { - const [showDismissed, setShowDismissed] = useState(false); + const [stateView, setStateView] = useState<"new" | "all">("new"); // Releases panel collapses by default — series detail is the user's main // landing point and the panel can grow long. They open it deliberately. const [opened, { toggle }] = useDisclosure(false); - const stateFilter = showDismissed ? undefined : "announced"; + const stateFilter = stateView === "new" ? "announced" : undefined; // Per-user mute. Persisted via the user_preferences store with localStorage // caching + debounced server sync. 
@@ -78,7 +78,7 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { // biome-ignore lint/correctness/useExhaustiveDependencies: deps are change-triggers useEffect(() => { setSelected(new Set()); - }, [showDismissed, seriesId]); + }, [stateView, seriesId]); const toggleAll = () => { setSelected((prev) => { const allSelected = @@ -176,14 +176,16 @@ export function SeriesReleasesPanel({ seriesId }: SeriesReleasesPanelProps) { {opened && ( - setShowDismissed((prev) => !prev)} - > - {showDismissed ? "Hide dismissed" : "Show all states"} - + setStateView(v as "new" | "all")} + data={[ + { value: "new", label: "New" }, + { value: "all", label: "All" }, + ]} + aria-label="Release state filter" + /> )} From a57644c9e442485701a64780217e05fc1cd517e9 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 18:44:59 -0700 Subject: [PATCH 23/29] fix(plugins): replay reverse-RPC events through task-local broadcaster Plugin reverse-RPC handlers (notably releases/record) emitted entity events through a long-lived broadcaster captured at plugin init, bypassing the per-task recording broadcaster the worker creates in distributed deployments. As a result, release_announced events from MangaUpdates (and any future reverse-RPC emitter) never reached the web server's SSE stream when workers ran in a separate container, so users never saw notification toasts or the sidebar badge update. Reverse-RPCs now carry an optional parentRequestId, set by the plugin SDK via AsyncLocalStorage. The host's response reader routes them to the originating caller's task instead of dispatching itself, so the worker's recording broadcaster (set as a tokio task-local around handler.handle) propagates into the dispatcher. ReleasesRequestHandler reads the task-local at emit time, lands the event in tasks.result.emitted_events, and TaskListener replays it on the web server. 
Backwards compatible: plugins without parentRequestId fall back to dispatch-on-reader (the prior behavior). The plugin SDK auto-stamps the field via AsyncLocalStorage so plugin authors don't see it. Drops the now-unused with_event_broadcaster builders on PluginManager, PluginHandle, and ReleasesRequestHandler. Includes tests for the protocol field round-trip, the task-local helper, and the new no-broadcaster-in-scope path on releases/record. --- plugins/sdk-typescript/src/host-rpc.ts | 7 + plugins/sdk-typescript/src/request-context.ts | 44 +++ plugins/sdk-typescript/src/server.ts | 15 +- plugins/sdk-typescript/src/types/rpc.ts | 9 + src/commands/serve.rs | 6 +- src/commands/worker.rs | 8 +- src/events/mod.rs | 2 + src/events/task_context.rs | 109 +++++++ src/services/plugin/handle.rs | 24 +- src/services/plugin/manager.rs | 21 -- src/services/plugin/protocol.rs | 12 + src/services/plugin/releases_handler.rs | 85 +++-- src/services/plugin/rpc.rs | 305 ++++++++++++++---- src/tasks/worker.rs | 38 ++- 14 files changed, 550 insertions(+), 135 deletions(-) create mode 100644 plugins/sdk-typescript/src/request-context.ts create mode 100644 src/events/task_context.rs diff --git a/plugins/sdk-typescript/src/host-rpc.ts b/plugins/sdk-typescript/src/host-rpc.ts index 37e114eb..6048bf8e 100644 --- a/plugins/sdk-typescript/src/host-rpc.ts +++ b/plugins/sdk-typescript/src/host-rpc.ts @@ -19,6 +19,7 @@ * belongs to — it can fan out to both, and at most one will match. */ +import { currentParentRequestId } from "./request-context.js"; import type { JsonRpcError, JsonRpcRequest } from "./types/rpc.js"; /** Write function signature for sending JSON-RPC requests. */ @@ -79,11 +80,17 @@ export class HostRpcClient { */ async call(method: string, params?: unknown): Promise { const id = this.nextId++; + // Stamp the forward call we're inside so the host can route this + // reverse-RPC back to the originating caller's task. 
Lifted from the + // `request-context` async-local storage that `server.ts` sets around + // every forward-request handler. + const parent = currentParentRequestId(); const request: JsonRpcRequest = { jsonrpc: "2.0", id, method, params, + ...(parent !== undefined ? { parentRequestId: parent } : {}), }; return new Promise((resolve, reject) => { diff --git a/plugins/sdk-typescript/src/request-context.ts b/plugins/sdk-typescript/src/request-context.ts new file mode 100644 index 00000000..491963ae --- /dev/null +++ b/plugins/sdk-typescript/src/request-context.ts @@ -0,0 +1,44 @@ +/** + * Async-local context for the currently-handled forward request. + * + * When the SDK dispatches a forward call (e.g. `releases/poll`), it stores + * the call's `id` in this context for the duration of the handler. Any + * reverse-RPC the plugin makes while servicing that call (e.g. + * `releases/record` via `HostRpcClient.call`) reads the id and stamps it as + * `parentRequestId` on the outgoing request. + * + * The host uses `parentRequestId` to route the reverse-RPC back to the + * originating caller's tokio task, so emitted events land in the recording + * broadcaster scoped to that task and replay correctly in distributed + * deployments. Without this stamping, plugins that emit events via + * reverse-RPC would silently lose them on the worker. + * + * Plugin authors don't interact with this directly. The SDK's request + * dispatch (`server.ts`) sets it; `HostRpcClient.call` reads it. + */ + +import { AsyncLocalStorage } from "node:async_hooks"; + +const store = new AsyncLocalStorage(); + +/** + * Run `fn` with `forwardRequestId` as the current parent. Calls to + * `currentParentRequestId()` made inside `fn` (or anything it awaits) will + * see this value. 
+ */ +export function runWithParentRequestId( + forwardRequestId: string | number | null, + fn: () => Promise, +): Promise { + return store.run(forwardRequestId, fn); +} + +/** + * Snapshot the current forward request id, or `undefined` if no forward + * request is on the call stack (e.g. background timers in the plugin that + * fire reverse-RPCs outside a forward-call context — those won't be replay- + * eligible, by design, since they don't belong to any task). + */ +export function currentParentRequestId(): string | number | null | undefined { + return store.getStore(); +} diff --git a/plugins/sdk-typescript/src/server.ts b/plugins/sdk-typescript/src/server.ts index 6b7172a0..f278091a 100644 --- a/plugins/sdk-typescript/src/server.ts +++ b/plugins/sdk-typescript/src/server.ts @@ -14,6 +14,7 @@ import { createInterface } from "node:readline"; import { PluginError } from "./errors.js"; import { HostRpcClient } from "./host-rpc.js"; import { createLogger, type Logger } from "./logger.js"; +import { runWithParentRequestId } from "./request-context.js"; import { PluginStorage } from "./storage.js"; import type { BookMetadataProvider, @@ -289,14 +290,12 @@ async function handleLine( logger.debug(`Received request: ${request.method}`, { id: request.id }); - const response = await handleRequest( - request, - manifest, - onInitialize, - router, - logger, - storage, - hostRpc, + // Run the request handler inside the parent-request async-local context. + // Reverse-RPCs the handler issues via `HostRpcClient.call` will read this + // and stamp `parentRequestId` so the host can route the call back to the + // originating task. See `request-context.ts`. 
+ const response = await runWithParentRequestId(request.id, () => + handleRequest(request, manifest, onInitialize, router, logger, storage, hostRpc), ); if (response !== null) { writeResponse(response); diff --git a/plugins/sdk-typescript/src/types/rpc.ts b/plugins/sdk-typescript/src/types/rpc.ts index 92b61990..6fc57f80 100644 --- a/plugins/sdk-typescript/src/types/rpc.ts +++ b/plugins/sdk-typescript/src/types/rpc.ts @@ -7,6 +7,15 @@ export interface JsonRpcRequest { id: string | number | null; method: string; params?: unknown; + /** + * Reverse-RPC only: id of the forward call this plugin is currently + * servicing. Tells the host to route the reverse-RPC back to the + * originating caller's task so emitted events land in that caller's + * recording broadcaster (and replay correctly in distributed + * deployments). The SDK stamps this automatically via + * `AsyncLocalStorage` — plugin authors don't set it. + */ + parentRequestId?: string | number | null; } export interface JsonRpcSuccessResponse { diff --git a/src/commands/serve.rs b/src/commands/serve.rs index f8d2ebe8..985c7475 100644 --- a/src/commands/serve.rs +++ b/src/commands/serve.rs @@ -279,6 +279,11 @@ pub async fn serve_command(config_path: PathBuf) -> anyhow::Result<()> { )); // Initialize plugin manager (before workers so they can handle plugin tasks) + // + // Note: no broadcaster injection. Reverse-RPC handlers (e.g. + // `releases/record`) emit through the task-local recording broadcaster + // set up by `TaskWorker::run_task`, not through a manager-held one. + // See `crate::events::with_recording_broadcaster`. 
info!("Initializing plugin manager..."); let plugin_manager = Arc::new( crate::services::plugin::PluginManager::with_defaults(Arc::new( @@ -286,7 +291,6 @@ pub async fn serve_command(config_path: PathBuf) -> anyhow::Result<()> { )) .with_metrics_service(plugin_metrics_service.clone()) .with_plugin_file_storage(plugin_file_storage.clone()) - .with_event_broadcaster(event_broadcaster.clone()) .with_scheduler(scheduler.clone()), ); // Load enabled plugins from database diff --git a/src/commands/worker.rs b/src/commands/worker.rs index 058ded35..571b41b6 100644 --- a/src/commands/worker.rs +++ b/src/commands/worker.rs @@ -116,13 +116,17 @@ pub async fn worker_command(config_path: PathBuf) -> anyhow::Result<()> { let plugin_metrics_service = Arc::new(crate::services::PluginMetricsService::new()); // Initialize plugin manager for plugin auto-match tasks + // + // Note: no broadcaster injection. Reverse-RPC handlers (e.g. + // `releases/record`) emit through the task-local recording broadcaster + // set up by `TaskWorker::run_task`, not through a manager-held one. + // See `crate::events::with_recording_broadcaster`. info!("Initializing plugin manager..."); let plugin_manager = Arc::new( crate::services::plugin::PluginManager::with_defaults(Arc::new( db.sea_orm_connection().clone(), )) - .with_metrics_service(plugin_metrics_service) - .with_event_broadcaster(event_broadcaster.clone()), + .with_metrics_service(plugin_metrics_service), ); // Load enabled plugins from database match plugin_manager.load_all().await { diff --git a/src/events/mod.rs b/src/events/mod.rs index ee05e6a1..dc3e9a52 100644 --- a/src/events/mod.rs +++ b/src/events/mod.rs @@ -8,9 +8,11 @@ //! them on the web server when tasks complete. 
mod broadcaster; +mod task_context; mod types; pub use broadcaster::{EventBroadcaster, RecordedEvent}; +pub use task_context::{current_recording_broadcaster, with_recording_broadcaster}; // TaskProgress is part of the public API for task progress reporting #[allow(unused_imports)] pub use types::{ diff --git a/src/events/task_context.rs b/src/events/task_context.rs new file mode 100644 index 00000000..df5aad83 --- /dev/null +++ b/src/events/task_context.rs @@ -0,0 +1,109 @@ +//! Tokio task-local that exposes the "current task's recording broadcaster" +//! to code that runs inside a `TaskHandler::handle` call (and to any +//! reverse-RPC dispatch the handler triggers, since the dispatcher runs on +//! the caller's task — see `services::plugin::rpc`). +//! +//! Why this exists: when a worker runs a task in distributed mode (PostgreSQL +//! deployments), it creates a per-task recording broadcaster so every +//! `EntityChangeEvent` emitted during the task is captured into +//! `tasks.result.emitted_events` and replayed by the web server's +//! `TaskListener`. Code that emits events inside the task call stack receives +//! the broadcaster as a parameter — but plugin reverse-RPC handlers +//! (`releases/record` etc.) sit behind a JSON-RPC dispatcher that only +//! receives the request, not the broadcaster. Threading the broadcaster +//! through every layer of the dispatcher is invasive; the task-local is the +//! seam. +//! +//! The reverse-RPC dispatcher in [`crate::services::plugin::rpc`] runs the +//! dispatch on the *caller's* tokio task (the one that issued the forward +//! call), so the task-local set up by [`crate::tasks::worker`] is in scope. + +use std::sync::Arc; + +use super::EventBroadcaster; + +tokio::task_local! { + /// Recording broadcaster for the currently-executing task. Set by the + /// worker around `handler.handle(...)`. Read by reverse-RPC handlers via + /// [`current_recording_broadcaster`]. 
+ static CURRENT_RECORDING_BROADCASTER: Arc; +} + +/// Run `fut` with `broadcaster` as the current task's recording broadcaster. +/// +/// Anything inside `fut` that calls [`current_recording_broadcaster`] sees +/// `Some(broadcaster)`. Outside this scope, callers see `None` and should +/// fall back to whatever they would have done previously (typically: skip +/// the emit, since out-of-task emits have nowhere to be replayed to). +pub async fn with_recording_broadcaster(broadcaster: Arc, fut: F) -> T +where + F: std::future::Future, +{ + CURRENT_RECORDING_BROADCASTER.scope(broadcaster, fut).await +} + +/// Snapshot the current task's recording broadcaster, if any. +/// +/// Returns `None` when called outside of a `with_recording_broadcaster` +/// scope (e.g. on the web server's request-handling tasks, where emits go +/// through the long-lived broadcaster directly). +pub fn current_recording_broadcaster() -> Option> { + CURRENT_RECORDING_BROADCASTER.try_with(|b| b.clone()).ok() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn returns_none_outside_scope() { + assert!(current_recording_broadcaster().is_none()); + } + + #[tokio::test] + async fn returns_broadcaster_inside_scope() { + let b = Arc::new(EventBroadcaster::new(8)); + let b_for_check = b.clone(); + with_recording_broadcaster(b, async move { + let inside = current_recording_broadcaster().expect("should be set"); + assert!(Arc::ptr_eq(&inside, &b_for_check)); + }) + .await; + assert!(current_recording_broadcaster().is_none()); + } + + #[tokio::test] + async fn nested_scope_overrides_outer() { + let outer = Arc::new(EventBroadcaster::new(8)); + let inner = Arc::new(EventBroadcaster::new(8)); + let inner_for_check = inner.clone(); + with_recording_broadcaster(outer.clone(), async move { + with_recording_broadcaster(inner, async move { + let seen = current_recording_broadcaster().expect("should be set"); + assert!(Arc::ptr_eq(&seen, &inner_for_check)); + }) + .await; + // Outer 
still in scope. + let seen = current_recording_broadcaster().expect("should be set"); + assert!(Arc::ptr_eq(&seen, &outer)); + }) + .await; + } + + /// task-locals propagate across `await` (same tokio task), which is what + /// we rely on when the reverse-RPC dispatcher runs on the caller's task. + #[tokio::test] + async fn propagates_across_await_chain() { + let b = Arc::new(EventBroadcaster::new(8)); + let b_for_check = b.clone(); + with_recording_broadcaster(b, async move { + // Yield then check — task-local survives across await boundaries + // on the same task. + tokio::task::yield_now().await; + tokio::task::yield_now().await; + let seen = current_recording_broadcaster().expect("should be set"); + assert!(Arc::ptr_eq(&seen, &b_for_check)); + }) + .await; + } +} diff --git a/src/services/plugin/handle.rs b/src/services/plugin/handle.rs index 66ba0f82..f93419f6 100644 --- a/src/services/plugin/handle.rs +++ b/src/services/plugin/handle.rs @@ -148,9 +148,6 @@ pub struct PluginHandle { /// Optional database connection for handlers that need DB access /// post-initialization (releases handler, etc.). release_db: Option, - /// Optional event broadcaster used by handlers that emit cross-process - /// notifications (releases handler emits `ReleaseAnnounced`). - event_broadcaster: Option>, /// Optional scheduler reference so the releases handler can reconcile /// release-source schedules immediately after `releases/register_sources`. scheduler: Option>>, @@ -167,7 +164,6 @@ impl PluginHandle { manifest: Arc::new(RwLock::new(None)), storage_handler: None, release_db: None, - event_broadcaster: None, scheduler: None, } } @@ -185,7 +181,6 @@ impl PluginHandle { manifest: Arc::new(RwLock::new(None)), storage_handler: Some(storage_handler), release_db: None, - event_broadcaster: None, scheduler: None, } } @@ -198,16 +193,6 @@ impl PluginHandle { self } - /// Attach an event broadcaster so the releases reverse-RPC handler can - /// emit `ReleaseAnnounced` events on inserts. 
Builder-style. - pub fn with_event_broadcaster( - mut self, - broadcaster: Arc, - ) -> Self { - self.event_broadcaster = Some(broadcaster); - self - } - /// Attach a scheduler reference so the releases reverse-RPC handler can /// trigger a release-source reconcile when the plugin calls /// `releases/register_sources`. Builder-style. @@ -344,10 +329,14 @@ impl PluginHandle { // plugin declared `release_source` and we have a database // connection, install the releases handler too. Both happen under // the same write lock so the dispatcher sees them together. + // + // The releases handler emits `ReleaseAnnounced` through the + // task-local recording broadcaster set by `crate::tasks::worker` + // around the running task — no broadcaster injection needed here. + // See [`crate::events::with_recording_broadcaster`]. let manifest_for_ctx = manifest.clone(); let plugin_name = manifest.name.clone(); let release_db = self.release_db.clone(); - let event_broadcaster = self.event_broadcaster.clone(); let scheduler = self.scheduler.clone(); client .update_reverse_ctx(move |ctx| { @@ -357,9 +346,6 @@ impl PluginHandle { release_db, ) { let mut handler = ReleasesRequestHandler::new(db, plugin_name, cap); - if let Some(b) = event_broadcaster { - handler = handler.with_event_broadcaster(b); - } if let Some(s) = scheduler { handler = handler.with_scheduler(s); } diff --git a/src/services/plugin/manager.rs b/src/services/plugin/manager.rs index 27f0d61e..edd8bfd3 100644 --- a/src/services/plugin/manager.rs +++ b/src/services/plugin/manager.rs @@ -332,9 +332,6 @@ pub struct PluginManager { metrics_service: Option>, /// Optional plugin file storage for resolving plugin data directories plugin_file_storage: Option>, - /// Optional event broadcaster handed to per-plugin handles so reverse-RPC - /// handlers (releases/record) can emit cross-process notifications. 
- event_broadcaster: Option>, /// Optional scheduler handle so the releases reverse-RPC handler can /// trigger a release-source reconcile when a plugin calls /// `releases/register_sources`. @@ -353,7 +350,6 @@ impl PluginManager { health_check_handle: RwLock::new(None), metrics_service: None, plugin_file_storage: None, - event_broadcaster: None, scheduler: None, } } @@ -378,17 +374,6 @@ impl PluginManager { self } - /// Set the event broadcaster so per-plugin handles can emit - /// `ReleaseAnnounced` events from the reverse-RPC `releases/record` - /// path. Builder-style. - pub fn with_event_broadcaster( - mut self, - broadcaster: Arc, - ) -> Self { - self.event_broadcaster = Some(broadcaster); - self - } - /// Hand the scheduler to per-plugin handles so the releases reverse-RPC /// handler can reconcile release-source schedules when a plugin calls /// `releases/register_sources`. Builder-style. @@ -665,9 +650,6 @@ impl PluginManager { // Need to spawn/initialize the plugin let handle_config = self.create_plugin_config(&entry.db_config).await?; let mut handle = PluginHandle::new(handle_config).with_release_db(self.db.as_ref().clone()); - if let Some(ref b) = self.event_broadcaster { - handle = handle.with_event_broadcaster(b.clone()); - } if let Some(ref s) = self.scheduler { handle = handle.with_scheduler(s.clone()); } @@ -808,9 +790,6 @@ impl PluginManager { let storage_handler = StorageRequestHandler::new(self.db.as_ref().clone(), user_plugin.id); let mut handle = PluginHandle::new_with_storage(handle_config, storage_handler) .with_release_db(self.db.as_ref().clone()); - if let Some(ref b) = self.event_broadcaster { - handle = handle.with_event_broadcaster(b.clone()); - } if let Some(ref s) = self.scheduler { handle = handle.with_scheduler(s.clone()); } diff --git a/src/services/plugin/protocol.rs b/src/services/plugin/protocol.rs index 488b8ecd..c81aaaa2 100644 --- a/src/services/plugin/protocol.rs +++ b/src/services/plugin/protocol.rs @@ -62,6 +62,17 @@ pub 
struct JsonRpcRequest { pub method: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub params: Option, + /// Reverse-RPC only: id of the forward call the plugin is currently + /// servicing. Lets the host route the reverse-RPC back to the originating + /// caller's task so emitted events land in that caller's recording + /// broadcaster. Absent for forward calls and for plugins that predate the + /// field. + #[serde( + default, + rename = "parentRequestId", + skip_serializing_if = "Option::is_none" + )] + pub parent_request_id: Option, } impl JsonRpcRequest { @@ -73,6 +84,7 @@ impl JsonRpcRequest { id: id.into(), method: method.into(), params, + parent_request_id: None, } } diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index 0a59d1cb..12328f9c 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -57,9 +57,6 @@ pub struct ReleasesRequestHandler { /// to scope `releases/list_tracked` responses to what the plugin asked /// for. capability: ReleaseSourceCapability, - /// Optional event broadcaster used to emit `ReleaseAnnounced` events on - /// successful (non-deduped) `releases/record` inserts. - event_broadcaster: Option>, /// Optional scheduler reference used by `releases/register_sources` to /// reconcile schedules immediately after the source set changes. scheduler: Option>>, @@ -75,21 +72,10 @@ impl ReleasesRequestHandler { db, plugin_name, capability, - event_broadcaster: None, scheduler: None, } } - /// Attach an event broadcaster so the handler emits `ReleaseAnnounced` - /// events on inserts. Builder-style. - pub fn with_event_broadcaster( - mut self, - broadcaster: std::sync::Arc, - ) -> Self { - self.event_broadcaster = Some(broadcaster); - self - } - /// Attach a scheduler reference so `releases/register_sources` reconciles /// schedules without waiting for a server restart. Builder-style. 
pub fn with_scheduler(mut self, scheduler: Arc>) -> Self { @@ -342,11 +328,29 @@ impl ReleasesRequestHandler { warn!(error = %e, %series_id, "latest_known advance failed; ledger insert preserved"); } - if let Some(ref broadcaster) = self.event_broadcaster { + // Emit through the task-local recording broadcaster set up by + // `crate::tasks::worker` around the running task. This routes + // the event into `tasks.result.emitted_events` so the web + // server's `TaskListener` replays it to live SSE subscribers in + // distributed deployments. In single-process mode the same + // task-local points at the live broadcaster, so subscribers see + // the event directly. + // + // No task-local set means we're handling a reverse-RPC outside + // any task context (today: shouldn't happen for releases since + // every record path runs inside a poll task). We log and skip + // rather than silently emitting into a void. + if let Some(broadcaster) = crate::events::current_recording_broadcaster() { let _ = broadcaster.emit(crate::events::EntityChangeEvent::release_announced( &outcome.row, &self.plugin_name, )); + } else { + debug!( + series_id = %outcome.row.series_id, + plugin = %self.plugin_name, + "No recording broadcaster in scope; skipping release_announced emit" + ); } } @@ -1159,11 +1163,12 @@ mod tests { assert_eq!(resp.error.unwrap().code, error_codes::AUTH_FAILED); } - /// `releases/record` emits a `ReleaseAnnounced` event on insert and - /// suppresses it on dedup. + /// `releases/record` emits a `ReleaseAnnounced` event on insert (via the + /// task-local recording broadcaster set up by the worker) and suppresses + /// it on dedup. 
#[tokio::test] async fn record_emits_release_announced_on_insert_only() { - use crate::events::{EntityEvent, EventBroadcaster}; + use crate::events::{EntityEvent, EventBroadcaster, with_recording_broadcaster}; let (db, _t) = create_test_db().await; let conn = db.sea_orm_connection(); @@ -1176,8 +1181,7 @@ mod tests { conn.clone(), "release-nyaa".to_string(), make_capability(false, vec![]), - ) - .with_event_broadcaster(broadcaster.clone()); + ); let cand = good_candidate(series_id); let req = make_request( @@ -1185,7 +1189,12 @@ mod tests { json!({"sourceId": source_id, "candidate": cand}), ); - let first = handler.handle_request(&req).await; + let req_clone = req.clone(); + let handler_clone = handler.clone(); + let first = with_recording_broadcaster(broadcaster.clone(), async move { + handler_clone.handle_request(&req_clone).await + }) + .await; assert!(!first.is_error(), "unexpected error: {:?}", first.error); let body: RecordResponse = serde_json::from_value(first.result.unwrap()).unwrap(); assert!(!body.deduped); @@ -1210,7 +1219,12 @@ mod tests { } // Re-recording the same release dedups; no new event should fire. - let second = handler.handle_request(&req).await; + let req_clone = req.clone(); + let handler_clone = handler.clone(); + let second = with_recording_broadcaster(broadcaster.clone(), async move { + handler_clone.handle_request(&req_clone).await + }) + .await; let body: RecordResponse = serde_json::from_value(second.result.unwrap()).unwrap(); assert!(body.deduped); assert!( @@ -1219,6 +1233,33 @@ mod tests { ); } + /// Without a task-local recording broadcaster in scope, `releases/record` + /// completes successfully but emits no event (the operation is logged + /// at debug; we don't surface a fake "live" emit anywhere). 
+ #[tokio::test] + async fn record_skips_emit_when_no_broadcaster_in_scope() { + let (db, _t) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_id) = setup(conn, "release-nyaa").await; + + let handler = ReleasesRequestHandler::new( + conn.clone(), + "release-nyaa".to_string(), + make_capability(false, vec![]), + ); + + let cand = good_candidate(series_id); + let req = make_request( + methods::RELEASES_RECORD, + json!({"sourceId": source_id, "candidate": cand}), + ); + + let resp = handler.handle_request(&req).await; + assert!(!resp.is_error(), "unexpected error: {:?}", resp.error); + let body: RecordResponse = serde_json::from_value(resp.result.unwrap()).unwrap(); + assert!(!body.deduped, "ledger row still inserted"); + } + #[tokio::test] async fn record_inserts_then_dedups() { let (db, _t) = create_test_db().await; diff --git a/src/services/plugin/rpc.rs b/src/services/plugin/rpc.rs index 2e3c03a2..fccfe664 100644 --- a/src/services/plugin/rpc.rs +++ b/src/services/plugin/rpc.rs @@ -10,7 +10,7 @@ use std::time::Duration; use serde::Serialize; use serde::de::DeserializeOwned; use serde_json::Value; -use tokio::sync::{Mutex, RwLock, oneshot}; +use tokio::sync::{Mutex, RwLock, mpsc}; use tokio::time::timeout; use tracing::{debug, error, warn}; @@ -141,9 +141,32 @@ impl From for RpcError { } } +/// Frame delivered from the response reader to a pending forward call. +/// +/// Forward calls await an `mpsc::Receiver` instead of a single +/// `oneshot::Receiver`. The reader pushes either: +/// - one `Response` (terminal — the receiver loop stops), or +/// - zero or more `ReverseRpc` frames (mid-flight — the caller dispatches +/// each one on its own tokio task and writes the response back to the +/// plugin), followed eventually by exactly one `Response`. 
+/// +/// Routing reverse-RPCs back to the caller (instead of dispatching them on +/// the reader task) is what lets task-local context — most importantly the +/// recording broadcaster set up by [`crate::tasks::worker`] — propagate into +/// the dispatcher. Without this, events emitted by reverse-RPC handlers +/// (like `releases/record`) would have no recording context and would never +/// reach the web server's SSE stream in distributed deployments. +enum PendingFrame { + /// The plugin returned a response for this forward call. Terminal. + Response(Result), + /// The plugin made a reverse-RPC call while servicing this forward + /// call. The caller must dispatch and write the response back. + ReverseRpc(JsonRpcRequest), +} + /// Pending request waiting for a response struct PendingRequest { - tx: oneshot::Sender>, + tx: mpsc::UnboundedSender, } /// JSON-RPC client for communicating with a plugin process @@ -244,7 +267,18 @@ impl RpcClient { .await } - /// Send a request and wait for a response with custom timeout + /// Send a request and wait for a response with custom timeout. + /// + /// While awaiting the response, this also services any reverse-RPC + /// requests the plugin makes that are tagged with `parent_request_id = + /// id` of this call. Dispatching here (rather than on the reader task) + /// keeps the dispatch on the caller's tokio task, so task-local state + /// (notably the recording broadcaster set by the worker) propagates into + /// the reverse-RPC handlers — see [`PendingFrame`] for context. + /// + /// The `request_timeout` bounds *the entire forward call*, including + /// any reverse-RPC servicing in between. That matches the previous + /// semantics from the caller's point of view. 
pub async fn call_with_timeout( &self, method: &str, @@ -277,6 +311,7 @@ impl RpcClient { } else { Some(params_value) }, + parent_request_id: None, }; let request_json = serde_json::to_string(&request)?; @@ -287,8 +322,11 @@ impl RpcClient { "Sending RPC request" ); - // Create response channel - let (tx, rx) = oneshot::channel(); + // Create response channel. Unbounded because reverse-RPCs are + // dispatched inline and the queue depth is naturally bounded by the + // plugin's behavior; bounding it would risk deadlock if the plugin + // bursts reverse-RPCs faster than the caller drains them. + let (tx, mut rx) = mpsc::unbounded_channel::(); { let mut pending = self.pending.lock().await; pending.insert(id, PendingRequest { tx }); @@ -309,20 +347,68 @@ impl RpcClient { process.write_line(&request_json).await?; } - // Wait for response with timeout + // Loop, servicing reverse-RPC frames until the response frame + // arrives or we time out. Dispatching reverse-RPCs here (on the + // caller's task) is what lets task-local recording broadcasters + // propagate into the handlers — see [`PendingFrame`]. debug!( id = id, timeout_ms = request_timeout.as_millis(), "Waiting for RPC response" ); - let result = match timeout(request_timeout, rx).await { + let response_result = timeout(request_timeout, async { + loop { + match rx.recv().await { + Some(PendingFrame::Response(result)) => return Ok::<_, RpcError>(result), + Some(PendingFrame::ReverseRpc(reverse_request)) => { + // Dispatch on this task so task-locals propagate. + let reverse_method = reverse_request.method.clone(); + let response = dispatch_reverse_rpc( + &reverse_method, + &reverse_request, + &self.reverse_ctx, + ) + .await; + // Write the response back to the plugin. Best-effort: + // a write failure here is logged but doesn't abort + // the forward call (the plugin may still complete). 
+ match serde_json::to_string(&response) { + Ok(response_json) => { + let process_guard = self.process.lock().await; + if let Err(e) = process_guard.write_line(&response_json).await { + error!( + error = %e, + method = %reverse_method, + forward_id = id, + "Failed to write reverse-RPC response to plugin" + ); + } + } + Err(e) => { + error!( + error = %e, + method = %reverse_method, + "Failed to serialize reverse-RPC response" + ); + } + } + } + None => { + // Channel closed — plugin process died and the + // reader cancelled all pending requests. + return Err(RpcError::Cancelled); + } + } + } + }) + .await; + + let result = match response_result { Ok(Ok(result)) => { debug!(id = id, "RPC response received"); result } - Ok(Err(_)) => { - // Channel was closed - likely because the plugin process died - // and the response reader task cancelled all pending requests + Ok(Err(RpcError::Cancelled)) => { error!( id = id, method = method, @@ -331,8 +417,11 @@ impl RpcClient { self.remove_pending(id).await; return Err(RpcError::Cancelled); } + Ok(Err(e)) => { + self.remove_pending(id).await; + return Err(e); + } Err(_) => { - // Timeout error!( id = id, timeout_ms = request_timeout.as_millis(), @@ -373,7 +462,9 @@ impl RpcClient { { let mut pending = self.pending.lock().await; for (_, req) in pending.drain() { - let _ = req.tx.send(Err(RpcError::Cancelled)); + let _ = req + .tx + .send(PendingFrame::Response(Err(RpcError::Cancelled))); } } @@ -514,13 +605,19 @@ async fn dispatch_reverse_rpc( } } -/// Task that reads lines from the plugin process and dispatches them. +/// Task that reads lines from the plugin process and routes them. /// -/// Handles two types of messages: -/// 1. **Responses**: Lines with `result` or `error` → dispatched to pending requests -/// 2. 
**Reverse RPC requests**: Lines with `method` (e.g., `storage/*`, -/// `releases/*`) → permission-checked, then handled by the matching -/// handler and the response written back to the plugin's stdin +/// Handles three categories of message: +/// 1. **Responses**: Lines with `result` or `error` → routed to the matching +/// pending caller via [`PendingFrame::Response`]. +/// 2. **Reverse-RPC requests with a `parentRequestId`**: routed to the +/// pending caller of that forward call via [`PendingFrame::ReverseRpc`]. +/// The caller dispatches on its own tokio task so task-locals propagate. +/// 3. **Reverse-RPC requests without a `parentRequestId`** (legacy plugins +/// that predate the field, or true orphans): dispatched on the reader +/// task as before. These won't have a recording broadcaster in scope and +/// won't replay in distributed deployments — but that's no regression +/// from the prior behavior. async fn response_reader_task( process: Arc>, pending: Arc>>, @@ -585,16 +682,6 @@ async fn response_reader_task( .map(|m| m.to_string()); if let Some(method) = is_request { - // Reverse-RPC dispatch with uniform permission enforcement. - // - // 1. Parse the JSON as a full request (so we have the request id - // to bind to the response). - // 2. Look up the required capability from the permissions table. - // Methods without a mapping are treated as "method not found." - // 3. Check the plugin's manifest. Reject `Denied` with - // `AUTH_FAILED` so the plugin can distinguish "I'm calling the - // wrong namespace" (404) from "I'm not allowed to" (403-ish). - // 4. Dispatch to the right handler. 
let request: JsonRpcRequest = match serde_json::from_value(json_value) { Ok(r) => r, Err(e) => { @@ -602,29 +689,49 @@ async fn response_reader_task( continue; } }; - let request_id = request.id.clone(); - let response = dispatch_reverse_rpc(&method, &request, &reverse_ctx).await; - - let response_json = match serde_json::to_string(&response) { - Ok(j) => j, - Err(e) => { - error!(error = %e, method = %method, "Failed to serialize reverse-RPC response"); - // Best-effort fallback: return a generic internal error. - let fallback = JsonRpcResponse::error( - Some(request_id), - JsonRpcError::new( - error_codes::INTERNAL_ERROR, - "failed to serialize response", - ), - ); - serde_json::to_string(&fallback).unwrap_or_default() + // Try to route to the originating forward call so dispatch + // happens on the caller's task (and task-locals propagate). + let parent_id = request + .parent_request_id + .as_ref() + .and_then(parent_id_to_i64); + + if let Some(parent_id) = parent_id { + let routed = { + let pending_map = pending.lock().await; + pending_map.get(&parent_id).map(|p| p.tx.clone()) + }; + if let Some(tx) = routed { + if let Err(send_err) = tx.send(PendingFrame::ReverseRpc(request)) { + // Receiver dropped between lookup and send — race + // with timeout/shutdown. Fall back to dispatching + // on the reader so the plugin still gets a response. + let dropped = match send_err.0 { + PendingFrame::ReverseRpc(req) => req, + // Unreachable: we just constructed a ReverseRpc + // frame above, and `send` returns whatever it + // failed to deliver. 
+ PendingFrame::Response(_) => continue, + }; + warn!( + method = %method, + parent_id = parent_id, + "Caller dropped pending channel; falling back to reader-task dispatch" + ); + dispatch_and_write(dropped, method.clone(), &reverse_ctx, &process).await; + } + continue; } - }; - let process_guard = process.lock().await; - if let Err(e) = process_guard.write_line(&response_json).await { - error!(error = %e, method = %method, "Failed to write reverse-RPC response to plugin"); + warn!( + method = %method, + parent_id = parent_id, + "Reverse-RPC parent request id not found in pending map; dispatching on reader" + ); } + + // No parent id, or parent not pending: dispatch on the reader. + dispatch_and_write(request, method, &reverse_ctx, &process).await; continue; } @@ -687,7 +794,7 @@ async fn response_reader_task( )) }; - if req.tx.send(result).is_err() { + if req.tx.send(PendingFrame::Response(result)).is_err() { debug!("Request {} receiver dropped", id); } } else { @@ -716,9 +823,49 @@ async fn response_reader_task( request_id = id, "Cancelling pending request due to plugin process exit" ); - let _ = req - .tx - .send(Err(RpcError::Process(ProcessError::ProcessTerminated))); + let _ = req.tx.send(PendingFrame::Response(Err(RpcError::Process( + ProcessError::ProcessTerminated, + )))); + } +} + +/// Coerce a reverse-RPC `parentRequestId` to the `i64` we use as our +/// pending-map key. Numbers map directly; strings parse as numbers (the host +/// only ever issues numeric ids, but the field type is `RequestId` for +/// protocol generality). +fn parent_id_to_i64(id: &RequestId) -> Option { + match id { + RequestId::Number(n) => Some(*n), + RequestId::String(s) => s.parse::().ok(), + } +} + +/// Dispatch a reverse-RPC on the *current* task and write the response back +/// to the plugin. Used as the fallback when no parent forward call is +/// available to dispatch on (legacy plugins, or the parent's caller has +/// already gone away). 
+async fn dispatch_and_write( + request: JsonRpcRequest, + method: String, + reverse_ctx: &Arc>, + process: &Arc>, +) { + let request_id = request.id.clone(); + let response = dispatch_reverse_rpc(&method, &request, reverse_ctx).await; + let response_json = match serde_json::to_string(&response) { + Ok(j) => j, + Err(e) => { + error!(error = %e, method = %method, "Failed to serialize reverse-RPC response"); + let fallback = JsonRpcResponse::error( + Some(request_id), + JsonRpcError::new(error_codes::INTERNAL_ERROR, "failed to serialize response"), + ); + serde_json::to_string(&fallback).unwrap_or_default() + } + }; + let process_guard = process.lock().await; + if let Err(e) = process_guard.write_line(&response_json).await { + error!(error = %e, method = %method, "Failed to write reverse-RPC response to plugin"); } } @@ -845,7 +992,10 @@ mod tests { /// Reverse-RPC dispatch should reject calls before the plugin has been /// initialized — at that point the host doesn't yet know the plugin's - /// capabilities. + /// capabilities. Returned as `METHOD_NOT_FOUND` (rather than + /// `AUTH_FAILED`) so plugin SDKs can retry with backoff to ride out the + /// brief init race; an `AUTH_FAILED` response would tell the SDK to + /// give up. See the doc comment on `dispatch_reverse_rpc`. #[tokio::test] async fn test_dispatch_rejects_before_init() { let ctx = Arc::new(RwLock::new(ReverseRpcContext::new())); @@ -856,7 +1006,7 @@ mod tests { ); let resp = dispatch_reverse_rpc(&request.method, &request, &ctx).await; assert!(resp.is_error()); - assert_eq!(resp.error.unwrap().code, error_codes::AUTH_FAILED); + assert_eq!(resp.error.unwrap().code, error_codes::METHOD_NOT_FOUND); } /// A plugin without `release_source` calling `releases/record` should be @@ -919,6 +1069,53 @@ mod tests { assert_eq!(resp.error.unwrap().code, error_codes::METHOD_NOT_FOUND); } + /// `parentRequestId` round-trips through serde with the camelCase wire + /// name and is omitted when None. 
This is the protocol contract we + /// share with the plugin SDK. + #[test] + fn parent_request_id_serializes_as_camel_case_and_omits_when_none() { + let mut req = JsonRpcRequest::new(42i64, "releases/record", Some(json!({"x": 1}))); + // Default: omitted on the wire. + let json = serde_json::to_string(&req).unwrap(); + assert!( + !json.contains("parentRequestId"), + "absent field should be skipped: {json}" + ); + + // Set: serialized as camelCase. + req.parent_request_id = Some(RequestId::Number(7)); + let json = serde_json::to_string(&req).unwrap(); + assert!( + json.contains("\"parentRequestId\":7"), + "expected camelCase parentRequestId in: {json}" + ); + + // Round-trip: a wire payload deserializes back with the field set. + let wire = r#"{"jsonrpc":"2.0","id":1,"method":"releases/record","parentRequestId":99}"#; + let parsed: JsonRpcRequest = serde_json::from_str(wire).unwrap(); + assert!(matches!( + parsed.parent_request_id, + Some(RequestId::Number(99)) + )); + } + + /// `parent_id_to_i64` accepts both numeric and string ids — we use it to + /// look up the parent forward call in the pending map, which is keyed by + /// `i64`. The host only ever issues numeric ids, but the protocol type + /// is `RequestId` for generality. + #[test] + fn parent_id_to_i64_handles_numeric_and_string_ids() { + assert_eq!(parent_id_to_i64(&RequestId::Number(42)), Some(42)); + assert_eq!( + parent_id_to_i64(&RequestId::String("17".to_string())), + Some(17) + ); + assert_eq!( + parent_id_to_i64(&RequestId::String("nope".to_string())), + None + ); + } + /// Verify that dropping an RpcClient aborts the reader task, releasing the /// Arc> so kill_on_drop(true) can fire on the child process. 
#[tokio::test] diff --git a/src/tasks/worker.rs b/src/tasks/worker.rs index a7bf9438..70ca269b 100644 --- a/src/tasks/worker.rs +++ b/src/tasks/worker.rs @@ -604,10 +604,20 @@ impl TaskWorker { let recording_broadcaster = Arc::new(EventBroadcaster::new_with_recording(1000, true)); let broadcaster_clone = recording_broadcaster.clone(); - // Execute task with recording broadcaster - let result = handler - .handle(&task, &self.db, Some(&recording_broadcaster)) - .await; + // Execute the handler inside a task-local scope that exposes the + // recording broadcaster to any code on this task's await chain — + // including reverse-RPC handlers (e.g. `releases/record`), which + // are dispatched on this task by `RpcClient::call_with_timeout` + // when the plugin tags reverse-RPCs with the parent forward + // request id. Without this, plugins that emit events via + // reverse-RPC (rather than synchronously through the handler's + // broadcaster argument) would have no recording context and + // their events would never replay. + let result = crate::events::with_recording_broadcaster( + recording_broadcaster.clone(), + handler.handle(&task, &self.db, Some(&recording_broadcaster)), + ) + .await; // Get recorded events before returning let events = broadcaster_clone.take_recorded_events(); @@ -634,10 +644,22 @@ impl TaskWorker { (self.event_broadcaster.clone(), None) }; - // Execute task with shared broadcaster (single-process mode) - let result = handler - .handle(&task, &self.db, task_broadcaster.as_ref()) - .await; + // Execute task with shared broadcaster (single-process mode). + // Set the task-local to the shared broadcaster too, so reverse-RPC + // handlers see *the same* broadcaster the rest of the task uses. + // The shared broadcaster has recording disabled here (web/single- + // process mode), so emits flow straight to live SSE subscribers. 
+ let result = if let Some(ref shared) = task_broadcaster { + crate::events::with_recording_broadcaster( + shared.clone(), + handler.handle(&task, &self.db, task_broadcaster.as_ref()), + ) + .await + } else { + handler + .handle(&task, &self.db, task_broadcaster.as_ref()) + .await + }; // Update task status based on result match result { From 7789d78d9e671e795964ec2196fdb88de0cc21e3 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 19:35:45 -0700 Subject: [PATCH 24/29] refactor(release-tracking): replace per-source poll seconds with cron schedules Release-source polling cadence is now a cron expression end-to-end, matching the rest of the app (library scans, dedup, thumbnails, PDF cache cleanup). The previous `poll_interval_s` field forced a leaky abstraction: seconds were mapped to wall-clock-aligned cron strings inside the scheduler, jitter was applied at registration, and admins reasoned in two different units. Backend changes: - `release_sources.poll_interval_s INTEGER NOT NULL` becomes `release_sources.cron_schedule TEXT NULL`. NULL means "inherit the server-wide default" so a settings change propagates to every uncustomized row without per-row writes. - New seeded setting `release_tracking.default_cron_schedule` (default `0 0 * * *`) is the resolution fallback. Compile-time default kicks in only if the setting row is missing. - `services/release/schedule.rs` collapses to two helpers (`read_default_cron_schedule`, `resolve_cron_schedule`); seconds-based jitter, backoff multiplication, and the `secs_to_cron` approximator are gone. Per-host backoff stays in the polling task, where 429/503 already drives multiplier state. - Scheduler feeds the resolved cron straight to `tokio-cron-scheduler` via the existing `normalize_cron_expression` helper (5- or 6-field accepted) and only re-registers when the effective expression actually changes. 
- `default_poll_interval_s` removed from the plugin manifest protocol, the TS SDK, and the Nyaa + MangaUpdates plugins. Polling cadence is a host concern, not a plugin one; new rows simply inherit the server default. - API DTO exposes `cronSchedule` (raw, may be null) and `effectiveCronSchedule` (resolved). PATCH uses double-Option semantics so callers can clear the override with `null`. Frontend changes: - `ReleaseTrackingSettings` row replaces the seconds NumberInput with the shared ``. When inheriting, the row shows the resolved default in human form with `(Default)` plus an "Override" affordance; when overridden, the editor renders inline with a "Reset to default" link. Empty input clears the override. Tests updated across repository, scheduler, plugin handler, integration suites, and the affected frontend specs. --- docs/api/openapi.json | 38 +-- migration/src/lib.rs | 4 + .../m20260503_000073_create_release_ledger.rs | 12 +- ...0079_seed_release_tracking_default_cron.rs | 100 ++++++++ plugins/release-mangaupdates/src/index.ts | 4 +- plugins/release-mangaupdates/src/manifest.ts | 5 - plugins/release-nyaa/src/index.ts | 9 +- plugins/release-nyaa/src/manifest.ts | 5 - plugins/sdk-typescript/src/types/manifest.ts | 6 - src/api/routes/v1/dto/release.rs | 74 +++++- src/api/routes/v1/handlers/releases.rs | 41 +++- src/db/entities/release_sources.rs | 6 +- src/db/repositories/release_ledger.rs | 4 - src/db/repositories/release_sources.rs | 166 ++++++++++--- src/scheduler/mod.rs | 22 +- src/scheduler/release_sources.rs | 220 ++++++------------ src/services/plugin/protocol.rs | 12 +- src/services/plugin/releases_handler.rs | 37 ++- src/services/release/schedule.rs | 185 +++++---------- src/tasks/handlers/poll_release_source.rs | 5 +- tests/api/releases.rs | 55 ++++- web/openapi.json | 38 +-- web/src/api/releases.ts | 2 +- .../series/SeriesReleasesPanel.test.tsx | 3 +- web/src/pages/ReleasesInbox.test.tsx | 3 +- .../settings/ReleaseTrackingSettings.test.tsx | 3 +- 
.../settings/ReleaseTrackingSettings.tsx | 153 ++++++++---- web/src/types/api.generated.ts | 41 +++- 28 files changed, 725 insertions(+), 528 deletions(-) create mode 100644 migration/src/m20260505_000079_seed_release_tracking_default_cron.rs diff --git a/docs/api/openapi.json b/docs/api/openapi.json index 35de4c47..c74036fc 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -7193,7 +7193,7 @@ "Releases" ], "summary": "PATCH a release source (admin-only).", - "description": "Toggle `enabled`, override `pollIntervalS`, or rename `displayName`.", + "description": "Toggle `enabled`, override `cronSchedule`, or rename `displayName`.\nSending `cronSchedule: null` clears the override and reverts the row to\ninheriting the server-wide `release_tracking.default_cron_schedule`.", "operationId": "update_release_source", "parameters": [ { @@ -7305,7 +7305,7 @@ "Releases" ], "summary": "Reset a release source to a clean slate.", - "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). User-managed fields (`enabled`,\n`poll_interval_s`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", + "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). 
User-managed fields (`enabled`,\n`cron_schedule`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", "operationId": "reset_release_source", "parameters": [ { @@ -33375,7 +33375,7 @@ "displayName", "kind", "enabled", - "pollIntervalS", + "effectiveCronSchedule", "createdAt", "updatedAt" ], @@ -33387,9 +33387,20 @@ "type": "string", "format": "date-time" }, + "cronSchedule": { + "type": [ + "string", + "null" + ], + "description": "Per-source cron override (5-field POSIX cron). NULL when the row\ninherits the server-wide `release_tracking.default_cron_schedule`." + }, "displayName": { "type": "string" }, + "effectiveCronSchedule": { + "type": "string", + "description": "The cron expression actually used by the scheduler for this source:\nthe row's `cron_schedule` if set, otherwise the resolved server-wide\ndefault. Lets the UI display \"Daily (Default)\" without needing to\nfetch the global setting separately." + }, "enabled": { "type": "boolean" }, @@ -33441,10 +33452,6 @@ "description": "Owning plugin id, or `core` for in-core synthetic sources.", "example": "release-nyaa" }, - "pollIntervalS": { - "type": "integer", - "format": "int32" - }, "sourceKey": { "type": "string", "description": "Plugin-defined unique key.", @@ -38699,8 +38706,15 @@ }, "UpdateReleaseSourceRequest": { "type": "object", - "description": "PATCH payload for a release source. All fields optional; omit to leave alone.", + "description": "PATCH payload for a release source. 
All fields optional; omit to leave alone.\n\n`cron_schedule` uses double-Option semantics:\n- field absent (`None`): leave the row's cron_schedule unchanged\n- explicit `null` (`Some(None)`) / `\"\"` / `\" \"`: clear the override\n (revert to inheriting the server-wide\n `release_tracking.default_cron_schedule`)\n- `Some(Some(\"0 */6 * * *\"))`: set a per-source override", "properties": { + "cronSchedule": { + "type": [ + "string", + "null" + ], + "description": "5-field POSIX cron expression. Use `null` (or empty string) to\nclear the override and inherit the server-wide default." + }, "displayName": { "type": [ "string", @@ -38712,14 +38726,6 @@ "boolean", "null" ] - }, - "pollIntervalS": { - "type": [ - "integer", - "null" - ], - "format": "int32", - "description": "Polling interval override (seconds). Must be > 0." } } }, diff --git a/migration/src/lib.rs b/migration/src/lib.rs index e70401ed..281e5821 100644 --- a/migration/src/lib.rs +++ b/migration/src/lib.rs @@ -161,6 +161,8 @@ mod m20260504_000076_seed_release_tracking_notify_filters; mod m20260505_000077_add_release_sources_last_summary; // Release tracking: per-row media_url + media_url_kind for torrent/magnet/DDL mod m20260505_000078_add_release_ledger_media_url; +// Release tracking: server-wide default cron schedule for release-source polling +mod m20260505_000079_seed_release_tracking_default_cron; pub struct Migrator; @@ -292,6 +294,8 @@ impl MigratorTrait for Migrator { Box::new(m20260505_000077_add_release_sources_last_summary::Migration), // Release tracking: per-row media_url + media_url_kind Box::new(m20260505_000078_add_release_ledger_media_url::Migration), + // Release tracking: server-wide default cron schedule + Box::new(m20260505_000079_seed_release_tracking_default_cron::Migration), ] } } diff --git a/migration/src/m20260503_000073_create_release_ledger.rs b/migration/src/m20260503_000073_create_release_ledger.rs index 3b44d27f..a3557db6 100644 --- 
a/migration/src/m20260503_000073_create_release_ledger.rs +++ b/migration/src/m20260503_000073_create_release_ledger.rs @@ -78,11 +78,11 @@ impl MigrationTrait for Migration { .not_null() .default(true), ) - .col( - ColumnDef::new(ReleaseSources::PollIntervalS) - .integer() - .not_null(), - ) + // Per-source cron schedule override. NULL means "inherit the + // server-wide `release_tracking.default_cron_schedule` setting". + // Stored as a 5-field POSIX cron expression (the host normalizes + // to 6-field at scheduler-load time). + .col(ColumnDef::new(ReleaseSources::CronSchedule).string_len(120)) .col(ColumnDef::new(ReleaseSources::LastPolledAt).timestamp_with_time_zone()) .col(ColumnDef::new(ReleaseSources::LastError).text()) .col(ColumnDef::new(ReleaseSources::LastErrorAt).timestamp_with_time_zone()) @@ -307,7 +307,7 @@ pub enum ReleaseSources { DisplayName, Kind, Enabled, - PollIntervalS, + CronSchedule, LastPolledAt, LastError, LastErrorAt, diff --git a/migration/src/m20260505_000079_seed_release_tracking_default_cron.rs b/migration/src/m20260505_000079_seed_release_tracking_default_cron.rs new file mode 100644 index 00000000..a49abd87 --- /dev/null +++ b/migration/src/m20260505_000079_seed_release_tracking_default_cron.rs @@ -0,0 +1,100 @@ +//! Seed the server-wide `release_tracking.default_cron_schedule` setting. +//! +//! Resolution chain for a `release_sources` row's effective schedule: +//! 1. `release_sources.cron_schedule` if non-NULL +//! 2. otherwise this server-wide default +//! 3. otherwise the compile-time fallback (`"0 0 * * *"`, daily) +//! +//! Stored as a 5-field POSIX cron string. The host normalizes to the +//! 6-field format expected by `tokio-cron-scheduler` at scheduler-load time +//! via `crate::utils::cron::normalize_cron_expression`. 
+ +use sea_orm::{ActiveModelTrait, Set, Statement, entity::prelude::*}; +use sea_orm_migration::prelude::*; +use uuid::Uuid; + +#[derive(DeriveMigrationName)] +pub struct Migration; + +#[derive(Clone, Debug, PartialEq, Eq, DeriveEntityModel)] +#[sea_orm(table_name = "settings")] +pub struct Model { + #[sea_orm(primary_key, auto_increment = false)] + pub id: Uuid, + pub key: String, + pub value: String, + pub value_type: String, + pub category: String, + pub description: String, + pub is_sensitive: bool, + pub default_value: String, + pub validation_rules: Option, + pub min_value: Option, + pub max_value: Option, + pub updated_at: chrono::DateTime, + pub updated_by: Option, + pub version: i32, + pub deleted_at: Option>, +} + +#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] +pub enum Relation {} + +impl ActiveModelBehavior for ActiveModel {} + +const SETTING_KEY: &str = "release_tracking.default_cron_schedule"; +const DEFAULT_CRON: &str = "0 0 * * *"; // daily at midnight (5-field POSIX) + +#[async_trait::async_trait] +impl MigrationTrait for Migration { + async fn up(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db = manager.get_connection(); + + let exists = db + .query_one(Statement::from_string( + manager.get_database_backend(), + format!("SELECT COUNT(*) as count FROM settings WHERE key = '{SETTING_KEY}'"), + )) + .await?; + if let Some(row) = exists { + let count: i64 = row.try_get("", "count")?; + if count > 0 { + return Ok(()); + } + } + + let setting = ActiveModel { + id: Set(Uuid::new_v4()), + key: Set(SETTING_KEY.to_string()), + value: Set(DEFAULT_CRON.to_string()), + value_type: Set("String".to_string()), + category: Set("Release Tracking".to_string()), + description: Set( + "Server-wide default cron schedule for release-source polling. Applied to any `release_sources` row whose `cron_schedule` is NULL. Standard 5-field POSIX cron (minute hour day_of_month month day_of_week)." 
+ .to_string(), + ), + is_sensitive: Set(false), + default_value: Set(DEFAULT_CRON.to_string()), + validation_rules: Set(Some(r#"{"input_type": "cron"}"#.to_string())), + min_value: Set(None), + max_value: Set(None), + updated_at: Set(chrono::Utc::now()), + updated_by: Set(None), + version: Set(1), + deleted_at: Set(None), + }; + + setting.insert(db).await?; + Ok(()) + } + + async fn down(&self, manager: &SchemaManager) -> Result<(), DbErr> { + let db = manager.get_connection(); + db.execute(Statement::from_string( + manager.get_database_backend(), + format!("DELETE FROM settings WHERE key = '{SETTING_KEY}'"), + )) + .await?; + Ok(()) + } +} diff --git a/plugins/release-mangaupdates/src/index.ts b/plugins/release-mangaupdates/src/index.ts index 01153413..90676f19 100644 --- a/plugins/release-mangaupdates/src/index.ts +++ b/plugins/release-mangaupdates/src/index.ts @@ -38,7 +38,7 @@ import { } from "@ashdev/codex-plugin-sdk"; import { fetchSeriesFeed } from "./fetcher.js"; import { parseCommaList, passesFilters, resolveFilters } from "./filter.js"; -import { DEFAULT_POLL_INTERVAL_S, EXTERNAL_ID_SOURCE_MANGAUPDATES, manifest } from "./manifest.js"; +import { EXTERNAL_ID_SOURCE_MANGAUPDATES, manifest } from "./manifest.js"; import { type ParsedRssItem, parseFeed } from "./parser.js"; const logger = createLogger({ name: manifest.name, level: "info" }); @@ -460,7 +460,7 @@ createReleaseSourcePlugin({ state.requestTimeoutMs = Math.max(1_000, Math.min(ac.requestTimeoutMs, 60_000)); } logger.info( - `initialized: blockedGroups=${state.blockedGroupsCsv ? "set" : "empty"} timeoutMs=${state.requestTimeoutMs} defaultPoll=${DEFAULT_POLL_INTERVAL_S}s`, + `initialized: blockedGroups=${state.blockedGroupsCsv ? "set" : "empty"} timeoutMs=${state.requestTimeoutMs}`, ); // Materialize the single static source row. 
Deferred to a microtask so diff --git a/plugins/release-mangaupdates/src/manifest.ts b/plugins/release-mangaupdates/src/manifest.ts index 5aa71ef8..3719786d 100644 --- a/plugins/release-mangaupdates/src/manifest.ts +++ b/plugins/release-mangaupdates/src/manifest.ts @@ -11,10 +11,6 @@ import packageJson from "../package.json" with { type: "json" }; */ export const EXTERNAL_ID_SOURCE_MANGAUPDATES = "mangaupdates" as const; -/** Default poll interval: 24 hours. Daily polls match upstream cadence and - * keep the per-series fan-out gentle for users tracking hundreds of series. */ -export const DEFAULT_POLL_INTERVAL_S = 86_400; - export const manifest = { name: "release-mangaupdates", displayName: "MangaUpdates Releases", @@ -31,7 +27,6 @@ export const manifest = { requiresExternalIds: [EXTERNAL_ID_SOURCE_MANGAUPDATES], canAnnounceChapters: true, canAnnounceVolumes: true, - defaultPollIntervalS: DEFAULT_POLL_INTERVAL_S, }, }, configSchema: { diff --git a/plugins/release-nyaa/src/index.ts b/plugins/release-nyaa/src/index.ts index c4a15691..aa6a91fc 100644 --- a/plugins/release-nyaa/src/index.ts +++ b/plugins/release-nyaa/src/index.ts @@ -50,12 +50,7 @@ import { subscriptionToSourceKey, type UploaderSubscription, } from "./fetcher.js"; -import { - DEFAULT_MIN_CONFIDENCE, - DEFAULT_POLL_INTERVAL_S, - DEFAULT_REQUEST_TIMEOUT_MS, - manifest, -} from "./manifest.js"; +import { DEFAULT_MIN_CONFIDENCE, DEFAULT_REQUEST_TIMEOUT_MS, manifest } from "./manifest.js"; import { type AliasCandidate, type AliasMatch, matchSeriesAny } from "./matcher.js"; import { type ParsedRssItem, parseFeed } from "./parser.js"; @@ -499,7 +494,7 @@ createReleaseSourcePlugin({ state.baseUrl = ac.baseUrl.trim(); } logger.info( - `initialized: subscriptions=${state.subscriptions.length} timeoutMs=${state.requestTimeoutMs} minConfidence=${state.minConfidence} defaultPoll=${DEFAULT_POLL_INTERVAL_S}s`, + `initialized: subscriptions=${state.subscriptions.length} timeoutMs=${state.requestTimeoutMs} 
minConfidence=${state.minConfidence}`, ); // Materialize source rows. Deferred to a microtask + retry on diff --git a/plugins/release-nyaa/src/manifest.ts b/plugins/release-nyaa/src/manifest.ts index e1e5f58f..0ae3d922 100644 --- a/plugins/release-nyaa/src/manifest.ts +++ b/plugins/release-nyaa/src/manifest.ts @@ -1,10 +1,6 @@ import type { PluginManifest } from "@ashdev/codex-plugin-sdk"; import packageJson from "../package.json" with { type: "json" }; -/** Default poll interval: 24 hours. Daily polls keep the per-uploader fan-out - * gentle and respect Nyaa's preference for low-frequency clients. */ -export const DEFAULT_POLL_INTERVAL_S = 86_400; - /** Default per-fetch HTTP timeout. Nyaa is usually fast; 10s is generous. */ export const DEFAULT_REQUEST_TIMEOUT_MS = 10_000; @@ -32,7 +28,6 @@ export const manifest = { requiresAliases: true, canAnnounceChapters: true, canAnnounceVolumes: true, - defaultPollIntervalS: DEFAULT_POLL_INTERVAL_S, }, }, configSchema: { diff --git a/plugins/sdk-typescript/src/types/manifest.ts b/plugins/sdk-typescript/src/types/manifest.ts index 469dc056..b2c421c0 100644 --- a/plugins/sdk-typescript/src/types/manifest.ts +++ b/plugins/sdk-typescript/src/types/manifest.ts @@ -63,12 +63,6 @@ export interface ReleaseSourceCapability { canAnnounceChapters?: boolean; /** Whether the plugin announces volume-level releases. */ canAnnounceVolumes?: boolean; - /** - * Default poll interval in seconds. Used when a `release_sources` row for - * this plugin doesn't override it. Server settings can also set a global - * default that takes precedence at schedule resolution time. - */ - defaultPollIntervalS?: number; } /** diff --git a/src/api/routes/v1/dto/release.rs b/src/api/routes/v1/dto/release.rs index be02566e..9b661763 100644 --- a/src/api/routes/v1/dto/release.rs +++ b/src/api/routes/v1/dto/release.rs @@ -145,7 +145,15 @@ pub struct ReleaseSourceDto { /// `rss-uploader` | `rss-series` | `api-feed` | `metadata-feed` | `metadata-piggyback`. 
pub kind: String, pub enabled: bool, - pub poll_interval_s: i32, + /// Per-source cron override (5-field POSIX cron). NULL when the row + /// inherits the server-wide `release_tracking.default_cron_schedule`. + #[serde(skip_serializing_if = "Option::is_none")] + pub cron_schedule: Option, + /// The cron expression actually used by the scheduler for this source: + /// the row's `cron_schedule` if set, otherwise the resolved server-wide + /// default. Lets the UI display "Daily (Default)" without needing to + /// fetch the global setting separately. + pub effective_cron_schedule: String, #[serde(skip_serializing_if = "Option::is_none")] pub last_polled_at: Option>, #[serde(skip_serializing_if = "Option::is_none")] @@ -168,8 +176,15 @@ pub struct ReleaseSourceDto { pub updated_at: DateTime, } -impl From for ReleaseSourceDto { - fn from(m: release_sources::Model) -> Self { +impl ReleaseSourceDto { + /// Build the DTO from a model + the resolved server-wide default cron + /// schedule. Use this in handlers that already have the default in + /// hand (avoids a settings round-trip per row). + pub fn from_model_with_default(m: release_sources::Model, server_default: &str) -> Self { + let effective = crate::services::release::schedule::resolve_cron_schedule( + m.cron_schedule.as_deref(), + server_default, + ); Self { id: m.id, plugin_id: m.plugin_id, @@ -177,7 +192,8 @@ impl From for ReleaseSourceDto { display_name: m.display_name, kind: m.kind, enabled: m.enabled, - poll_interval_s: m.poll_interval_s, + cron_schedule: m.cron_schedule, + effective_cron_schedule: effective, last_polled_at: m.last_polled_at, last_error: m.last_error, last_error_at: m.last_error_at, @@ -190,6 +206,16 @@ impl From for ReleaseSourceDto { } } +impl From for ReleaseSourceDto { + /// Convenience for callers that don't have the server default handy + /// (e.g. unit tests). Falls back to the compile-time + /// `DEFAULT_CRON_SCHEDULE` for resolution. 
Production handlers should + /// prefer [`ReleaseSourceDto::from_model_with_default`]. + fn from(m: release_sources::Model) -> Self { + Self::from_model_with_default(m, crate::services::release::schedule::DEFAULT_CRON_SCHEDULE) + } +} + #[derive(Debug, Serialize, Deserialize, ToSchema)] #[serde(rename_all = "camelCase")] pub struct ReleaseSourceListResponse { @@ -197,13 +223,49 @@ pub struct ReleaseSourceListResponse { } /// PATCH payload for a release source. All fields optional; omit to leave alone. +/// +/// `cron_schedule` uses double-Option semantics: +/// - field absent (`None`): leave the row's cron_schedule unchanged +/// - explicit `null` (`Some(None)`) / `""` / `" "`: clear the override +/// (revert to inheriting the server-wide +/// `release_tracking.default_cron_schedule`) +/// - `Some(Some("0 */6 * * *"))`: set a per-source override #[derive(Debug, Clone, Default, Serialize, Deserialize, ToSchema)] #[serde(rename_all = "camelCase")] pub struct UpdateReleaseSourceRequest { pub display_name: Option, pub enabled: Option, - /// Polling interval override (seconds). Must be > 0. - pub poll_interval_s: Option, + /// 5-field POSIX cron expression. Use `null` (or empty string) to + /// clear the override and inherit the server-wide default. + #[serde(default, with = "double_option")] + pub cron_schedule: Option>, +} + +/// Local copy of the `Option>` serde adapter used by `tracking.rs`. +/// See that module for the full rationale; in short: distinguishes "field +/// absent" (leave alone) from "explicit null" (clear). 
+mod double_option { + use serde::{Deserialize, Deserializer, Serialize, Serializer}; + + pub fn serialize(value: &Option>, ser: S) -> Result + where + S: Serializer, + T: Serialize, + { + match value { + Some(Some(v)) => v.serialize(ser), + Some(None) => ser.serialize_none(), + None => ser.serialize_none(), + } + } + + pub fn deserialize<'de, D, T>(de: D) -> Result>, D::Error> + where + D: Deserializer<'de>, + T: Deserialize<'de>, + { + Option::::deserialize(de).map(Some) + } } /// Response shape from the `reset` endpoint. diff --git a/src/api/routes/v1/handlers/releases.rs b/src/api/routes/v1/handlers/releases.rs index e4acb463..ca09d752 100644 --- a/src/api/routes/v1/handlers/releases.rs +++ b/src/api/routes/v1/handlers/releases.rs @@ -855,14 +855,39 @@ pub async fn list_release_sources( let sources = ReleaseSourceRepository::list_all(&state.db) .await .map_err(|e| ApiError::Internal(format!("Failed to list sources: {}", e)))?; + let server_default = resolve_server_default_cron(&state.db).await; Ok(Json(ReleaseSourceListResponse { - sources: sources.into_iter().map(Into::into).collect(), + sources: sources + .into_iter() + .map(|m| ReleaseSourceDto::from_model_with_default(m, &server_default)) + .collect(), })) } +/// Fetch the server-wide default cron schedule for release-source polling. +/// Falls back to the compile-time default on a settings-fetch failure +/// rather than 500-ing the request — the field is informational on the +/// response shape. 
+async fn resolve_server_default_cron(db: &sea_orm::DatabaseConnection) -> String { + use crate::services::release::schedule::{DEFAULT_CRON_SCHEDULE, read_default_cron_schedule}; + use crate::services::settings::SettingsService; + match SettingsService::new(db.clone()).await { + Ok(svc) => read_default_cron_schedule(&svc).await, + Err(e) => { + tracing::warn!( + "Failed to load settings service for cron resolution; using compile-time default: {}", + e + ); + DEFAULT_CRON_SCHEDULE.to_string() + } + } +} + /// PATCH a release source (admin-only). /// -/// Toggle `enabled`, override `pollIntervalS`, or rename `displayName`. +/// Toggle `enabled`, override `cronSchedule`, or rename `displayName`. +/// Sending `cronSchedule: null` clears the override and reverts the row to +/// inheriting the server-wide `release_tracking.default_cron_schedule`. #[utoipa::path( patch, path = "/api/v1/release-sources/{source_id}", @@ -899,7 +924,7 @@ pub async fn update_release_source( let update = ReleaseSourceUpdate { display_name: request.display_name, enabled: request.enabled, - poll_interval_s: request.poll_interval_s, + cron_schedule: request.cron_schedule, config: None, // config edits go through plugin admin, not here }; @@ -907,7 +932,7 @@ pub async fn update_release_source( .await .map_err(|e| { let msg = e.to_string(); - if msg.contains("positive") { + if msg.to_lowercase().contains("cron") { ApiError::BadRequest(msg) } else { ApiError::Internal(format!("Failed to update source: {}", e)) @@ -928,7 +953,11 @@ pub async fn update_release_source( } } - Ok(Json(updated.into())) + let server_default = resolve_server_default_cron(&state.db).await; + Ok(Json(ReleaseSourceDto::from_model_with_default( + updated, + &server_default, + ))) } /// Trigger a manual poll for a source. 
@@ -1004,7 +1033,7 @@ pub async fn poll_release_source_now( /// Deletes every `release_ledger` row owned by the source and clears the /// source's transient poll state (`etag`, `last_polled_at`, `last_error`, /// `last_error_at`, `last_summary`). User-managed fields (`enabled`, -/// `poll_interval_s`, `display_name`, `config`) are preserved. +/// `cron_schedule`, `display_name`, `config`) are preserved. /// /// Intended for testing/troubleshooting: after a reset, the next poll /// fetches the upstream feed without an `If-None-Match` header (so no 304 diff --git a/src/db/entities/release_sources.rs b/src/db/entities/release_sources.rs index 9d845975..661fac7a 100644 --- a/src/db/entities/release_sources.rs +++ b/src/db/entities/release_sources.rs @@ -25,7 +25,11 @@ pub struct Model { /// `rss-uploader` | `rss-series` | `api-feed` | `metadata-feed` | `metadata-piggyback`. pub kind: String, pub enabled: bool, - pub poll_interval_s: i32, + /// 5-field POSIX cron expression. NULL means "inherit the server-wide + /// `release_tracking.default_cron_schedule` setting." The host + /// normalizes to the 6-field format expected by `tokio-cron-scheduler` + /// at scheduler-load time. 
+ pub cron_schedule: Option, pub last_polled_at: Option>, pub last_error: Option, pub last_error_at: Option>, diff --git a/src/db/repositories/release_ledger.rs b/src/db/repositories/release_ledger.rs index 8be3f63d..b6b1f440 100644 --- a/src/db/repositories/release_ledger.rs +++ b/src/db/repositories/release_ledger.rs @@ -528,7 +528,6 @@ mod tests { source_key: "nyaa:user:tsuna69".to_string(), display_name: "Nyaa - tsuna69".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, @@ -617,7 +616,6 @@ mod tests { source_key: "nyaa:user:other".to_string(), display_name: "Nyaa - other".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, @@ -777,7 +775,6 @@ mod tests { source_key: "nyaa:user:tsuna69".to_string(), display_name: "Nyaa - tsuna69".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, @@ -1036,7 +1033,6 @@ mod tests { source_key: "nyaa:user:other".to_string(), display_name: "Nyaa - other".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, diff --git a/src/db/repositories/release_sources.rs b/src/db/repositories/release_sources.rs index 4e0f2e48..da8130db 100644 --- a/src/db/repositories/release_sources.rs +++ b/src/db/repositories/release_sources.rs @@ -19,6 +19,21 @@ use uuid::Uuid; use crate::db::entities::release_sources::{ self, Entity as ReleaseSources, Model as ReleaseSource, kind, }; +use crate::utils::cron::validate_cron_expression; + +/// Normalize a caller-supplied cron schedule: trim, treat empty as `None`, +/// validate the parse, and return the trimmed string. Errors when the +/// expression is non-empty but invalid. 
+fn sanitize_cron_schedule(value: Option) -> Result> { + let Some(raw) = value else { return Ok(None) }; + let trimmed = raw.trim(); + if trimmed.is_empty() { + return Ok(None); + } + validate_cron_expression(trimmed) + .map_err(|e| anyhow::anyhow!("invalid cron_schedule: {}", e))?; + Ok(Some(trimmed.to_string())) +} /// Parameters for creating a new release source. Only the fields a caller is /// expected to choose live here; `created_at` / `updated_at` / `id` are @@ -29,20 +44,19 @@ pub struct NewReleaseSource { pub source_key: String, pub display_name: String, pub kind: String, - pub poll_interval_s: i32, pub enabled: Option, pub config: Option, } /// PATCH-style update payload. Each `Option` distinguishes "leave alone" -/// (`None`) from "set". For nullable columns a simple `Option>` is -/// not needed at this stage because the existing fields are status fields -/// that the caller can read-modify-write through dedicated helpers. +/// (`None`) from "set". `cron_schedule` uses `Option>` so the +/// caller can explicitly clear a row's override (revert to inheriting the +/// server-wide default) by sending `Some(None)`. #[derive(Debug, Default, Clone)] pub struct ReleaseSourceUpdate { pub display_name: Option, pub enabled: Option, - pub poll_interval_s: Option, + pub cron_schedule: Option>, pub config: Option>, } @@ -91,6 +105,8 @@ impl ReleaseSourceRepository { } /// Create a new source. Validates `kind` against the canonical set. + /// New rows always start with `cron_schedule = NULL` (inherit the + /// server-wide default); admins can override per-row via PATCH. 
pub async fn create( db: &DatabaseConnection, params: NewReleaseSource, @@ -98,9 +114,6 @@ impl ReleaseSourceRepository { if !kind::is_valid(¶ms.kind) { anyhow::bail!("invalid kind: {}", params.kind); } - if params.poll_interval_s <= 0 { - anyhow::bail!("poll_interval_s must be positive"); - } if params.plugin_id.trim().is_empty() { anyhow::bail!("plugin_id cannot be empty"); } @@ -116,7 +129,7 @@ impl ReleaseSourceRepository { display_name: Set(params.display_name), kind: Set(params.kind), enabled: Set(params.enabled.unwrap_or(true)), - poll_interval_s: Set(params.poll_interval_s), + cron_schedule: Set(None), last_polled_at: Set(None), last_error: Set(None), last_error_at: Set(None), @@ -148,7 +161,7 @@ impl ReleaseSourceRepository { /// On insert, the row is created with `params` and defaults to enabled. /// On update, **only the plugin-owned descriptive fields** are refreshed /// (`display_name`, `kind`, `config`). User-managed fields (`enabled`, - /// `poll_interval_s`) are preserved so an admin's interval override or + /// `cron_schedule`) are preserved so an admin's schedule override or /// disable toggle survives a plugin re-registration. /// /// Used by `releases/register_sources` so a plugin can declare its full @@ -213,12 +226,6 @@ impl ReleaseSourceRepository { .await? .ok_or_else(|| anyhow::anyhow!("release source {} not found", id))?; - if let Some(interval) = update.poll_interval_s - && interval <= 0 - { - anyhow::bail!("poll_interval_s must be positive"); - } - let mut active: release_sources::ActiveModel = existing.into(); if let Some(name) = update.display_name { active.display_name = Set(name); @@ -226,8 +233,10 @@ impl ReleaseSourceRepository { if let Some(enabled) = update.enabled { active.enabled = Set(enabled); } - if let Some(interval) = update.poll_interval_s { - active.poll_interval_s = Set(interval); + if let Some(cron) = update.cron_schedule { + // Some(None) -> clear (inherit server default); Some(Some(s)) -> set override. 
+ let sanitized = sanitize_cron_schedule(cron)?; + active.cron_schedule = Set(sanitized); } if let Some(cfg) = update.config { active.config = Set(cfg); @@ -294,7 +303,7 @@ impl ReleaseSourceRepository { /// Reset all transient poll state on a source: clears `etag`, /// `last_polled_at`, `last_error`, `last_error_at`, and `last_summary`. - /// Leaves user-managed fields (`enabled`, `poll_interval_s`, + /// Leaves user-managed fields (`enabled`, `cron_schedule`, /// `display_name`, `config`) untouched. /// /// Used by the source-reset admin endpoint so a forced re-poll fetches @@ -373,7 +382,6 @@ mod tests { source_key: "nyaa:user:tsuna69".to_string(), display_name: "Nyaa - tsuna69".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, } @@ -418,16 +426,103 @@ mod tests { } #[tokio::test] - async fn create_rejects_non_positive_interval() { + async fn update_rejects_invalid_cron() { let (db, _temp) = create_test_db().await; let conn = db.sea_orm_connection(); - let mut params = nyaa_source(); - params.poll_interval_s = 0; - let err = ReleaseSourceRepository::create(conn, params) + let s = ReleaseSourceRepository::create(conn, nyaa_source()) .await - .unwrap_err(); - assert!(err.to_string().contains("positive")); + .unwrap(); + let err = ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + cron_schedule: Some(Some("not a cron".to_string())), + ..Default::default() + }, + ) + .await + .unwrap_err(); + assert!(err.to_string().to_lowercase().contains("cron")); + } + + #[tokio::test] + async fn update_clears_cron_schedule_with_explicit_none() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let s = ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + // Set an override. 
+ ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + cron_schedule: Some(Some("0 */6 * * *".to_string())), + ..Default::default() + }, + ) + .await + .unwrap(); + let after_set = ReleaseSourceRepository::get_by_id(conn, s.id) + .await + .unwrap() + .unwrap(); + assert_eq!(after_set.cron_schedule.as_deref(), Some("0 */6 * * *")); + + // Clear back to inherit. + ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + cron_schedule: Some(None), + ..Default::default() + }, + ) + .await + .unwrap(); + let after_clear = ReleaseSourceRepository::get_by_id(conn, s.id) + .await + .unwrap() + .unwrap(); + assert!(after_clear.cron_schedule.is_none()); + } + + #[tokio::test] + async fn update_treats_empty_cron_as_clear() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let s = ReleaseSourceRepository::create(conn, nyaa_source()) + .await + .unwrap(); + ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + cron_schedule: Some(Some("0 */6 * * *".to_string())), + ..Default::default() + }, + ) + .await + .unwrap(); + + ReleaseSourceRepository::update( + conn, + s.id, + ReleaseSourceUpdate { + cron_schedule: Some(Some(" ".to_string())), + ..Default::default() + }, + ) + .await + .unwrap(); + let after = ReleaseSourceRepository::get_by_id(conn, s.id) + .await + .unwrap() + .unwrap(); + assert!(after.cron_schedule.is_none()); } #[tokio::test] @@ -566,15 +661,18 @@ mod tests { .await .unwrap(); assert!(created.enabled); - assert_eq!(created.poll_interval_s, 3600); + assert!( + created.cron_schedule.is_none(), + "fresh row inherits server-wide default" + ); - // Admin disables and overrides interval. + // Admin disables and sets a cron override. 
ReleaseSourceRepository::update( conn, created.id, ReleaseSourceUpdate { enabled: Some(false), - poll_interval_s: Some(900), + cron_schedule: Some(Some("0 */6 * * *".to_string())), ..Default::default() }, ) @@ -585,7 +683,6 @@ mod tests { let mut params = nyaa_source(); params.display_name = "Nyaa: tsuna69 (refreshed)".to_string(); params.config = Some(serde_json::json!({ "subscription": "tsuna69" })); - params.poll_interval_s = 7200; // would-be interval is ignored on update let updated = ReleaseSourceRepository::upsert(conn, params).await.unwrap(); assert_eq!(updated.id, created.id, "same key returns same row"); @@ -599,8 +696,9 @@ mod tests { "user-set enabled flag must survive a plugin re-register" ); assert_eq!( - updated.poll_interval_s, 900, - "user-set poll_interval_s must survive a plugin re-register" + updated.cron_schedule.as_deref(), + Some("0 */6 * * *"), + "user-set cron_schedule must survive a plugin re-register" ); } @@ -727,7 +825,7 @@ mod tests { s.id, ReleaseSourceUpdate { enabled: Some(false), - poll_interval_s: Some(900), + cron_schedule: Some(Some("0 */6 * * *".to_string())), ..Default::default() }, ) @@ -749,7 +847,7 @@ mod tests { assert!(after.last_summary.is_none()); // User-managed fields preserved. 
assert!(!after.enabled); - assert_eq!(after.poll_interval_s, 900); + assert_eq!(after.cron_schedule.as_deref(), Some("0 */6 * * *")); } #[tokio::test] diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs index 327e22b1..133da74b 100644 --- a/src/scheduler/mod.rs +++ b/src/scheduler/mod.rs @@ -11,7 +11,6 @@ use crate::db::entities::library_jobs; use crate::db::repositories::{LibraryJobRepository, LibraryRepository, TaskRepository}; use crate::scanner::{ScanMode, ScanningConfig}; use crate::services::library_jobs::{LibraryJobConfig, parse_job_config}; -use crate::services::release::backoff::HostBackoff; use crate::services::settings::SettingsService; use crate::tasks::types::TaskType; use crate::utils::cron::{normalize_cron_expression, parse_timezone}; @@ -22,11 +21,6 @@ pub struct Scheduler { db: DatabaseConnection, /// Server-level default timezone for all cron schedules default_tz: Tz, - /// Per-host backoff state shared with the polling task handler. The - /// scheduler reads this when computing the effective interval at - /// reconcile time so a recently-throttled host doesn't immediately - /// get re-scheduled. - release_backoff: HostBackoff, /// Reconcile state for the per-source release-polling jobs. release_sources: release_sources::ReleaseSourceSchedule, } @@ -59,33 +53,21 @@ impl Scheduler { scheduler, db, default_tz, - release_backoff: HostBackoff::new(), release_sources: release_sources::ReleaseSourceSchedule::new(), }) } - /// Override the per-host backoff store with one shared from the - /// `TaskWorker`. Without this, the scheduler and the polling task have - /// independent (out of sync) backoff state, and 429/503 signals - /// observed by polls won't influence the next-tick interval. - #[allow(dead_code)] // Wired by main.rs once scheduler + worker share state; tests too. - pub fn with_release_backoff(mut self, backoff: HostBackoff) -> Self { - self.release_backoff = backoff; - self - } - /// Trigger a release-source reconcile. 
Call after writes to the /// `release_sources` table so the scheduler picks up enable/disable /// changes without a full restart. pub async fn reconcile_release_sources(&mut self) -> Result<()> { let settings = SettingsService::new(self.db.clone()).await?; - let default_interval = release_sources::read_default_poll_interval(&settings).await; + let server_default = release_sources::read_server_default_cron(&settings).await; release_sources::reconcile( &mut self.scheduler, &mut self.release_sources, &self.db, - self.release_backoff.clone(), - default_interval, + server_default, ) .await } diff --git a/src/scheduler/release_sources.rs b/src/scheduler/release_sources.rs index 8daa132d..8563b31f 100644 --- a/src/scheduler/release_sources.rs +++ b/src/scheduler/release_sources.rs @@ -1,14 +1,20 @@ //! Release-source polling scheduler integration. //! -//! Each enabled `release_sources` row is reconciled into the scheduler as a -//! tokio-cron-scheduler job. The job fires a `PollReleaseSource` task at the -//! row's effective interval (per-source override → server default), with -//! ±10% jitter applied on registration and per-host backoff applied at -//! firing time. +//! Each enabled `release_sources` row is registered as a tokio-cron-scheduler +//! job whose schedule is the row's effective cron expression: //! -//! `tokio-cron-scheduler` doesn't have a "fire every N seconds with jitter" -//! primitive, so we build a 6-part cron string from the resolved interval -//! and let the existing job machinery handle dispatch. +//! 1. `release_sources.cron_schedule` (per-source override) when non-NULL. +//! 2. Otherwise the server-wide `release_tracking.default_cron_schedule` +//! setting. +//! 3. Otherwise the compile-time fallback (`"0 0 * * *"`, daily). +//! +//! When the cron fires, the job enqueues a `PollReleaseSource` task. The +//! task itself maintains per-host backoff via [`super::super::services:: +//! 
release::backoff::HostBackoff`] (recording 429/503 from upstream and +//! resetting on success), so the scheduler does not need to skip cron +//! ticks based on backoff state. A cron firing during a throttled window +//! returns a 429 quickly without doing real work, and the task's recorded +//! error feeds the backoff state for the next tick. use anyhow::{Context, Result}; use sea_orm::DatabaseConnection; @@ -18,19 +24,20 @@ use tracing::{debug, error, info, warn}; use uuid::Uuid; use crate::db::repositories::{ReleaseSourceRepository, TaskRepository}; -use crate::services::release::backoff::HostBackoff; -use crate::services::release::schedule::{ - DEFAULT_POLL_INTERVAL_S, MIN_POLL_INTERVAL_S, SETTING_DEFAULT_POLL_INTERVAL_S, apply_backoff, - jitter_interval_s, resolve_interval_s, -}; +use crate::services::release::schedule::{read_default_cron_schedule, resolve_cron_schedule}; use crate::services::settings::SettingsService; use crate::tasks::types::TaskType; +use crate::utils::cron::normalize_cron_expression; /// Tracks scheduler-registered jobs per source row so we can reconcile. #[derive(Debug, Default)] pub struct ReleaseSourceSchedule { /// Map of `release_sources.id` → tokio-cron-scheduler job UUID. jobs: HashMap, + /// Map of `release_sources.id` → effective cron expression currently + /// registered (post-resolution, pre-normalization). Lets `reconcile` + /// detect schedule changes without rebuilding every job on every pass. + last_cron: HashMap, } impl ReleaseSourceSchedule { @@ -47,57 +54,47 @@ impl ReleaseSourceSchedule { } } -/// Read the configured global default poll interval (seconds). Falls back -/// to the compile-time default when settings are unavailable or the value -/// is invalid (`<= 0`). 
-pub async fn read_default_poll_interval(settings: &SettingsService) -> u32 { - let raw = settings - .get_uint( - SETTING_DEFAULT_POLL_INTERVAL_S, - DEFAULT_POLL_INTERVAL_S as u64, - ) - .await - .unwrap_or(DEFAULT_POLL_INTERVAL_S as u64); - if raw == 0 { - DEFAULT_POLL_INTERVAL_S - } else { - // Clamp on read so a misconfigured row can't push below the - // sane minimum. - raw.max(MIN_POLL_INTERVAL_S as u64).min(u32::MAX as u64) as u32 - } -} - /// Reconcile the scheduler's release-source jobs against the current set of /// enabled rows. Adds new sources, removes disabled/deleted ones, and -/// re-registers any whose interval changed. +/// re-registers any whose `cron_schedule` (or the inherited default) changed. /// /// Idempotent: safe to call repeatedly (e.g. after a `release_sources` write). pub async fn reconcile( scheduler: &mut JobScheduler, state: &mut ReleaseSourceSchedule, db: &DatabaseConnection, - backoff: HostBackoff, - default_interval_s: u32, + server_default: String, ) -> Result<()> { let enabled = ReleaseSourceRepository::list_enabled(db) .await .context("Failed to load enabled release sources")?; - // Track which sources we've seen this pass. let mut seen: std::collections::HashSet = std::collections::HashSet::new(); for source in &enabled { seen.insert(source.id); - // The interval/jitter combo doesn't change between reconciles - // unless the row's `poll_interval_s` is mutated. Cheap rule: - // re-register on every reconcile that doesn't already have the - // job. We accept the small cost of re-registration on a - // poll-interval change. - if state.contains(source.id) { + let effective_cron = + resolve_cron_schedule(source.cron_schedule.as_deref(), &server_default); + + if let Some(prev) = state.last_cron.get(&source.id) + && prev == &effective_cron + && state.contains(source.id) + { + // Same schedule, already registered — nothing to do. 
continue; } - if let Err(e) = - register_one(scheduler, state, db, &backoff, source, default_interval_s).await + + // Schedule changed (or first time we see this source) — drop any + // existing job and register fresh. + if let Some(job_id) = state.jobs.remove(&source.id) + && let Err(e) = scheduler.remove(&job_id).await { + warn!( + "Failed to remove stale schedule for source {}: {}", + source.id, e + ); + } + + if let Err(e) = register_one(scheduler, state, db, source, &effective_cron).await { warn!( "Failed to register schedule for source {} ({}): {}", source.id, source.display_name, e @@ -114,6 +111,7 @@ pub async fn reconcile( .collect(); for source_id in stale { if let Some(job_id) = state.jobs.remove(&source_id) { + state.last_cron.remove(&source_id); if let Err(e) = scheduler.remove(&job_id).await { warn!( "Failed to remove stale schedule for source {}: {}", @@ -136,19 +134,17 @@ async fn register_one( scheduler: &mut JobScheduler, state: &mut ReleaseSourceSchedule, db: &DatabaseConnection, - backoff: &HostBackoff, source: &crate::db::entities::release_sources::Model, - default_interval_s: u32, + effective_cron: &str, ) -> Result<()> { - let resolved = resolve_interval_s(source.poll_interval_s, default_interval_s); - let jittered = jitter_interval_s(resolved); - // Apply current backoff multiplier so a recently-throttled host - // doesn't get re-polled immediately on scheduler reload. - let url_hint = derive_url_hint(source); - let multiplier = backoff.multiplier(&url_hint).await; - let final_s = apply_backoff(jittered, multiplier); - - let cron = secs_to_cron(final_s); + // Normalize 5-field POSIX cron to the 6-field form tokio-cron-scheduler + // expects (or accept 6-field expressions as-is). 
+ let cron = normalize_cron_expression(effective_cron).with_context(|| { + format!( + "Invalid cron expression for source {} ({}): {}", + source.id, source.display_name, effective_cron + ) + })?; let db_clone = db.clone(); let source_id = source.id; @@ -162,14 +158,11 @@ async fn register_one( source_id, display_name ); let task_type = TaskType::PollReleaseSource { source_id }; - match TaskRepository::enqueue(&db, task_type, None).await { - Ok(_) => {} - Err(e) => { - error!( - "Failed to enqueue PollReleaseSource for source {}: {}", - source_id, e - ); - } + if let Err(e) = TaskRepository::enqueue(&db, task_type, None).await { + error!( + "Failed to enqueue PollReleaseSource for source {}: {}", + source_id, e + ); } }) }) @@ -180,70 +173,17 @@ async fn register_one( .await .with_context(|| format!("Failed to add cron job for source {}", source.id))?; state.jobs.insert(source.id, job_uuid); + state + .last_cron + .insert(source.id, effective_cron.to_string()); info!( - "Scheduled poll for source {} ({}) every {}s (resolved {}, backoff x{:.1})", - source.id, source.display_name, final_s, resolved, multiplier + "Scheduled poll for source {} ({}) with cron `{}`", + source.id, source.display_name, effective_cron ); Ok(()) } -/// Build a 6-part cron expression that fires approximately every `secs` -/// seconds. -/// -/// `tokio-cron-scheduler` doesn't have a `every-N-seconds-from-now` -/// primitive; we approximate with cron-style intervals: -/// -/// - `secs < 3600`: minute-granularity step (`0 */M * * * *`). Capped at 59 -/// minutes since `*/60` is invalid; sources that want longer must hit -/// the hourly branch. -/// - `secs ≥ 3600`: hour-granularity step (`0 0 */H * * *`). Capped at 23 -/// hours; for `secs ≥ 24h` we fall back to "once daily at 00:00" -/// (`0 0 0 * * *`). -/// -/// Caveat: the "step" semantics in cron align to wall-clock boundaries -/// (e.g., `*/30` fires at minute 0 and 30, not at "30 minutes from now"). 
-/// Combined with ±10% jitter at registration, this still spreads load -/// well; precise inter-poll spacing isn't a goal of this layer. -pub fn secs_to_cron(secs: u32) -> String { - let secs = secs.max(MIN_POLL_INTERVAL_S); - if secs < 3600 { - let mins = secs.div_ceil(60).clamp(1, 59); - if mins == 1 { - "0 * * * * *".to_string() - } else { - format!("0 */{} * * * *", mins) - } - } else if secs < 86_400 { - let hours = (secs / 3600).clamp(1, 23); - if hours == 1 { - "0 0 * * * *".to_string() - } else { - format!("0 0 */{} * * *", hours) - } - } else { - // ≥ 24h — fire once daily at midnight. Sources that want longer - // intervals (rare) get folded into "daily" since cron can't - // express "every 48h" cleanly without a state machine. - "0 0 0 * * *".to_string() - } -} - -/// Best-effort URL hint extraction matching the polling task's logic. Kept -/// in sync to avoid backoff key drift between scheduler and handler. -fn derive_url_hint(source: &crate::db::entities::release_sources::Model) -> String { - if let Some(cfg) = source.config.as_ref() { - for key in ["url", "feedUrl", "feed_url", "baseUrl", "base_url"] { - if let Some(v) = cfg.get(key).and_then(|v| v.as_str()) - && !v.is_empty() - { - return v.to_string(); - } - } - } - source.plugin_id.clone() -} - /// Outcome of an `enqueue_poll_now` call. 
#[derive(Debug, Clone, Copy)] pub struct EnqueuePollOutcome { @@ -295,35 +235,9 @@ pub async fn enqueue_poll_now( }) } -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn secs_to_cron_minute_step() { - assert_eq!(secs_to_cron(60), "0 * * * * *"); - assert_eq!(secs_to_cron(120), "0 */2 * * * *"); - assert_eq!(secs_to_cron(1800), "0 */30 * * * *"); - } - - #[test] - fn secs_to_cron_hour_step() { - assert_eq!(secs_to_cron(3600), "0 0 * * * *"); - assert_eq!(secs_to_cron(7200), "0 0 */2 * * *"); - // 6h - assert_eq!(secs_to_cron(21_600), "0 0 */6 * * *"); - } - - #[test] - fn secs_to_cron_daily_for_long_intervals() { - assert_eq!(secs_to_cron(86_400), "0 0 0 * * *"); - // "Every 48h" gets folded into daily. - assert_eq!(secs_to_cron(2 * 86_400), "0 0 0 * * *"); - } - - #[test] - fn secs_to_cron_clamps_to_min() { - // 10s clamps up to 60s → "0 * * * * *". - assert_eq!(secs_to_cron(10), "0 * * * * *"); - } +/// Read the resolved server-wide default cron schedule. Convenience for +/// callers (HTTP handlers, scheduler reconcile) that need it without +/// pulling in the schedule module directly. +pub async fn read_server_default_cron(settings: &SettingsService) -> String { + read_default_cron_schedule(settings).await } diff --git a/src/services/plugin/protocol.rs b/src/services/plugin/protocol.rs index c81aaaa2..cf08858d 100644 --- a/src/services/plugin/protocol.rs +++ b/src/services/plugin/protocol.rs @@ -416,11 +416,6 @@ pub struct ReleaseSourceCapability { /// Whether the plugin announces volume-level releases. #[serde(default)] pub can_announce_volumes: bool, - /// Default poll interval in seconds. Used when a `release_sources` row - /// for this plugin doesn't override it. Server settings can also set a - /// global default that takes precedence at schedule resolution time. 
- #[serde(default)] - pub default_poll_interval_s: u32, } impl Default for ReleaseSourceCapability { @@ -431,7 +426,6 @@ impl Default for ReleaseSourceCapability { requires_external_ids: Vec::new(), can_announce_chapters: true, can_announce_volumes: true, - default_poll_interval_s: 86_400, } } } @@ -2273,7 +2267,6 @@ mod tests { requires_external_ids: vec!["mangaupdates".to_string()], can_announce_chapters: true, can_announce_volumes: false, - default_poll_interval_s: 3600, }; let json = serde_json::to_value(&cap).unwrap(); assert_eq!(json["kinds"], json!(["rss-uploader"])); @@ -2281,7 +2274,6 @@ mod tests { assert_eq!(json["requiresExternalIds"], json!(["mangaupdates"])); assert!(json["canAnnounceChapters"].as_bool().unwrap()); assert!(!json["canAnnounceVolumes"].as_bool().unwrap()); - assert_eq!(json["defaultPollIntervalS"], 3600); } #[test] @@ -2324,8 +2316,7 @@ mod tests { "requiresAliases": true, "requiresExternalIds": [], "canAnnounceChapters": true, - "canAnnounceVolumes": true, - "defaultPollIntervalS": 3600 + "canAnnounceVolumes": true } } }); @@ -2334,7 +2325,6 @@ mod tests { let cap = manifest.capabilities.release_source.unwrap(); assert_eq!(cap.kinds, vec![ReleaseSourceKind::RssUploader]); assert!(cap.requires_aliases); - assert_eq!(cap.default_poll_interval_s, 3600); } #[test] diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index 12328f9c..c8d7b023 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -34,7 +34,6 @@ use crate::scheduler::Scheduler; use crate::services::release::candidate::ReleaseCandidate; use crate::services::release::languages::{includes, resolve_for_series}; use crate::services::release::matcher::{evaluate, resolve_threshold}; -use crate::services::release::schedule::{DEFAULT_POLL_INTERVAL_S, MIN_POLL_INTERVAL_S}; /// Default page size for `releases/list_tracked` when the caller doesn't /// specify one. 
Matches the Phase 3 risk-mitigation note. @@ -528,8 +527,8 @@ impl ReleasesRequestHandler { /// /// - **Upsert** every entry on `(plugin_id, source_key)`. New rows are /// inserted; existing rows have only the plugin-owned descriptive - /// fields refreshed. User-managed fields (`enabled`, `poll_interval_s`) - /// survive across re-registrations so an admin's interval override or + /// fields refreshed. User-managed fields (`enabled`, `cron_schedule`) + /// survive across re-registrations so an admin's schedule override or /// disable toggle isn't trampled when the plugin restarts. /// - **Prune** rows owned by this plugin whose `source_key` is not in the /// request. Deletes cascade to `release_ledger`. An empty `sources` @@ -542,8 +541,9 @@ impl ReleasesRequestHandler { /// /// `kind` is validated against the `release_source` capability the plugin /// declared in its manifest, so a plugin can't register sources of a - /// `kind` outside its declared capability surface. `poll_interval_s` is - /// taken from the request only when creating new rows; updates ignore it. + /// `kind` outside its declared capability surface. New rows always start + /// with `cron_schedule = NULL` (inherit the server-wide default); admins + /// override per-row in the settings UI. async fn handle_register_sources(&self, request: &JsonRpcRequest) -> JsonRpcResponse { let id = request.id.clone(); let params: RegisterSourcesRequest = match parse_params(&request.params) { @@ -604,23 +604,17 @@ impl ReleasesRequestHandler { } } - // Resolve the per-source default poll interval. Used only when - // creating new rows; existing rows keep their interval. Falls back - // to the host-wide default when the plugin's manifest declares 0. 
- let raw = if self.capability.default_poll_interval_s == 0 { - DEFAULT_POLL_INTERVAL_S - } else { - self.capability.default_poll_interval_s - }; - let default_interval = (raw as i32).max(MIN_POLL_INTERVAL_S as i32); - let keep_keys: Vec = params .sources .iter() .map(|s| s.source_key.clone()) .collect(); - // Upsert each source. + // Upsert each source. New rows start with `cron_schedule = NULL`, + // i.e. they inherit the server-wide + // `release_tracking.default_cron_schedule`. Admins override per-row + // via the settings UI; existing rows preserve their override on + // re-register. let mut registered = 0u32; for src in params.sources { let new = NewReleaseSource { @@ -628,7 +622,6 @@ impl ReleasesRequestHandler { source_key: src.source_key, display_name: src.display_name, kind: src.kind, - poll_interval_s: default_interval, enabled: None, config: src.config, }; @@ -937,7 +930,6 @@ mod tests { .collect(), can_announce_chapters: true, can_announce_volumes: true, - default_poll_interval_s: 3600, } } @@ -965,7 +957,6 @@ mod tests { source_key: "feed:1".to_string(), display_name: "Feed 1".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, @@ -1967,7 +1958,7 @@ mod tests { row.id, ReleaseSourceUpdate { enabled: Some(false), - poll_interval_s: Some(900), + cron_schedule: Some(Some("0 */6 * * *".to_string())), ..Default::default() }, ) @@ -1994,8 +1985,9 @@ mod tests { assert_eq!(after.config, Some(json!({ "subscription": "fresh" }))); assert!(!after.enabled, "user-set disabled must survive re-register"); assert_eq!( - after.poll_interval_s, 900, - "user-set poll_interval_s must survive re-register" + after.cron_schedule.as_deref(), + Some("0 */6 * * *"), + "user-set cron_schedule must survive re-register" ); } @@ -2012,7 +2004,6 @@ mod tests { source_key: "default".to_string(), display_name: "MangaUpdates".to_string(), kind: kind::RSS_SERIES.to_string(), - poll_interval_s: 3600, enabled: None, config: None, 
}, diff --git a/src/services/release/schedule.rs b/src/services/release/schedule.rs index 9db2cb03..001f15c0 100644 --- a/src/services/release/schedule.rs +++ b/src/services/release/schedule.rs @@ -1,80 +1,56 @@ -//! Polling-interval resolution and jitter for the release-source scheduler. +//! Cron-schedule resolution for release-source polling. //! -//! The scheduler fires one tick per enabled `release_sources` row. Each -//! row's effective poll interval is resolved once at scheduler-load time: +//! Resolution chain (consumed by [`crate::scheduler::release_sources`]): //! -//! 1. `release_sources.poll_interval_s` (per-source override) wins when -//! the column is non-default. -//! 2. Otherwise the global server default -//! `release_tracking.default_poll_interval_s` is used (default -//! `86400` = once daily). +//! 1. `release_sources.cron_schedule` (per-source override) wins when set. +//! 2. Otherwise the server-wide `release_tracking.default_cron_schedule` +//! setting. +//! 3. Otherwise the compile-time fallback ([`DEFAULT_CRON_SCHEDULE`]). //! -//! Per-series overrides (`series_tracking.poll_interval_override`) are -//! consulted by plugins that opt into per-series polling — they don't -//! apply at the scheduler level, since scheduler ticks are per-source, -//! not per-series. -//! -//! Jitter is ±10% of the interval, applied at scheduler load. It spreads -//! load across many sources so a fresh restart doesn't fire all sources -//! in lockstep. -//! -//! Per-host backoff is applied multiplicatively on top of the resolved -//! interval — see [`super::backoff`]. - -use rand::RngExt; - -/// Default poll interval if no setting is configured: 24 hours. -pub const DEFAULT_POLL_INTERVAL_S: u32 = 86_400; - -/// Minimum interval the scheduler will accept. Sub-minute polling is -/// pointless for release feeds and risks rate-limit hits. -pub const MIN_POLL_INTERVAL_S: u32 = 60; - -/// Setting key for the global default. 
-pub const SETTING_DEFAULT_POLL_INTERVAL_S: &str = "release_tracking.default_poll_interval_s"; - -/// Resolve the effective interval (in seconds) for a source row. -/// -/// `per_source` is `release_sources.poll_interval_s` (may be the row -/// default of `0` if unset). `global_default` is the configured server -/// default; `0` falls back to [`DEFAULT_POLL_INTERVAL_S`]. The chosen -/// value is clamped to [`MIN_POLL_INTERVAL_S`]. -pub fn resolve_interval_s(per_source: i32, global_default: u32) -> u32 { - let global = if global_default == 0 { - DEFAULT_POLL_INTERVAL_S - } else { - global_default - }; - let chosen = if per_source > 0 { - per_source as u32 +//! Per-host backoff lives in [`super::backoff`] and is consulted at +//! poll-fire time (not at scheduler-load time): a throttled host's tick is +//! short-circuited rather than rewriting the cron expression. This keeps +//! the cron source-of-truth simple: one row, one schedule. + +use crate::services::settings::SettingsService; + +/// Compile-time fallback when neither the per-source override nor the +/// server-wide setting are present. Daily at midnight (5-field POSIX cron). +pub const DEFAULT_CRON_SCHEDULE: &str = "0 0 * * *"; + +/// Setting key for the server-wide default. +pub const SETTING_DEFAULT_CRON_SCHEDULE: &str = "release_tracking.default_cron_schedule"; + +/// Read the server-wide default cron schedule. Falls back to +/// [`DEFAULT_CRON_SCHEDULE`] when the setting is missing or blank. +pub async fn read_default_cron_schedule(settings: &SettingsService) -> String { + let raw = settings + .get_string(SETTING_DEFAULT_CRON_SCHEDULE, DEFAULT_CRON_SCHEDULE) + .await + .unwrap_or_else(|_| DEFAULT_CRON_SCHEDULE.to_string()); + let trimmed = raw.trim(); + if trimmed.is_empty() { + DEFAULT_CRON_SCHEDULE.to_string() } else { - global - }; - chosen.max(MIN_POLL_INTERVAL_S) -} - -/// Apply ±10% jitter to a base interval. Returns a value in -/// `[0.9 * base, 1.1 * base]`, clamped to [`MIN_POLL_INTERVAL_S`]. 
-pub fn jitter_interval_s(base_s: u32) -> u32 { - if base_s == 0 { - return MIN_POLL_INTERVAL_S; + trimmed.to_string() } - let mut rng = rand::rng(); - let factor: f64 = rng.random_range(0.9_f64..1.1_f64); - let jittered = (base_s as f64 * factor).round() as u32; - jittered.max(MIN_POLL_INTERVAL_S) } -/// Apply a backoff multiplier (from [`super::backoff`]) to a base interval. -/// Returns the post-backoff interval, clamped to [`MIN_POLL_INTERVAL_S`]. -pub fn apply_backoff(base_s: u32, multiplier: f64) -> u32 { - let mult = if multiplier.is_finite() && multiplier >= 1.0 { - multiplier +/// Resolve the effective cron schedule for a source row. +/// +/// `per_source` is `release_sources.cron_schedule` (NULL when the row is +/// inheriting). `server_default` is the resolved server-wide default. The +/// returned string is the raw 5- or 6-field cron expression; callers +/// normalize to the 6-field tokio-cron-scheduler format via +/// [`crate::utils::cron::normalize_cron_expression`]. +pub fn resolve_cron_schedule(per_source: Option<&str>, server_default: &str) -> String { + if let Some(cron) = per_source.map(str::trim).filter(|s| !s.is_empty()) { + cron.to_string() + } else if !server_default.trim().is_empty() { + server_default.trim().to_string() } else { - 1.0 - }; - let scaled = (base_s as f64 * mult).round() as u32; - scaled.max(MIN_POLL_INTERVAL_S) + DEFAULT_CRON_SCHEDULE.to_string() + } } #[cfg(test)] @@ -83,71 +59,22 @@ mod tests { #[test] fn resolve_uses_per_source_when_set() { - assert_eq!(resolve_interval_s(7200, 86_400), 7_200); - } - - #[test] - fn resolve_falls_back_to_global_when_per_source_zero_or_negative() { - assert_eq!(resolve_interval_s(0, 3_600), 3_600); - assert_eq!(resolve_interval_s(-1, 3_600), 3_600); - } - - #[test] - fn resolve_uses_default_when_global_zero() { - assert_eq!(resolve_interval_s(0, 0), DEFAULT_POLL_INTERVAL_S); - } - - #[test] - fn resolve_clamps_to_minimum() { - assert_eq!(resolve_interval_s(10, 86_400), MIN_POLL_INTERVAL_S); 
- } - - #[test] - fn jitter_stays_within_band() { - let base = 3_600u32; - for _ in 0..200 { - let j = jitter_interval_s(base); - assert!( - j >= (base as f64 * 0.9).round() as u32 - 1, - "j too low: {}", - j - ); - assert!( - j <= (base as f64 * 1.1).round() as u32 + 1, - "j too high: {}", - j - ); - } - } - - #[test] - fn jitter_clamps_to_minimum() { - // base = 30, jitter 0.9..1.1 → 27..33; clamped to 60. - for _ in 0..50 { - assert!(jitter_interval_s(30) >= MIN_POLL_INTERVAL_S); - } - } - - #[test] - fn apply_backoff_scales_when_active() { - assert_eq!(apply_backoff(3_600, 2.0), 7_200); - assert_eq!(apply_backoff(3_600, 4.0), 14_400); - } - - #[test] - fn apply_backoff_passes_through_when_inactive() { - assert_eq!(apply_backoff(3_600, 1.0), 3_600); + assert_eq!( + resolve_cron_schedule(Some("0 */6 * * *"), "0 0 * * *"), + "0 */6 * * *" + ); } #[test] - fn apply_backoff_rejects_invalid_multipliers() { - assert_eq!(apply_backoff(3_600, 0.5), 3_600); - assert_eq!(apply_backoff(3_600, f64::NAN), 3_600); - assert_eq!(apply_backoff(3_600, -1.0), 3_600); + fn resolve_falls_back_to_server_default_when_per_source_blank() { + assert_eq!(resolve_cron_schedule(None, "0 0 * * *"), "0 0 * * *"); + assert_eq!(resolve_cron_schedule(Some(""), "0 0 * * *"), "0 0 * * *"); + assert_eq!(resolve_cron_schedule(Some(" "), "0 0 * * *"), "0 0 * * *"); } #[test] - fn apply_backoff_clamps_to_min() { - assert_eq!(apply_backoff(20, 1.0), MIN_POLL_INTERVAL_S); + fn resolve_uses_compile_time_default_when_both_blank() { + assert_eq!(resolve_cron_schedule(None, ""), DEFAULT_CRON_SCHEDULE); + assert_eq!(resolve_cron_schedule(None, " "), DEFAULT_CRON_SCHEDULE); } } diff --git a/src/tasks/handlers/poll_release_source.rs b/src/tasks/handlers/poll_release_source.rs index bc099815..b7301d00 100644 --- a/src/tasks/handlers/poll_release_source.rs +++ b/src/tasks/handlers/poll_release_source.rs @@ -734,7 +734,7 @@ mod tests { display_name: "n".to_string(), kind: kind::RSS_UPLOADER.to_string(), enabled: 
true, - poll_interval_s: 3600, + cron_schedule: None, last_polled_at: None, last_error: None, last_error_at: None, @@ -779,7 +779,6 @@ mod tests { source_key: "k".to_string(), display_name: "Nyaa".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: Some(false), config: None, }, @@ -814,7 +813,6 @@ mod tests { source_key: "metadata-piggyback".to_string(), display_name: "Metadata gap".to_string(), kind: kind::METADATA_PIGGYBACK.to_string(), - poll_interval_s: 86_400, enabled: None, config: None, }, @@ -850,7 +848,6 @@ mod tests { source_key: "k".to_string(), display_name: "Nope".to_string(), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, diff --git a/tests/api/releases.rs b/tests/api/releases.rs index c5eafba8..4902acda 100644 --- a/tests/api/releases.rs +++ b/tests/api/releases.rs @@ -14,7 +14,7 @@ use codex::db::ScanningStrategy; use codex::db::entities::release_sources::kind; use codex::db::repositories::{ LibraryRepository, NewReleaseEntry, NewReleaseSource, ReleaseLedgerRepository, - ReleaseSourceRepository, SeriesRepository, UserRepository, + ReleaseSourceRepository, ReleaseSourceUpdate, SeriesRepository, UserRepository, }; use codex::utils::password; use common::*; @@ -70,7 +70,6 @@ async fn make_source(db: &DatabaseConnection, source_key: &str) -> Uuid { source_key: source_key.to_string(), display_name: format!("Nyaa - {}", source_key), kind: kind::RSS_UPLOADER.to_string(), - poll_interval_s: 3600, enabled: None, config: None, }, @@ -456,7 +455,7 @@ async fn patch_source_can_disable_and_change_interval() { let body = UpdateReleaseSourceRequest { enabled: Some(false), - poll_interval_s: Some(7200), + cron_schedule: Some(Some("0 */6 * * *".to_string())), ..Default::default() }; let req = @@ -465,11 +464,12 @@ async fn patch_source_can_disable_and_change_interval() { assert_eq!(status, StatusCode::OK); let dto = dto.unwrap(); assert!(!dto.enabled); - assert_eq!(dto.poll_interval_s, 
7200); + assert_eq!(dto.cron_schedule.as_deref(), Some("0 */6 * * *")); + assert_eq!(dto.effective_cron_schedule, "0 */6 * * *"); } #[tokio::test] -async fn patch_source_rejects_non_positive_interval() { +async fn patch_source_rejects_invalid_cron() { let (db, _temp) = setup_test_db().await; let id = make_source(&db, "nyaa:user:tsuna69").await; @@ -478,7 +478,7 @@ async fn patch_source_rejects_non_positive_interval() { let app = create_test_router(state).await; let body = UpdateReleaseSourceRequest { - poll_interval_s: Some(0), + cron_schedule: Some(Some("not a cron".to_string())), ..Default::default() }; let req = @@ -487,6 +487,39 @@ async fn patch_source_rejects_non_positive_interval() { assert_eq!(status, StatusCode::BAD_REQUEST); } +#[tokio::test] +async fn patch_source_clears_cron_with_explicit_null() { + let (db, _temp) = setup_test_db().await; + let id = make_source(&db, "nyaa:user:tsuna69").await; + + // Seed a per-source override. + ReleaseSourceRepository::update( + &db, + id, + ReleaseSourceUpdate { + cron_schedule: Some(Some("0 */6 * * *".to_string())), + ..Default::default() + }, + ) + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + // Send `cron_schedule: null` to clear the override. + let body = serde_json::json!({ "cronSchedule": null }); + let req = + patch_json_request_with_auth(&format!("/api/v1/release-sources/{}", id), &body, &token); + let (status, dto): (StatusCode, Option) = make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + let dto = dto.unwrap(); + assert!(dto.cron_schedule.is_none(), "override cleared"); + // effectiveCronSchedule falls through to the server-wide default. 
+ assert!(!dto.effective_cron_schedule.is_empty()); +} + #[tokio::test] async fn patch_source_404_for_missing() { let (db, _temp) = setup_test_db().await; @@ -734,13 +767,13 @@ async fn reset_preserves_user_managed_source_fields() { let (db, _temp) = setup_test_db().await; let source = make_source(&db, "nyaa:user:tsuna69").await; - // Admin disables the source and overrides the interval. + // Admin disables the source and overrides the schedule. ReleaseSourceRepository::update( &db, source, ReleaseSourceUpdate { enabled: Some(false), - poll_interval_s: Some(900), + cron_schedule: Some(Some("0 */6 * * *".to_string())), display_name: Some("Custom Name".to_string()), ..Default::default() }, @@ -762,7 +795,11 @@ async fn reset_preserves_user_managed_source_fields() { .unwrap() .unwrap(); assert!(!after.enabled, "user-set enabled flag must survive a reset"); - assert_eq!(after.poll_interval_s, 900, "interval override survives"); + assert_eq!( + after.cron_schedule.as_deref(), + Some("0 */6 * * *"), + "schedule override survives" + ); assert_eq!(after.display_name, "Custom Name", "display name preserved"); } diff --git a/web/openapi.json b/web/openapi.json index 35de4c47..c74036fc 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -7193,7 +7193,7 @@ "Releases" ], "summary": "PATCH a release source (admin-only).", - "description": "Toggle `enabled`, override `pollIntervalS`, or rename `displayName`.", + "description": "Toggle `enabled`, override `cronSchedule`, or rename `displayName`.\nSending `cronSchedule: null` clears the override and reverts the row to\ninheriting the server-wide `release_tracking.default_cron_schedule`.", "operationId": "update_release_source", "parameters": [ { @@ -7305,7 +7305,7 @@ "Releases" ], "summary": "Reset a release source to a clean slate.", - "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). 
User-managed fields (`enabled`,\n`poll_interval_s`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", + "description": "Deletes every `release_ledger` row owned by the source and clears the\nsource's transient poll state (`etag`, `last_polled_at`, `last_error`,\n`last_error_at`, `last_summary`). User-managed fields (`enabled`,\n`cron_schedule`, `display_name`, `config`) are preserved.\n\nIntended for testing/troubleshooting: after a reset, the next poll\nfetches the upstream feed without an `If-None-Match` header (so no 304\nshort-circuit) and re-records every release as `announced`. Does NOT\nauto-enqueue a poll — call `POST /release-sources/{id}/poll-now` after\nresetting if you want immediate re-fetch.", "operationId": "reset_release_source", "parameters": [ { @@ -33375,7 +33375,7 @@ "displayName", "kind", "enabled", - "pollIntervalS", + "effectiveCronSchedule", "createdAt", "updatedAt" ], @@ -33387,9 +33387,20 @@ "type": "string", "format": "date-time" }, + "cronSchedule": { + "type": [ + "string", + "null" + ], + "description": "Per-source cron override (5-field POSIX cron). NULL when the row\ninherits the server-wide `release_tracking.default_cron_schedule`." + }, "displayName": { "type": "string" }, + "effectiveCronSchedule": { + "type": "string", + "description": "The cron expression actually used by the scheduler for this source:\nthe row's `cron_schedule` if set, otherwise the resolved server-wide\ndefault. Lets the UI display \"Daily (Default)\" without needing to\nfetch the global setting separately." 
+ }, "enabled": { "type": "boolean" }, @@ -33441,10 +33452,6 @@ "description": "Owning plugin id, or `core` for in-core synthetic sources.", "example": "release-nyaa" }, - "pollIntervalS": { - "type": "integer", - "format": "int32" - }, "sourceKey": { "type": "string", "description": "Plugin-defined unique key.", @@ -38699,8 +38706,15 @@ }, "UpdateReleaseSourceRequest": { "type": "object", - "description": "PATCH payload for a release source. All fields optional; omit to leave alone.", + "description": "PATCH payload for a release source. All fields optional; omit to leave alone.\n\n`cron_schedule` uses double-Option semantics:\n- field absent (`None`): leave the row's cron_schedule unchanged\n- explicit `null` (`Some(None)`) / `\"\"` / `\" \"`: clear the override\n (revert to inheriting the server-wide\n `release_tracking.default_cron_schedule`)\n- `Some(Some(\"0 */6 * * *\"))`: set a per-source override", "properties": { + "cronSchedule": { + "type": [ + "string", + "null" + ], + "description": "5-field POSIX cron expression. Use `null` (or empty string) to\nclear the override and inherit the server-wide default." + }, "displayName": { "type": [ "string", @@ -38712,14 +38726,6 @@ "boolean", "null" ] - }, - "pollIntervalS": { - "type": [ - "integer", - "null" - ], - "format": "int32", - "description": "Polling interval override (seconds). Must be > 0." } } }, diff --git a/web/src/api/releases.ts b/web/src/api/releases.ts index 4115cbe5..b7a0147c 100644 --- a/web/src/api/releases.ts +++ b/web/src/api/releases.ts @@ -180,7 +180,7 @@ export const releaseSourcesApi = { /** * Drop every ledger row for this source and clear its transient poll * state (etag, last_polled_at, last_error, last_summary). User-managed - * fields (enabled, pollIntervalS, displayName, config) are preserved. + * fields (enabled, cronSchedule, displayName, config) are preserved. 
* * Used as a "force re-emit" lever for testing: after a reset, the next * poll fetches the upstream feed without `If-None-Match` (no 304 diff --git a/web/src/components/series/SeriesReleasesPanel.test.tsx b/web/src/components/series/SeriesReleasesPanel.test.tsx index 5a0604f8..dbf9cb82 100644 --- a/web/src/components/series/SeriesReleasesPanel.test.tsx +++ b/web/src/components/series/SeriesReleasesPanel.test.tsx @@ -163,7 +163,8 @@ describe("SeriesReleasesPanel", () => { sourceKey: "nyaa:user:tsuna69", displayName: "Nyaa - tsuna69", kind: "rss_uploader", - pollIntervalS: 3600, + cronSchedule: null, + effectiveCronSchedule: "0 * * * *", enabled: true, config: null, createdAt: "2026-01-01T00:00:00Z", diff --git a/web/src/pages/ReleasesInbox.test.tsx b/web/src/pages/ReleasesInbox.test.tsx index 7626881f..13bb9782 100644 --- a/web/src/pages/ReleasesInbox.test.tsx +++ b/web/src/pages/ReleasesInbox.test.tsx @@ -74,7 +74,8 @@ function source(over: Partial = {}): ReleaseSource { pluginId: "release-mangaupdates", kind: "metadata-feed", enabled: true, - pollIntervalS: 86400, + cronSchedule: null, + effectiveCronSchedule: "0 0 * * *", createdAt: "2026-05-01T00:00:00Z", updatedAt: "2026-05-01T00:00:00Z", ...over, diff --git a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx index af89b590..f3476f5e 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.test.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.test.tsx @@ -47,7 +47,8 @@ function source(over: Partial = {}): ReleaseSource { displayName: "MangaUpdates batch", kind: "rss-series", enabled: true, - pollIntervalS: 21600, + cronSchedule: null, + effectiveCronSchedule: "0 0 * * *", lastPolledAt: "2026-05-01T00:00:00Z", lastError: null, lastErrorAt: null, diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx index 188d57c5..1743c67e 100644 --- 
a/web/src/pages/settings/ReleaseTrackingSettings.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -1,5 +1,6 @@ import { ActionIcon, + Anchor, Badge, Box, Button, @@ -7,7 +8,6 @@ import { Group, Loader, MultiSelect, - NumberInput, Stack, Switch, Table, @@ -26,11 +26,14 @@ import { IconTrash, } from "@tabler/icons-react"; import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; +import { CronExpressionParser } from "cron-parser"; +import { toString as cronToString } from "cronstrue"; import { formatDistanceToNow } from "date-fns"; import { type Dispatch, type SetStateAction, useMemo, useState } from "react"; import { pluginsApi } from "@/api/plugins"; import type { ReleaseSource } from "@/api/releases"; import { settingsApi } from "@/api/settings"; +import { CronInput } from "@/components/forms/CronInput"; import { usePollReleaseSourceNow, useReleaseSources, @@ -56,19 +59,28 @@ function parseArraySetting(value: string | undefined | null): string[] { } } -const PRESETS = [ - { value: 3600, label: "1h" }, - { value: 21600, label: "6h" }, - { value: 43200, label: "12h" }, - { value: 86400, label: "Daily" }, - { value: 604800, label: "Weekly" }, -]; - -function intervalLabel(seconds: number): string { - const preset = PRESETS.find((p) => p.value === seconds); - if (preset) return preset.label; - if (seconds % 3600 === 0) return `${seconds / 3600}h`; - return `${seconds}s`; +/** + * Render a cron expression as a human-readable phrase. Mirrors the logic in + * `` (5-part → cronstrue normalization). Returns the raw expression + * as a fallback if parsing fails so we still show *something* meaningful. + */ +function describeCron(expression: string): string { + const trimmed = expression.trim(); + if (!trimmed) return ""; + try { + CronExpressionParser.parse(trimmed); + const parts = trimmed.split(/\s+/); + const normalized = + parts.length === 5 + ? parts.map((p) => (p.startsWith("/") ? 
`*${p}` : p)).join(" ") + : trimmed; + return cronToString(normalized, { + throwExceptionOnParseError: false, + verbose: false, + }); + } catch { + return trimmed; + } } export function ReleaseTrackingSettings() { @@ -164,10 +176,12 @@ export function ReleaseTrackingSettings() { update: { enabled }, }) } - onIntervalChange={(seconds) => + onCronScheduleChange={(cronSchedule) => update.mutate({ sourceId: source.id, - update: { pollIntervalS: seconds }, + // Send `null` to clear the override and revert to + // inheriting the server-wide default. + update: { cronSchedule }, }) } onPollNow={() => { @@ -370,7 +384,8 @@ function NotificationPreferencesCard() { interface RowProps { source: ReleaseSource; onToggle: (enabled: boolean) => void; - onIntervalChange: (seconds: number) => void; + /** `null` clears the override and reverts to the server-wide default. */ + onCronScheduleChange: (cronSchedule: string | null) => void; onPollNow: () => void; pollNowPending: boolean; onReset: () => void; @@ -380,18 +395,46 @@ interface RowProps { function ReleaseSourceRow({ source, onToggle, - onIntervalChange, + onCronScheduleChange, onPollNow, pollNowPending, onReset, resetPending, }: RowProps) { - const [draft, setDraft] = useState(source.pollIntervalS); + // `cronSchedule != null` means the row has a per-source override; render the + // editor inline. Otherwise render the inherited default with an "Override" + // affordance. + const [isOverriding, setIsOverriding] = useState( + source.cronSchedule !== null, + ); + const [draft, setDraft] = useState( + source.cronSchedule ?? source.effectiveCronSchedule, + ); const lastPolled = source.lastPolledAt ? formatDistanceToNow(new Date(source.lastPolledAt), { addSuffix: true }) : "—"; + const commitDraft = () => { + const trimmed = draft.trim(); + if (!trimmed) { + // Empty editor = revert to inherit. 
+ if (source.cronSchedule !== null) onCronScheduleChange(null); + setIsOverriding(false); + setDraft(source.effectiveCronSchedule); + return; + } + if (trimmed !== source.cronSchedule) { + onCronScheduleChange(trimmed); + } + }; + + const resetToDefault = () => { + if (source.cronSchedule !== null) onCronScheduleChange(null); + setIsOverriding(false); + setDraft(source.effectiveCronSchedule); + }; + return ( @@ -413,38 +456,46 @@ function ReleaseSourceRow({ - - { - if (typeof value === "number") { - setDraft(value); - } else if (value === "") { - setDraft(null); - } - }} - onBlur={() => { - if ( - draft !== null && - draft > 0 && - draft !== source.pollIntervalS - ) { - onIntervalChange(draft); - } else { - setDraft(source.pollIntervalS); - } - }} - min={60} - max={604800} - step={60} - w={120} - suffix=" s" - aria-label="Poll interval seconds" - /> - - ≈ {intervalLabel(source.pollIntervalS)} - - + {isOverriding ? ( + + + + Reset to default + + + ) : ( + + + {describeCron(source.effectiveCronSchedule)}{" "} + + (Default) + + + { + setIsOverriding(true); + setDraft(source.effectiveCronSchedule); + }} + > + Override + + + )} diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index c6868081..0b1a48c8 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -2484,7 +2484,9 @@ export interface paths { head?: never; /** * PATCH a release source (admin-only). - * @description Toggle `enabled`, override `pollIntervalS`, or rename `displayName`. + * @description Toggle `enabled`, override `cronSchedule`, or rename `displayName`. + * Sending `cronSchedule: null` clears the override and reverts the row to + * inheriting the server-wide `release_tracking.default_cron_schedule`. 
*/ patch: operations["update_release_source"]; trace?: never; @@ -2525,7 +2527,7 @@ export interface paths { * @description Deletes every `release_ledger` row owned by the source and clears the * source's transient poll state (`etag`, `last_polled_at`, `last_error`, * `last_error_at`, `last_summary`). User-managed fields (`enabled`, - * `poll_interval_s`, `display_name`, `config`) are preserved. + * `cron_schedule`, `display_name`, `config`) are preserved. * * Intended for testing/troubleshooting: after a reset, the next poll * fetches the upstream feed without an `If-None-Match` header (so no 304 @@ -14941,7 +14943,19 @@ export interface components { config?: unknown; /** Format: date-time */ createdAt: string; + /** + * @description Per-source cron override (5-field POSIX cron). NULL when the row + * inherits the server-wide `release_tracking.default_cron_schedule`. + */ + cronSchedule?: string | null; displayName: string; + /** + * @description The cron expression actually used by the scheduler for this source: + * the row's `cron_schedule` if set, otherwise the resolved server-wide + * default. Lets the UI display "Daily (Default)" without needing to + * fetch the global setting separately. + */ + effectiveCronSchedule: string; enabled: boolean; /** @description Opaque etag/cursor used for conditional fetches. */ etag?: string | null; @@ -14969,8 +14983,6 @@ export interface components { * @example release-nyaa */ pluginId: string; - /** Format: int32 */ - pollIntervalS: number; /** * @description Plugin-defined unique key. * @example nyaa:user:tsuna69 @@ -17798,15 +17810,24 @@ export interface components { /** @description New state. See [`ReleaseLedgerEntryDto::state`] for allowed values. */ state?: string | null; }; - /** @description PATCH payload for a release source. All fields optional; omit to leave alone. */ + /** + * @description PATCH payload for a release source. All fields optional; omit to leave alone. 
+ * + * `cron_schedule` uses double-Option semantics: + * - field absent (`None`): leave the row's cron_schedule unchanged + * - explicit `null` (`Some(None)`) / `""` / `" "`: clear the override + * (revert to inheriting the server-wide + * `release_tracking.default_cron_schedule`) + * - `Some(Some("0 *\/6 * * *"))`: set a per-source override + */ UpdateReleaseSourceRequest: { - displayName?: string | null; - enabled?: boolean | null; /** - * Format: int32 - * @description Polling interval override (seconds). Must be > 0. + * @description 5-field POSIX cron expression. Use `null` (or empty string) to + * clear the override and inherit the server-wide default. */ - pollIntervalS?: number | null; + cronSchedule?: string | null; + displayName?: string | null; + enabled?: boolean | null; }; /** * @description PATCH payload for tracking config. All fields are optional: From a7af2d0ae33370934ca5a9e322c26dd97cc31951 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 20:08:20 -0700 Subject: [PATCH 25/29] feat(release-tracking): expose default cron schedule on settings page The `release_tracking.default_cron_schedule` setting was seeded but had no UI: ServerSettings hides the "Release Tracking" category in favor of the dedicated page, and that page only handled notification filters and per-source rows. Admins had no way to change the server-wide default. Add a "Default schedule" card to the Release Tracking settings page that reads/writes the setting via the existing settings API and uses the shared ``. Saving invalidates the source list query so every inheriting row's "(Default)" label refreshes immediately. Also fix two related bugs surfaced while testing: - `ReleaseSourceDto.cron_schedule` was annotated with `skip_serializing_if = "Option::is_none"`, so inheriting rows arrived on the wire as `undefined` rather than `null`. Drop the attribute so the field is always present, eliminating the omit-vs-null ambiguity for clients. 
- `ReleaseSourceRow` checked `cronSchedule !== null`, which was true for `undefined`, so the editor opened pre-filled on every inheriting row. Switch to a `Boolean(cronSchedule)` check that handles `null`, `undefined`, and empty strings uniformly. --- docs/api/openapi.json | 2 +- src/api/routes/v1/dto/release.rs | 5 +- web/openapi.json | 2 +- .../settings/ReleaseTrackingSettings.tsx | 105 ++++++++++++++++-- web/src/types/api.generated.ts | 4 +- 5 files changed, 104 insertions(+), 14 deletions(-) diff --git a/docs/api/openapi.json b/docs/api/openapi.json index c74036fc..343c4b67 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -33392,7 +33392,7 @@ "string", "null" ], - "description": "Per-source cron override (5-field POSIX cron). NULL when the row\ninherits the server-wide `release_tracking.default_cron_schedule`." + "description": "Per-source cron override (5-field POSIX cron). `null` when the row\ninherits the server-wide `release_tracking.default_cron_schedule`.\nAlways present in the response (not omitted on null) so clients can\ndistinguish \"inheriting\" from \"field missing.\"" }, "displayName": { "type": "string" diff --git a/src/api/routes/v1/dto/release.rs b/src/api/routes/v1/dto/release.rs index 9b661763..24ab3707 100644 --- a/src/api/routes/v1/dto/release.rs +++ b/src/api/routes/v1/dto/release.rs @@ -145,9 +145,10 @@ pub struct ReleaseSourceDto { /// `rss-uploader` | `rss-series` | `api-feed` | `metadata-feed` | `metadata-piggyback`. pub kind: String, pub enabled: bool, - /// Per-source cron override (5-field POSIX cron). NULL when the row + /// Per-source cron override (5-field POSIX cron). `null` when the row /// inherits the server-wide `release_tracking.default_cron_schedule`. - #[serde(skip_serializing_if = "Option::is_none")] + /// Always present in the response (not omitted on null) so clients can + /// distinguish "inheriting" from "field missing." 
pub cron_schedule: Option, /// The cron expression actually used by the scheduler for this source: /// the row's `cron_schedule` if set, otherwise the resolved server-wide diff --git a/web/openapi.json b/web/openapi.json index c74036fc..343c4b67 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -33392,7 +33392,7 @@ "string", "null" ], - "description": "Per-source cron override (5-field POSIX cron). NULL when the row\ninherits the server-wide `release_tracking.default_cron_schedule`." + "description": "Per-source cron override (5-field POSIX cron). `null` when the row\ninherits the server-wide `release_tracking.default_cron_schedule`.\nAlways present in the response (not omitted on null) so clients can\ndistinguish \"inheriting\" from \"field missing.\"" }, "displayName": { "type": "string" diff --git a/web/src/pages/settings/ReleaseTrackingSettings.tsx b/web/src/pages/settings/ReleaseTrackingSettings.tsx index 1743c67e..502062d6 100644 --- a/web/src/pages/settings/ReleaseTrackingSettings.tsx +++ b/web/src/pages/settings/ReleaseTrackingSettings.tsx @@ -44,6 +44,7 @@ import { useUserPreference } from "@/hooks/useUserPreference"; const SETTING_NOTIFY_LANGUAGES = "release_tracking.notify_languages"; const SETTING_NOTIFY_PLUGINS = "release_tracking.notify_plugins"; +const SETTING_DEFAULT_CRON_SCHEDULE = "release_tracking.default_cron_schedule"; const PREF_MUTED_SERIES = "release_tracking.muted_series_ids"; /** Parse a settings-table JSON-array value back to a string list. */ @@ -128,6 +129,8 @@ export function ReleaseTrackingSettings() { fetch. + + {sourcesQuery.isLoading ? ( @@ -215,6 +218,90 @@ export function ReleaseTrackingSettings() { ); } +/** + * Server-wide default cron schedule for release-source polling. Each + * `release_sources` row whose `cron_schedule` is NULL inherits this value. + * The compile-time fallback (`"0 0 * * *"`) only applies if the setting row + * itself is missing. 
+ */ +function DefaultScheduleCard() { + const queryClient = useQueryClient(); + const settingQuery = useQuery({ + queryKey: ["admin-setting", SETTING_DEFAULT_CRON_SCHEDULE], + queryFn: () => settingsApi.get(SETTING_DEFAULT_CRON_SCHEDULE), + }); + + const serverValue = settingQuery.data?.value ?? ""; + const [draft, setDraft] = useState(serverValue); + // Sync local draft when the server value changes (initial load, refetch). + // We deliberately don't useEffect: comparing the string each render is + // cheap, and we only update when the upstream value actually changes. + if (draft === "" && serverValue !== "" && !settingQuery.isFetching) { + setDraft(serverValue); + } + + const updateMutation = useMutation({ + mutationFn: (value: string) => + settingsApi.update(SETTING_DEFAULT_CRON_SCHEDULE, { value }), + onSuccess: () => { + queryClient.invalidateQueries({ + queryKey: ["admin-setting", SETTING_DEFAULT_CRON_SCHEDULE], + }); + // Source rows display `effectiveCronSchedule` resolved server-side, + // so a default change must invalidate the source list to refresh + // every inheriting row's "(Default)" label. + queryClient.invalidateQueries({ queryKey: ["release-sources"] }); + notifications.show({ + title: "Default schedule saved", + message: + "All sources without a per-row override will use the new schedule.", + color: "green", + }); + }, + onError: (err: Error) => + notifications.show({ + title: "Failed to save", + message: err.message ?? "Could not update default schedule.", + color: "red", + }), + }); + + const commit = () => { + const trimmed = draft.trim(); + if (!trimmed || trimmed === serverValue) { + setDraft(serverValue); + return; + } + updateMutation.mutate(trimmed); + }; + + return ( + + + + + Default schedule + + + Server-wide default cron used by every release source that doesn't + have its own per-row override. Changing this propagates immediately to + inheriting rows. 
+ + + + + ); +} + function NotificationPreferencesCard() { const queryClient = useQueryClient(); @@ -401,14 +488,14 @@ function ReleaseSourceRow({ onReset, resetPending, }: RowProps) { - // `cronSchedule != null` means the row has a per-source override; render the - // editor inline. Otherwise render the inherited default with an "Override" - // affordance. - const [isOverriding, setIsOverriding] = useState( - source.cronSchedule !== null, - ); + // Truthy `cronSchedule` means the row has a per-source override; render the + // editor inline. The server omits the field entirely (rather than sending + // `null`) when the row is inheriting, so accept both `null` and `undefined` + // as "no override." + const hasOverride = Boolean(source.cronSchedule); + const [isOverriding, setIsOverriding] = useState(hasOverride); const [draft, setDraft] = useState( - source.cronSchedule ?? source.effectiveCronSchedule, + source.cronSchedule || source.effectiveCronSchedule, ); const lastPolled = source.lastPolledAt @@ -419,7 +506,7 @@ function ReleaseSourceRow({ const trimmed = draft.trim(); if (!trimmed) { // Empty editor = revert to inherit. - if (source.cronSchedule !== null) onCronScheduleChange(null); + if (source.cronSchedule) onCronScheduleChange(null); setIsOverriding(false); setDraft(source.effectiveCronSchedule); return; @@ -430,7 +517,7 @@ function ReleaseSourceRow({ }; const resetToDefault = () => { - if (source.cronSchedule !== null) onCronScheduleChange(null); + if (source.cronSchedule) onCronScheduleChange(null); setIsOverriding(false); setDraft(source.effectiveCronSchedule); }; diff --git a/web/src/types/api.generated.ts b/web/src/types/api.generated.ts index 0b1a48c8..70a53be5 100644 --- a/web/src/types/api.generated.ts +++ b/web/src/types/api.generated.ts @@ -14944,8 +14944,10 @@ export interface components { /** Format: date-time */ createdAt: string; /** - * @description Per-source cron override (5-field POSIX cron). 
NULL when the row + * @description Per-source cron override (5-field POSIX cron). `null` when the row * inherits the server-wide `release_tracking.default_cron_schedule`. + * Always present in the response (not omitted on null) so clients can + * distinguish "inheriting" from "field missing." */ cronSchedule?: string | null; displayName: string; From 2e28eb0c76fd37e7b7524905e749afe2bc6ea370 Mon Sep 17 00:00:00 2001 From: Sylvain Cau Date: Tue, 5 May 2026 21:02:37 -0700 Subject: [PATCH 26/29] feat(release-tracking): auto-ignore releases for already-owned volumes/chapters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add an `ignored` ledger state distinct from `dismissed` (a user decision) that ingestion applies automatically when a release directly matches a volume or chapter the user already owns. Direct matches only: a release for "Vol 1" matches an owned whole vol 1 but not a "Ch 5 of vol 1" book, and a release for "Ch 12" never matches based on owned volume metadata (chapter→volume mapping is unreliable upstream). When no book in the series carries volume metadata, fall back to volumes_owned_count for volume-only releases. Compute the initial state in the poll handler and the reverse-RPC record handler, with a per-poll cache to avoid N+1 owned-key lookups, and skip the release_announced SSE emit when a row lands as anything other than `announced` so notifications and inbox stay quiet. Add `ignore` and `reset` bulk actions to /api/v1/releases/bulk; reset returns any state to `announced`, giving users a universal undo for mistaken dismiss/acquire/ignore. The series panel's "New" filter already filters to `announced` and the cross-series inbox defaults to the same, so the new state is hidden from both surfaces by default and visible under "All" or the new "Ignored" filter option. 
Includes unit tests for the predicate, repository tests for the owned-keys query, ledger tests for initial_state, and API tests for the new bulk actions. --- docs/api/openapi.json | 4 +- src/api/routes/v1/dto/release.rs | 7 + src/api/routes/v1/handlers/releases.rs | 22 +- src/db/entities/release_ledger.rs | 13 +- src/db/repositories/release_ledger.rs | 36 ++- src/db/repositories/series.rs | 181 ++++++++++++++ src/services/plugin/releases_handler.rs | 40 +++- src/services/release/auto_ignore.rs | 220 ++++++++++++++++++ src/services/release/matcher.rs | 1 + src/services/release/mod.rs | 1 + src/tasks/handlers/poll_release_source.rs | 74 +++++- tests/api/releases.rs | 78 +++++++ web/openapi.json | 4 +- .../releases/ReleasesBulkActionBar.tsx | 28 ++- web/src/hooks/useReleases.ts | 6 +- web/src/pages/ReleasesInbox.tsx | 1 + web/src/types/api.generated.ts | 12 +- 17 files changed, 704 insertions(+), 24 deletions(-) create mode 100644 src/services/release/auto_ignore.rs diff --git a/docs/api/openapi.json b/docs/api/openapi.json index 343c4b67..937a3a3a 100644 --- a/docs/api/openapi.json +++ b/docs/api/openapi.json @@ -7467,7 +7467,7 @@ "Releases" ], "summary": "Apply an action to a batch of ledger rows.", - "description": "`dismiss` and `mark-acquired` set state in-place. `delete` removes\nthe rows and clears the affected sources' etags so the next poll\nre-fetches without `If-None-Match`. All three run as bulk SQL — no\nper-row round trips — so this scales to deleting thousands of rows in\none call.", + "description": "`dismiss`, `mark-acquired`, `ignore`, and `reset` all set state\nin-place. `delete` removes the rows and clears the affected sources'\netags so the next poll re-fetches without `If-None-Match`. 
All run\nas bulk SQL (no per-row round trips), so this scales to thousands of\nrows in one call.", "operationId": "bulk_release_action", "requestBody": { "content": { @@ -21653,6 +21653,8 @@ "enum": [ "dismiss", "mark-acquired", + "ignore", + "reset", "delete" ] }, diff --git a/src/api/routes/v1/dto/release.rs b/src/api/routes/v1/dto/release.rs index 24ab3707..f386c894 100644 --- a/src/api/routes/v1/dto/release.rs +++ b/src/api/routes/v1/dto/release.rs @@ -344,6 +344,13 @@ pub enum BulkReleaseAction { Dismiss, /// Set state to `marked_acquired`. MarkAcquired, + /// Set state to `ignored`. Same effect as auto-ignore at ingestion, + /// but applied manually after the fact. + Ignore, + /// Reset state back to `announced`. Universal undo for `dismissed`, + /// `marked_acquired`, and `ignored`. Does not re-emit the release- + /// announced SSE event (the user is the one driving the change). + Reset, /// Hard-delete the ledger rows. Each affected source's `etag` is /// cleared so the next poll re-fetches without `If-None-Match` and /// re-announces the deleted releases. diff --git a/src/api/routes/v1/handlers/releases.rs b/src/api/routes/v1/handlers/releases.rs index ca09d752..8a1eb88e 100644 --- a/src/api/routes/v1/handlers/releases.rs +++ b/src/api/routes/v1/handlers/releases.rs @@ -713,11 +713,11 @@ pub async fn delete_release( /// Apply an action to a batch of ledger rows. /// -/// `dismiss` and `mark-acquired` set state in-place. `delete` removes -/// the rows and clears the affected sources' etags so the next poll -/// re-fetches without `If-None-Match`. All three run as bulk SQL — no -/// per-row round trips — so this scales to deleting thousands of rows in -/// one call. +/// `dismiss`, `mark-acquired`, `ignore`, and `reset` all set state +/// in-place. `delete` removes the rows and clears the affected sources' +/// etags so the next poll re-fetches without `If-None-Match`. 
All run +/// as bulk SQL (no per-row round trips), so this scales to thousands of +/// rows in one call. #[utoipa::path( post, path = "/api/v1/releases/bulk", @@ -784,6 +784,18 @@ pub async fn bulk_release_action( ) .await .map_err(|e| ApiError::Internal(format!("Failed to mark releases acquired: {}", e)))?, + BulkReleaseAction::Ignore => { + ReleaseLedgerRepository::set_state_many(&state.db, &request.ids, ledger_state::IGNORED) + .await + .map_err(|e| ApiError::Internal(format!("Failed to ignore releases: {}", e)))? + } + BulkReleaseAction::Reset => ReleaseLedgerRepository::set_state_many( + &state.db, + &request.ids, + ledger_state::ANNOUNCED, + ) + .await + .map_err(|e| ApiError::Internal(format!("Failed to reset releases: {}", e)))?, BulkReleaseAction::Delete => { let count = ReleaseLedgerRepository::delete_many(&state.db, &request.ids) .await diff --git a/src/db/entities/release_ledger.rs b/src/db/entities/release_ledger.rs index 23d9214d..1ce6b13a 100644 --- a/src/db/entities/release_ledger.rs +++ b/src/db/entities/release_ledger.rs @@ -38,7 +38,7 @@ pub struct Model { /// `services::release::candidate::MediaUrlKind` for the canonical list. pub media_url_kind: Option, pub confidence: f64, - /// `announced` | `dismissed` | `marked_acquired` | `hidden`. + /// `announced` | `dismissed` | `marked_acquired` | `ignored` | `hidden`. pub state: String, pub metadata: Option, pub observed_at: DateTime, @@ -84,10 +84,18 @@ pub mod state { pub const ANNOUNCED: &str = "announced"; pub const DISMISSED: &str = "dismissed"; pub const MARKED_ACQUIRED: &str = "marked_acquired"; + /// Auto-applied at ingestion when the release matches a book the user + /// already owns (direct match on volume or chapter). Distinct from + /// `dismissed`, which is a user decision. Reversible via the bulk + /// `reset` action. 
+ pub const IGNORED: &str = "ignored"; pub const HIDDEN: &str = "hidden"; pub fn is_valid(s: &str) -> bool { - matches!(s, ANNOUNCED | DISMISSED | MARKED_ACQUIRED | HIDDEN) + matches!( + s, + ANNOUNCED | DISMISSED | MARKED_ACQUIRED | IGNORED | HIDDEN + ) } } @@ -100,6 +108,7 @@ mod tests { assert!(state::is_valid("announced")); assert!(state::is_valid("dismissed")); assert!(state::is_valid("marked_acquired")); + assert!(state::is_valid("ignored")); assert!(state::is_valid("hidden")); assert!(!state::is_valid("acquired")); assert!(!state::is_valid("new")); diff --git a/src/db/repositories/release_ledger.rs b/src/db/repositories/release_ledger.rs index b6b1f440..20583f64 100644 --- a/src/db/repositories/release_ledger.rs +++ b/src/db/repositories/release_ledger.rs @@ -37,6 +37,10 @@ pub struct NewReleaseEntry { pub confidence: f64, pub metadata: Option, pub observed_at: chrono::DateTime, + /// State to insert with. `None` defaults to `announced`. Used by the + /// poll/reverse-RPC path to insert directly as `ignored` when the + /// release matches a book the user already owns. + pub initial_state: Option, } /// Outcome of a `record` call. 
@@ -136,6 +140,11 @@ impl ReleaseLedgerRepository { }); } + let initial_state = match entry.initial_state { + Some(s) if state::is_valid(&s) => s, + Some(invalid) => anyhow::bail!("invalid initial_state: {}", invalid), + None => state::ANNOUNCED.to_string(), + }; let active = release_ledger::ActiveModel { id: Set(Uuid::new_v4()), series_id: Set(entry.series_id), @@ -151,7 +160,7 @@ impl ReleaseLedgerRepository { media_url: Set(entry.media_url), media_url_kind: Set(entry.media_url_kind), confidence: Set(entry.confidence), - state: Set(state::ANNOUNCED.to_string()), + state: Set(initial_state), metadata: Set(entry.metadata), observed_at: Set(entry.observed_at), created_at: Set(Utc::now()), @@ -554,9 +563,34 @@ mod tests { confidence: 0.95, metadata: None, observed_at: Utc::now(), + initial_state: None, } } + #[tokio::test] + async fn record_uses_initial_state_when_provided() { + let (db, _temp) = create_test_db().await; + let conn = db.sea_orm_connection(); + let (series_id, source_id) = setup_world(conn).await; + + // Default: lands as announced. + let default = ReleaseLedgerRepository::record(conn, entry(series_id, source_id, "rel-d")) + .await + .unwrap(); + assert_eq!(default.row.state, state::ANNOUNCED); + + // Caller-specified ignored: lands as ignored. + let mut e = entry(series_id, source_id, "rel-i"); + e.initial_state = Some(state::IGNORED.to_string()); + let ignored = ReleaseLedgerRepository::record(conn, e).await.unwrap(); + assert_eq!(ignored.row.state, state::IGNORED); + + // Invalid state: rejected. 
+ let mut e = entry(series_id, source_id, "rel-x"); + e.initial_state = Some("not_a_state".to_string()); + assert!(ReleaseLedgerRepository::record(conn, e).await.is_err()); + } + #[tokio::test] async fn record_persists_media_url_pair() { let (db, _temp) = create_test_db().await; diff --git a/src/db/repositories/series.rs b/src/db/repositories/series.rs index e7c0bc63..f1b09a10 100644 --- a/src/db/repositories/series.rs +++ b/src/db/repositories/series.rs @@ -2067,6 +2067,62 @@ impl SeriesRepository { Ok(map) } + /// Fetch the set of owned `(volume, chapter)` keys for a series, used + /// by the release-tracking auto-ignore predicate. + /// + /// Skips books with both `volume` and `chapter` null (no signal). + /// `has_any_volume_metadata` reflects whether any non-deleted book in + /// the series carries a non-null `volume`; the count fallback in + /// [`crate::services::release::auto_ignore`] only fires when this is + /// false. + pub async fn get_owned_release_keys_for_series( + db: &DatabaseConnection, + series_id: Uuid, + ) -> Result { + use crate::services::release::auto_ignore::OwnedReleaseKeys; + + #[derive(Debug, FromQueryResult)] + struct KeyRow { + volume: Option, + chapter: Option, + } + + let rows: Vec = books::Entity::find() + .select_only() + .column_as(book_metadata::Column::Volume, "volume") + .column_as(book_metadata::Column::Chapter, "chapter") + .join(JoinType::LeftJoin, books::Relation::BookMetadata.def()) + .filter(books::Column::SeriesId.eq(series_id)) + .filter(books::Column::Deleted.eq(false)) + .into_model::() + .all(db) + .await + .context("Failed to load owned release keys")?; + + let mut keys: Vec<(Option, Option)> = Vec::with_capacity(rows.len()); + let mut has_any_volume_metadata = false; + let mut volumes_owned_count: i64 = 0; + for r in rows { + if r.volume.is_some() { + has_any_volume_metadata = true; + } + if r.volume.is_some() && r.chapter.is_none() { + volumes_owned_count += 1; + } + // Skip rows with no signal at all. 
+ if r.volume.is_none() && r.chapter.is_none() { + continue; + } + keys.push((r.volume, r.chapter.map(f64::from))); + } + + Ok(OwnedReleaseKeys { + keys, + has_any_volume_metadata, + volumes_owned_count, + }) + } + /// Delete a series pub async fn delete(db: &DatabaseConnection, id: Uuid) -> Result<()> { Series::delete_by_id(id) @@ -3603,4 +3659,129 @@ mod tests { assert_eq!(none.local_max_volume, None); assert_eq!(none.volumes_owned, None); } + + #[tokio::test] + async fn test_owned_release_keys_with_metadata() { + let (db, _temp_dir) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let library = LibraryRepository::create( + conn, + "Test Library", + "/test/path", + ScanningStrategy::Default, + ) + .await + .unwrap(); + + let series = SeriesRepository::create(conn, library.id, "Mixed", None) + .await + .unwrap(); + + // Whole vol 1, whole vol 3, ch 12 of vol 2, pure ch 99.5, untyped book. + insert_book_with_classification(conn, series.id, library.id, "/v1.cbz", Some(1), None) + .await; + insert_book_with_classification(conn, series.id, library.id, "/v3.cbz", Some(3), None) + .await; + insert_book_with_classification( + conn, + series.id, + library.id, + "/v2c12.cbz", + Some(2), + Some(12.0), + ) + .await; + insert_book_with_classification( + conn, + series.id, + library.id, + "/c99-5.cbz", + None, + Some(99.5), + ) + .await; + insert_book_with_classification(conn, series.id, library.id, "/untyped.cbz", None, None) + .await; + + let owned = SeriesRepository::get_owned_release_keys_for_series(conn, series.id) + .await + .unwrap(); + + assert!(owned.has_any_volume_metadata); + assert_eq!(owned.volumes_owned_count, 2); + // Untyped book is filtered out; the other four contribute keys. 
+ assert_eq!(owned.keys.len(), 4); + assert!(owned.keys.contains(&(Some(1), None))); + assert!(owned.keys.contains(&(Some(3), None))); + assert!(owned.keys.contains(&(Some(2), Some(12.0)))); + assert!(owned.keys.contains(&(None, Some(99.5)))); + } + + #[tokio::test] + async fn test_owned_release_keys_pure_count_world() { + let (db, _temp_dir) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let library = LibraryRepository::create( + conn, + "Test Library", + "/test/path", + ScanningStrategy::Default, + ) + .await + .unwrap(); + + let series = SeriesRepository::create(conn, library.id, "NoMeta", None) + .await + .unwrap(); + + // Three untyped books — count world. + for i in 1..=3 { + insert_book_with_classification( + conn, + series.id, + library.id, + &format!("/u{}.cbz", i), + None, + None, + ) + .await; + } + + let owned = SeriesRepository::get_owned_release_keys_for_series(conn, series.id) + .await + .unwrap(); + + assert!(!owned.has_any_volume_metadata); + assert_eq!(owned.volumes_owned_count, 0); + assert!(owned.keys.is_empty()); + } + + #[tokio::test] + async fn test_owned_release_keys_empty_series() { + let (db, _temp_dir) = create_test_db().await; + let conn = db.sea_orm_connection(); + + let library = LibraryRepository::create( + conn, + "Test Library", + "/test/path", + ScanningStrategy::Default, + ) + .await + .unwrap(); + + let empty = SeriesRepository::create(conn, library.id, "Empty", None) + .await + .unwrap(); + + let owned = SeriesRepository::get_owned_release_keys_for_series(conn, empty.id) + .await + .unwrap(); + + assert!(!owned.has_any_volume_metadata); + assert_eq!(owned.volumes_owned_count, 0); + assert!(owned.keys.is_empty()); + } } diff --git a/src/services/plugin/releases_handler.rs b/src/services/plugin/releases_handler.rs index c8d7b023..ac8e70ce 100644 --- a/src/services/plugin/releases_handler.rs +++ b/src/services/plugin/releases_handler.rs @@ -25,12 +25,14 @@ use super::protocol::{ JsonRpcError, JsonRpcRequest, 
JsonRpcResponse, ReleaseSourceCapability, RequestId, error_codes, methods, }; +use crate::db::entities::release_ledger::state as ledger_state; use crate::db::entities::release_sources::kind as source_kind; use crate::db::repositories::{ NewReleaseSource, ReleaseLedgerRepository, ReleaseSourceRepository, SeriesAliasRepository, - SeriesExternalIdRepository, SeriesTrackingRepository, TrackingUpdate, + SeriesExternalIdRepository, SeriesRepository, SeriesTrackingRepository, TrackingUpdate, }; use crate::scheduler::Scheduler; +use crate::services::release::auto_ignore::should_auto_ignore; use crate::services::release::candidate::ReleaseCandidate; use crate::services::release::languages::{includes, resolve_for_series}; use crate::services::release::matcher::{evaluate, resolve_threshold}; @@ -288,8 +290,30 @@ impl ReleasesRequestHandler { let candidate_volume = accepted.candidate.volume; let candidate_language = accepted.candidate.language.clone(); + // Auto-ignore: if the user already owns this volume/chapter, insert + // the row directly as `ignored` so it skips the inbox + notify path. + // Best-effort; on failure we fall back to the default state. + let initial_state = if candidate_volume.is_some() || candidate_chapter.is_some() { + match SeriesRepository::get_owned_release_keys_for_series(&self.db, series_id).await { + Ok(owned) => { + if should_auto_ignore(candidate_volume, candidate_chapter, &owned) { + Some(ledger_state::IGNORED.to_string()) + } else { + None + } + } + Err(e) => { + warn!(error = %e, %series_id, "owned-keys lookup failed; defaulting to announced"); + None + } + } + } else { + None + }; + // 4. Hand off to the ledger (which is itself idempotent). 
- let entry = accepted.into_ledger_entry(params.source_id); + let mut entry = accepted.into_ledger_entry(params.source_id); + entry.initial_state = initial_state; let outcome = match ReleaseLedgerRepository::record(&self.db, entry).await { Ok(o) => o, Err(e) => { @@ -339,7 +363,17 @@ impl ReleasesRequestHandler { // any task context (today: shouldn't happen for releases since // every record path runs inside a poll task). We log and skip // rather than silently emitting into a void. - if let Some(broadcaster) = crate::events::current_recording_broadcaster() { + // Auto-ignored rows skip the announce event: the row is on the + // ledger for audit/recovery, but the user already owns the + // matching volume/chapter so there's nothing to notify about. + if outcome.row.state != ledger_state::ANNOUNCED { + debug!( + series_id = %outcome.row.series_id, + plugin = %self.plugin_name, + state = %outcome.row.state, + "Skipping release_announced emit for non-announced state" + ); + } else if let Some(broadcaster) = crate::events::current_recording_broadcaster() { let _ = broadcaster.emit(crate::events::EntityChangeEvent::release_announced( &outcome.row, &self.plugin_name, diff --git a/src/services/release/auto_ignore.rs b/src/services/release/auto_ignore.rs new file mode 100644 index 00000000..ca7a2843 --- /dev/null +++ b/src/services/release/auto_ignore.rs @@ -0,0 +1,220 @@ +//! Decide whether an incoming release matches something the user already +//! owns, so ingestion can mark it `ignored` instead of `announced`. +//! +//! Direct matches only. We do not infer chapter ownership from owned +//! volumes (chapter→volume mapping is unreliable upstream) or vice versa. +//! +//! Inputs come from [`crate::db::repositories::SeriesRepository::get_owned_release_keys_for_series`]: +//! the set of `(volume, chapter)` pairs derived from book metadata, plus +//! a count fallback used only when no book in the series has any volume +//! metadata. +//! +//! 
/// Per-series ownership signature consumed by [`should_auto_ignore`].
#[derive(Debug, Default, Clone)]
pub struct OwnedReleaseKeys {
    /// `(volume, chapter)` pairs derived from book metadata, after
    /// filtering out rows where both fields are null.
    ///
    /// - `(Some(v), None)` — whole volume `v` owned (no specific chapter).
    /// - `(Some(v), Some(c))` — chapter `c` of volume `v` owned.
    /// - `(None, Some(c))` — chapter `c` owned, volume unknown.
    pub keys: Vec<(Option<i32>, Option<f64>)>,
    /// `true` if at least one book in the series carries volume metadata.
    /// When `false`, [`should_auto_ignore`] may fall back to
    /// [`Self::volumes_owned_count`].
    pub has_any_volume_metadata: bool,
    /// Count of "complete-volume" books (volume set, chapter unset).
    /// Only consulted in the count-fallback branch when
    /// [`Self::has_any_volume_metadata`] is `false`.
    pub volumes_owned_count: i64,
}

/// True when the release matches a directly-owned key.
///
/// Matching rules:
/// - **Volume + chapter release**: matches an owned `(Some(v), Some(c))`,
///   or an owned whole volume `(Some(v), None)` (a whole volume implies
///   every chapter in it).
/// - **Volume-only release**: matches an owned whole volume
///   `(Some(v), None)`. Owning only specific chapters of `v` is NOT
///   enough.
/// - **Chapter-only release**: matches any owned key with the same
///   chapter, regardless of volume.
/// - **No volume and no chapter**: never auto-ignored.
///
/// **Count fallback**: applies only when `has_any_volume_metadata` is
/// false (no book has volume metadata at all). A volume-`n` release with
/// `1 <= n <= volumes_owned_count` is treated as owned. The fallback is
/// never applied to chapter-only releases, and volume numbers below 1
/// (a bogus "volume 0" or negative announcement) never match it.
pub fn should_auto_ignore(
    release_volume: Option<i32>,
    release_chapter: Option<f64>,
    owned: &OwnedReleaseKeys,
) -> bool {
    match (release_volume, release_chapter) {
        (None, None) => false,

        (Some(v), Some(c)) => owned.keys.iter().any(|(ov, oc)| match (ov, oc) {
            (Some(ov), Some(oc)) => *ov == v && chapter_eq(*oc, c),
            // Whole volume owned ⇒ every chapter of it is owned.
            (Some(ov), None) => *ov == v,
            _ => false,
        }),

        (Some(v), None) => {
            let direct = owned
                .keys
                .iter()
                .any(|(ov, oc)| matches!((ov, oc), (Some(ov), None) if *ov == v));
            if direct {
                return true;
            }
            // Count fallback: only when no book has volume metadata.
            // Guard against `v < 1` so a nonsensical volume number is
            // never auto-ignored by the count alone.
            if !owned.has_any_volume_metadata && owned.volumes_owned_count > 0 {
                return v >= 1 && i64::from(v) <= owned.volumes_owned_count;
            }
            false
        }

        (None, Some(c)) => owned
            .keys
            .iter()
            .any(|(_, oc)| matches!(oc, Some(oc) if chapter_eq(*oc, c))),
    }
}

/// Tolerant equality for chapter numbers. Both sides come from DB columns
/// as `f64`; values are small decimals (e.g. `12.5`), so an epsilon
/// comparison is sufficient for the realistic range.
fn chapter_eq(a: f64, b: f64) -> bool {
    (a - b).abs() < 1e-6
}

#[cfg(test)]
mod tests {
    use super::*;

    fn owned(keys: Vec<(Option<i32>, Option<f64>)>) -> OwnedReleaseKeys {
        let has_any_volume_metadata = keys.iter().any(|(v, _)| v.is_some());
        let volumes_owned_count = keys
            .iter()
            .filter(|(v, c)| v.is_some() && c.is_none())
            .count() as i64;
        OwnedReleaseKeys {
            keys,
            has_any_volume_metadata,
            volumes_owned_count,
        }
    }

    #[test]
    fn volume_release_owned_as_whole_volume() {
        let o = owned(vec![(Some(1), None), (Some(2), None)]);
        assert!(should_auto_ignore(Some(1), None, &o));
        assert!(should_auto_ignore(Some(2), None, &o));
        assert!(!should_auto_ignore(Some(3), None, &o));
    }

    #[test]
    fn volume_release_not_matched_by_chapter_in_volume() {
        // User only has chapter 5 of volume 1, not the whole volume.
        let o = owned(vec![(Some(1), Some(5.0))]);
        assert!(!should_auto_ignore(Some(1), None, &o));
    }

    #[test]
    fn chapter_release_matches_any_volume() {
        let o = owned(vec![(Some(2), Some(12.0))]);
        assert!(should_auto_ignore(None, Some(12.0), &o));
        assert!(!should_auto_ignore(None, Some(13.0), &o));
    }

    #[test]
    fn chapter_release_matches_chapter_only_owned() {
        let o = owned(vec![(None, Some(7.0))]);
        assert!(should_auto_ignore(None, Some(7.0), &o));
        assert!(!should_auto_ignore(None, Some(8.0), &o));
    }

    #[test]
    fn chapter_release_not_matched_by_owned_volume() {
        // We do NOT infer ch 5 is in vol 1 — chapter→volume mapping
        // is unreliable upstream.
        let o = owned(vec![(Some(1), None)]);
        assert!(!should_auto_ignore(None, Some(5.0), &o));
    }

    #[test]
    fn vol_plus_chapter_release_matches_exact_pair() {
        let o = owned(vec![(Some(1), Some(5.0))]);
        assert!(should_auto_ignore(Some(1), Some(5.0), &o));
        assert!(!should_auto_ignore(Some(1), Some(6.0), &o));
        assert!(!should_auto_ignore(Some(2), Some(5.0), &o));
    }

    #[test]
    fn vol_plus_chapter_release_matches_whole_volume() {
        let o = owned(vec![(Some(1), None)]);
        assert!(should_auto_ignore(Some(1), Some(5.0), &o));
        assert!(should_auto_ignore(Some(1), Some(99.5), &o));
    }

    #[test]
    fn count_fallback_active_when_no_metadata() {
        let o = OwnedReleaseKeys {
            keys: vec![],
            has_any_volume_metadata: false,
            volumes_owned_count: 2,
        };
        assert!(should_auto_ignore(Some(1), None, &o));
        assert!(should_auto_ignore(Some(2), None, &o));
        assert!(!should_auto_ignore(Some(3), None, &o));
    }

    #[test]
    fn count_fallback_rejects_nonpositive_volumes() {
        // A bogus "volume 0" / negative announcement must not be hidden
        // just because the user owns some volumes.
        let o = OwnedReleaseKeys {
            keys: vec![],
            has_any_volume_metadata: false,
            volumes_owned_count: 5,
        };
        assert!(!should_auto_ignore(Some(0), None, &o));
        assert!(!should_auto_ignore(Some(-1), None, &o));
    }

    #[test]
    fn count_fallback_inactive_when_metadata_present() {
        // User owns vols 3, 5, 7 (with metadata). Count fallback must
        // NOT hide vol 1 — that's the bug the metadata path fixes.
        let o = owned(vec![(Some(3), None), (Some(5), None), (Some(7), None)]);
        assert!(!should_auto_ignore(Some(1), None, &o));
        assert!(should_auto_ignore(Some(3), None, &o));
        assert!(!should_auto_ignore(Some(4), None, &o));
    }

    #[test]
    fn count_fallback_does_not_apply_to_chapter_releases() {
        let o = OwnedReleaseKeys {
            keys: vec![],
            has_any_volume_metadata: false,
            volumes_owned_count: 5,
        };
        assert!(!should_auto_ignore(None, Some(3.0), &o));
    }

    #[test]
    fn release_with_no_volume_or_chapter_never_ignored() {
        let o = owned(vec![(Some(1), None)]);
        assert!(!should_auto_ignore(None, None, &o));
    }

    #[test]
    fn empty_owned_set_never_ignores() {
        let o = OwnedReleaseKeys::default();
        assert!(!should_auto_ignore(Some(1), None, &o));
        assert!(!should_auto_ignore(None, Some(1.0), &o));
        assert!(!should_auto_ignore(Some(1), Some(1.0), &o));
    }

    #[test]
    fn fractional_chapter_matches() {
        let o = owned(vec![(Some(1), Some(12.5))]);
        assert!(should_auto_ignore(None, Some(12.5), &o));
        assert!(!should_auto_ignore(None, Some(12.0), &o));
    }
}
+pub mod auto_ignore; pub mod backoff; pub mod candidate; pub mod languages; diff --git a/src/tasks/handlers/poll_release_source.rs b/src/tasks/handlers/poll_release_source.rs index b7301d00..73f5796b 100644 --- a/src/tasks/handlers/poll_release_source.rs +++ b/src/tasks/handlers/poll_release_source.rs @@ -33,17 +33,19 @@ use std::time::Duration; use tracing::{debug, error, info, warn}; use uuid::Uuid; +use crate::db::entities::release_ledger::state as ledger_state; use crate::db::entities::release_sources::plugin_id as source_plugin_id; use crate::db::entities::tasks; use crate::db::repositories::{ NewReleaseEntry, PluginsRepository, ReleaseLedgerRepository, ReleaseSourceRepository, - SeriesTrackingRepository, + SeriesRepository, SeriesTrackingRepository, }; use crate::events::{EntityChangeEvent, EventBroadcaster}; use crate::services::SettingsService; use crate::services::plugin::PluginManager; use crate::services::plugin::handle::PluginError; use crate::services::plugin::protocol::{ReleasePollRequest, ReleasePollResponse, methods}; +use crate::services::release::auto_ignore::{OwnedReleaseKeys, should_auto_ignore}; use crate::services::release::backoff::{HostBackoff, is_backoff_status}; use crate::services::release::matcher::{evaluate, resolve_threshold}; use crate::tasks::handlers::TaskHandler; @@ -346,6 +348,12 @@ impl TaskHandler for PollReleaseSourceHandler { ..Default::default() }; + // Cache per-series owned-keys lookups across candidates in this + // poll. A single source typically returns many candidates for + // the same series, so we don't want N+1 queries here. 
+ let mut owned_cache: std::collections::HashMap = + std::collections::HashMap::new(); + for cand in response.candidates { let series_id = cand.series_match.codex_series_id; let threshold = match SeriesTrackingRepository::get(db, series_id).await { @@ -362,14 +370,45 @@ impl TaskHandler for PollReleaseSourceHandler { }; match evaluate(cand, threshold) { Ok(accepted) => { - let entry: NewReleaseEntry = accepted.into_ledger_entry(source.id); + let cand_volume = accepted.candidate.volume; + let cand_chapter = accepted.candidate.chapter; + + let initial_state = match resolve_initial_state( + db, + &mut owned_cache, + series_id, + cand_volume, + cand_chapter, + ) + .await + { + Ok(s) => s, + Err(e) => { + warn!( + "Task {}: owned-keys lookup failed for series {}: {} \ + (defaulting to announced)", + task.id, series_id, e + ); + None + } + }; + + let mut entry: NewReleaseEntry = accepted.into_ledger_entry(source.id); + entry.initial_state = initial_state.clone(); match ReleaseLedgerRepository::record(db, entry).await { Ok(outcome) => { if outcome.deduped { result.candidates_deduped += 1; } else { result.candidates_recorded += 1; - if let Some(broadcaster) = event_broadcaster { + // Only emit the SSE/notify event when the + // row landed as `announced`. Auto-ignored + // rows are bookkeeping; users see them + // only on demand via the "All" filter. + let landed_announced = + outcome.row.state == ledger_state::ANNOUNCED; + if landed_announced && let Some(broadcaster) = event_broadcaster + { emit_release_announced( broadcaster, &outcome.row, @@ -568,6 +607,35 @@ pub(crate) fn emit_release_announced( let _ = broadcaster.emit(EntityChangeEvent::release_announced(row, plugin_id)); } +/// Compute the initial ledger state for a candidate. Returns +/// `Some("ignored")` when the user already owns this volume/chapter; +/// `None` falls back to the repository's default (`announced`). 
+/// +/// Uses `owned_cache` so multiple candidates against the same series in +/// one poll only hit the DB once. +async fn resolve_initial_state( + db: &DatabaseConnection, + owned_cache: &mut std::collections::HashMap, + series_id: Uuid, + volume: Option, + chapter: Option, +) -> Result> { + // Skip the lookup entirely when the candidate has nothing to match against. + if volume.is_none() && chapter.is_none() { + return Ok(None); + } + if let std::collections::hash_map::Entry::Vacant(e) = owned_cache.entry(series_id) { + let owned = SeriesRepository::get_owned_release_keys_for_series(db, series_id).await?; + e.insert(owned); + } + let owned = &owned_cache[&series_id]; + if should_auto_ignore(volume, chapter, owned) { + Ok(Some(ledger_state::IGNORED.to_string())) + } else { + Ok(None) + } +} + /// Best-effort URL hint extraction used for backoff keying. /// /// Looks in `config.url`, `config.feed_url`, and `config.base_url` in that diff --git a/tests/api/releases.rs b/tests/api/releases.rs index 4902acda..de988941 100644 --- a/tests/api/releases.rs +++ b/tests/api/releases.rs @@ -103,6 +103,7 @@ async fn record_announced( confidence: 0.95, metadata: None, observed_at: chrono::Utc::now(), + initial_state: None, }, ) .await @@ -1441,6 +1442,83 @@ async fn bulk_dismiss_updates_state_for_listed_ids() { ); } +#[tokio::test] +async fn bulk_ignore_sets_state_to_ignored() { + let (db, _temp) = setup_test_db().await; + let series = make_series(&db).await; + let source = make_source(&db, "nyaa:user:tsuna69").await; + let id = record_announced(&db, series, source, "rel-i").await; + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let body = BulkReleaseActionRequest { + ids: vec![id], + action: BulkReleaseAction::Ignore, + }; + let req = post_json_request_with_auth("/api/v1/releases/bulk", &body, &token); + let (status, resp): (StatusCode, Option) = + 
make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(resp.unwrap().affected, 1); + + assert_eq!( + ReleaseLedgerRepository::get_by_id(&db, id) + .await + .unwrap() + .unwrap() + .state, + "ignored" + ); +} + +#[tokio::test] +async fn bulk_reset_returns_state_to_announced_from_any_state() { + let (db, _temp) = setup_test_db().await; + let series = make_series(&db).await; + let source = make_source(&db, "nyaa:user:tsuna69").await; + let id_d = record_announced(&db, series, source, "rel-d").await; + let id_a = record_announced(&db, series, source, "rel-a").await; + let id_i = record_announced(&db, series, source, "rel-i").await; + + // Move each into a different non-announced state via direct repo call. + ReleaseLedgerRepository::set_state(&db, id_d, "dismissed") + .await + .unwrap(); + ReleaseLedgerRepository::set_state(&db, id_a, "marked_acquired") + .await + .unwrap(); + ReleaseLedgerRepository::set_state(&db, id_i, "ignored") + .await + .unwrap(); + + let state = create_test_auth_state(db.clone()).await; + let token = create_admin_and_token(&db, &state).await; + let app = create_test_router(state).await; + + let body = BulkReleaseActionRequest { + ids: vec![id_d, id_a, id_i], + action: BulkReleaseAction::Reset, + }; + let req = post_json_request_with_auth("/api/v1/releases/bulk", &body, &token); + let (status, resp): (StatusCode, Option) = + make_json_request(app, req).await; + assert_eq!(status, StatusCode::OK); + assert_eq!(resp.unwrap().affected, 3); + + for id in [id_d, id_a, id_i] { + assert_eq!( + ReleaseLedgerRepository::get_by_id(&db, id) + .await + .unwrap() + .unwrap() + .state, + "announced", + ); + } +} + #[tokio::test] async fn bulk_delete_clears_etags_on_affected_sources_only() { let (db, _temp) = setup_test_db().await; diff --git a/web/openapi.json b/web/openapi.json index 343c4b67..937a3a3a 100644 --- a/web/openapi.json +++ b/web/openapi.json @@ -7467,7 +7467,7 @@ "Releases" ], "summary": "Apply an action to a batch 
of ledger rows.", - "description": "`dismiss` and `mark-acquired` set state in-place. `delete` removes\nthe rows and clears the affected sources' etags so the next poll\nre-fetches without `If-None-Match`. All three run as bulk SQL — no\nper-row round trips — so this scales to deleting thousands of rows in\none call.", + "description": "`dismiss`, `mark-acquired`, `ignore`, and `reset` all set state\nin-place. `delete` removes the rows and clears the affected sources'\netags so the next poll re-fetches without `If-None-Match`. All run\nas bulk SQL (no per-row round trips), so this scales to thousands of\nrows in one call.", "operationId": "bulk_release_action", "requestBody": { "content": { @@ -21653,6 +21653,8 @@ "enum": [ "dismiss", "mark-acquired", + "ignore", + "reset", "delete" ] }, diff --git a/web/src/components/releases/ReleasesBulkActionBar.tsx b/web/src/components/releases/ReleasesBulkActionBar.tsx index 0925fd6d..314dff22 100644 --- a/web/src/components/releases/ReleasesBulkActionBar.tsx +++ b/web/src/components/releases/ReleasesBulkActionBar.tsx @@ -1,5 +1,11 @@ import { Button, Card, Group, Text } from "@mantine/core"; -import { IconCheck, IconTrash, IconX } from "@tabler/icons-react"; +import { + IconCheck, + IconEyeOff, + IconRefresh, + IconTrash, + IconX, +} from "@tabler/icons-react"; import type { BulkReleaseAction } from "@/api/releases"; interface ReleasesBulkActionBarProps { @@ -56,6 +62,26 @@ export function ReleasesBulkActionBar({ > Dismiss + + {onDeleteClick && (