fix: properly handle asynchronous indexing when scanning plex collections

This commit is contained in:
Christian Benincasa
2026-03-15 18:37:57 -04:00
parent 013081475f
commit c9266d436f
9 changed files with 4100 additions and 5 deletions

View File

@@ -1648,6 +1648,7 @@ export class ProgramDB implements IProgramDB {
tagId,
programId: entityType === 'program' ? joinId : null,
groupingId: entityType === 'grouping' ? joinId : null,
source: 'media',
});
}
@@ -1656,7 +1657,9 @@ export class ProgramDB implements IProgramDB {
entityType === 'grouping'
? TagRelations.groupingId
: TagRelations.programId;
await tx.delete(TagRelations).where(eq(col, joinId));
await tx
.delete(TagRelations)
.where(and(eq(col, joinId), eq(TagRelations.source, 'media')));
if (newTagNames.size > 0) {
await tx
.insert(Tag)

View File

@@ -44,10 +44,15 @@ export class TagRepo {
uniq(programIds).map((id) => ({
tagId,
programId: id,
source: 'collection' as const,
})),
)
.onConflictDoNothing({
target: [TagRelations.tagId, TagRelations.programId],
target: [
TagRelations.tagId,
TagRelations.programId,
TagRelations.source,
],
});
}
@@ -62,6 +67,7 @@ export class TagRepo {
and(
eq(TagRelations.tagId, tagId),
inArray(TagRelations.programId, programIds),
eq(TagRelations.source, 'collection'),
),
);
}
@@ -77,6 +83,7 @@ export class TagRepo {
and(
eq(TagRelations.tagId, tagId),
inArray(TagRelations.groupingId, groupingIds),
eq(TagRelations.source, 'collection'),
),
);
}
@@ -96,10 +103,15 @@ export class TagRepo {
uniq(groupingIds).map((id) => ({
tagId,
groupingId: id,
source: 'collection' as const,
})),
)
.onConflictDoNothing({
target: [TagRelations.tagId, TagRelations.groupingId],
target: [
TagRelations.tagId,
TagRelations.groupingId,
TagRelations.source,
],
});
}
}

View File

@@ -30,12 +30,23 @@ export const TagRelations = sqliteTable(
groupingId: text().references(() => ProgramGrouping.uuid, {
onDelete: 'cascade',
}),
source: text({ enum: ['media', 'collection'] })
.notNull()
.default('media'),
},
(table) => [
index('tag_relations_program_id_idx').on(table.programId),
index('tag_relations_grouping_id_idx').on(table.groupingId),
unique('tag_program_id_unique_idx').on(table.tagId, table.programId),
unique('tag_grouping_id_unique_idx').on(table.tagId, table.groupingId),
unique('tag_program_id_unique_idx').on(
table.tagId,
table.programId,
table.source,
),
unique('tag_grouping_id_unique_idx').on(
table.tagId,
table.groupingId,
table.source,
),
],
);

View File

@@ -201,6 +201,9 @@ export class DirectMigrationProvider implements MigrationProvider {
'./sql/0041_easy_firebird.sql',
),
migration1771271020: Migration1771271020_FixCustomShowContentKey,
migration1773603770: makeKyselyMigrationFromSqlFile(
'./sql/0042_supreme_medusa.sql',
),
},
wrapWithTransaction,
),

View File

@@ -0,0 +1,21 @@
DROP INDEX `tag_program_id_unique_idx`;--> statement-breakpoint
DROP INDEX `tag_grouping_id_unique_idx`;--> statement-breakpoint
ALTER TABLE `tag_relations` ADD `source` text DEFAULT 'media' NOT NULL;--> statement-breakpoint
CREATE UNIQUE INDEX `tag_program_id_unique_idx` ON `tag_relations` (`tag_id`,`program_id`,`source`);--> statement-breakpoint
CREATE UNIQUE INDEX `tag_grouping_id_unique_idx` ON `tag_relations` (`tag_id`,`grouping_id`,`source`);--> statement-breakpoint
PRAGMA foreign_keys=OFF;--> statement-breakpoint
CREATE TABLE `__new_custom_show_content` (
	`content_uuid` text NOT NULL,
	`custom_show_uuid` text NOT NULL,
	`index` integer NOT NULL,
	PRIMARY KEY(`content_uuid`, `custom_show_uuid`, `index`),
	FOREIGN KEY (`content_uuid`) REFERENCES `program`(`uuid`) ON UPDATE no action ON DELETE cascade,
	FOREIGN KEY (`custom_show_uuid`) REFERENCES `custom_show`(`uuid`) ON UPDATE no action ON DELETE cascade
);
--> statement-breakpoint
INSERT INTO `__new_custom_show_content`("content_uuid", "custom_show_uuid", "index") SELECT "content_uuid", "custom_show_uuid", "index" FROM `custom_show_content`;--> statement-breakpoint
DROP TABLE `custom_show_content`;--> statement-breakpoint
ALTER TABLE `__new_custom_show_content` RENAME TO `custom_show_content`;--> statement-breakpoint
PRAGMA foreign_keys=ON;

File diff suppressed because it is too large Load Diff

View File

@@ -295,6 +295,13 @@
"when": 1770236977185,
"tag": "0041_easy_firebird",
"breakpoints": true
},
{
"idx": 42,
"version": "6",
"when": 1773603770514,
"tag": "0042_supreme_medusa",
"breakpoints": true
}
]
}

View File

@@ -1745,6 +1745,25 @@ export class MeilisearchService implements ISearchService {
return this.waitForTaskResult(task.taskUid);
}
async waitForPendingIndexTasks(): Promise<void> {
if (!this.started) return;
// TODO: paginate if > 1000 tasks (requires multiple getTasks calls)
const tasks = await this.client().tasks.getTasks({
indexUids: [ProgramsIndex.name],
statuses: ['enqueued', 'processing'],
limit: 1000,
});
if (tasks.results.length === 0) return;
this.logger.debug(
'Waiting for %d pending index task(s) before collection scan',
tasks.results.length,
);
await this.client().tasks.waitForTasks(
tasks.results.map((t) => t.uid),
{ timeout: 0 },
);
}
private async waitForTaskResult(
taskId: number,
canceledIsOk: boolean = false,

View File

@@ -155,6 +155,7 @@ export class PlexCollectionScanner extends ExternalCollectionScanner<PlexApiClie
}
private async scanLibraryInternal(ctx: Context) {
await this.searchService.waitForPendingIndexTasks();
this.logger.debug(
'Scanning Plex library "%s" for collections',
ctx.library.name,