Mirror of https://github.com/xCyanGrizzly/DragonsStash.git (synced 2026-05-11 06:11:15 +00:00)
Fix worker getting stuck during sync: add timeouts, stuck detection, and safety limits
- Add invokeWithTimeout wrapper for TDLib API calls (2 min timeout per call; a sketch of the pattern follows below)
- Add stuck detection to getChannelMessages: break if from_message_id doesn't advance
- Add stuck detection to getTopicMessages: same protection for topic scanning
- Add stuck detection to getForumTopicList: break if pagination offsets don't advance
- Add max page limit (5000) to all scanning loops to prevent infinite pagination
- Add mutex wait timeout (30 min) to prevent indefinite blocking when the holder hangs
- Add cycle timeout (4 h default, configurable via WORKER_CYCLE_TIMEOUT_MINUTES)
- Fix end-of-page detection to use the actual limit value instead of a hardcoded 100

Co-authored-by: xCyanGrizzly <53275238+xCyanGrizzly@users.noreply.github.com>
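The diff on this page only covers the vendored multipart grouping helper; the timeout wrapper itself is not shown here. As a rough illustration of the pattern the first bullet describes, a Promise.race-based timeout wrapper could look like the sketch below. The client/invoke names, the `request._` type tag, and the error message are assumptions for illustration, not the repository's actual code:

// Hypothetical sketch of the invokeWithTimeout pattern described above.
// The real implementation lives elsewhere in the worker; names are assumed.
const DEFAULT_TIMEOUT_MS = 2 * 60 * 1000; // 2 minutes per TDLib call

async function invokeWithTimeout(client, request, timeoutMs = DEFAULT_TIMEOUT_MS) {
    let timer;
    const timeout = new Promise((_, reject) => {
        timer = setTimeout(
            () => reject(new Error(`TDLib call timed out after ${timeoutMs}ms: ${request._}`)),
            timeoutMs,
        );
    });
    try {
        // Whichever settles first wins; a hung TDLib call no longer blocks the sync cycle.
        return await Promise.race([client.invoke(request), timeout]);
    } finally {
        clearTimeout(timer); // don't keep the event loop alive after the call settles
    }
}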
worker/dist/archive/multipart.js (vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
import { detectArchive } from "./detect.js";
import { config } from "../util/config.js";
import { childLogger } from "../util/logger.js";
const log = childLogger("multipart");
/**
 * Group messages into archive sets (single files + multipart groups).
 * Messages should be pre-filtered to only include archive attachments.
 */
export function groupArchiveSets(messages) {
    // Detect and annotate each message
    const annotated = [];
    for (const msg of messages) {
        const info = detectArchive(msg.fileName);
        if (info) {
            annotated.push({ msg, info });
        }
    }
    // Group by baseName + format
    const groups = new Map();
    for (const item of annotated) {
        const key = `${item.info.format}:${item.info.baseName.toLowerCase()}`;
        const group = groups.get(key) ?? [];
        group.push(item);
        groups.set(key, group);
    }
    const results = [];
    for (const [, group] of groups) {
        const format = group[0].info.format;
        const baseName = group[0].info.baseName;
        // Separate explicit multipart entries from potential singles
        const multipartEntries = group.filter((g) => g.info.pattern !== "SINGLE");
        const singleEntries = group.filter((g) => g.info.pattern === "SINGLE");
        if (multipartEntries.length > 0) {
            // This is a multipart set
            // Check if any single entry is the "final part" of a legacy split
            const allEntries = [...multipartEntries, ...singleEntries];
            // Check time span — skip if parts span too long (0 = no limit)
            if (config.multipartTimeoutHours > 0) {
                const dates = allEntries.map((e) => e.msg.date.getTime());
                const span = Math.max(...dates) - Math.min(...dates);
                const maxSpanMs = config.multipartTimeoutHours * 60 * 60 * 1000;
                if (span > maxSpanMs) {
                    log.warn({ baseName, format, span: span / 3600000 }, "Multipart set spans too long, skipping");
                    continue;
                }
            }
            // Sort by part number (singles get a very high number so they come last — they're the final part)
            allEntries.sort((a, b) => {
                const aNum = a.info.partNumber === -1 ? 999999 : a.info.partNumber;
                const bNum = b.info.partNumber === -1 ? 999999 : b.info.partNumber;
                return aNum - bNum;
            });
            results.push({
                type: format,
                baseName,
                parts: allEntries.map((e) => e.msg),
                isMultipart: true,
            });
        }
        else {
            // All entries are singles — each is its own archive set
            for (const entry of singleEntries) {
                results.push({
                    type: format,
                    baseName: entry.info.baseName,
                    parts: [entry.msg],
                    isMultipart: false,
                });
            }
        }
    }
    return results;
}
//# sourceMappingURL=multipart.js.map
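For context, a minimal usage sketch of the grouping function above, assuming detectArchive (from detect.js, not shown on this page) recognizes names like "backup.part1.rar". The sample file names, message ids, and format strings are illustrative assumptions, not data from the repository:

// Illustrative input: message objects with the fileName/date fields the function reads.
const messages = [
    { id: 1, fileName: "backup.part1.rar", date: new Date("2026-05-01T10:00:00Z") },
    { id: 2, fileName: "backup.part2.rar", date: new Date("2026-05-01T10:05:00Z") },
    { id: 3, fileName: "notes.zip", date: new Date("2026-05-01T11:00:00Z") },
];

const sets = groupArchiveSets(messages);
// Expected shape, if detectArchive matches these names:
// [
//   { type: <rar format tag>, baseName: "backup", parts: [msg 1, msg 2], isMultipart: true },
//   { type: <zip format tag>, baseName: "notes", parts: [msg 3], isMultipart: false },
// ]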