Mirror of https://github.com/xCyanGrizzly/DragonsStash.git, synced 2026-05-11 14:21:15 +00:00
Fix worker getting stuck during sync: add timeouts, stuck detection, and safety limits
- Add invokeWithTimeout wrapper for TDLib API calls (2min timeout per call)
- Add stuck detection to getChannelMessages: break if from_message_id doesn't advance
- Add stuck detection to getTopicMessages: same protection for topic scanning
- Add stuck detection to getForumTopicList: break if pagination offsets don't advance
- Add max page limit (5000) to all scanning loops to prevent infinite pagination
- Add mutex wait timeout (30min) to prevent indefinite blocking when holder hangs
- Add cycle timeout (4h default, configurable via WORKER_CYCLE_TIMEOUT_MINUTES)
- Fix end-of-page detection to use actual limit value instead of hardcoded 100

Co-authored-by: xCyanGrizzly <53275238+xCyanGrizzly@users.noreply.github.com>
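To illustrate the timeout wrapper and stuck-detection pattern the message describes, here is a minimal sketch. It is not the code from this commit: the `client.invoke` call shape, the `getChatHistory` request, and all helper names are assumptions based on the commit message.

// Minimal sketch (not this commit's code) of the timeout-wrapper and
// stuck-detection pattern. The TDLib client, the "getChatHistory" request
// shape, and all names here are assumptions.
const MAX_PAGES = 5000; // safety limit against runaway pagination

// Reject if a single TDLib call takes longer than timeoutMs (default 2 minutes).
async function invokeWithTimeout(client, request, timeoutMs = 2 * 60 * 1000) {
    let timer;
    const timeout = new Promise((_, reject) => {
        timer = setTimeout(() => reject(new Error(`TDLib call timed out after ${timeoutMs}ms`)), timeoutMs);
    });
    try {
        return await Promise.race([client.invoke(request), timeout]);
    } finally {
        clearTimeout(timer);
    }
}

// Page through a chat's history; break if the cursor stops advancing or the
// page cap is hit, so a misbehaving response can no longer hang the worker.
async function getChannelMessages(client, chatId) {
    const messages = [];
    let fromMessageId = 0;
    for (let page = 0; page < MAX_PAGES; page++) {
        const limit = 100;
        const batch = await invokeWithTimeout(client, {
            _: "getChatHistory",
            chat_id: chatId,
            from_message_id: fromMessageId,
            offset: 0,
            limit,
        });
        if (!batch.messages || batch.messages.length === 0) break;
        messages.push(...batch.messages);
        const lastId = batch.messages[batch.messages.length - 1].id;
        if (lastId === fromMessageId) break; // stuck: cursor did not advance
        fromMessageId = lastId;
        // End-of-page check against the actual limit, not a hardcoded 100.
        if (batch.messages.length < limit) break;
    }
    return messages;
}

The same "did the cursor move?" check is what the commit applies to getTopicMessages and getForumTopicList, with the page cap and per-call timeout shared across all scanning loops.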
worker/dist/archive/split.js (vendored, normal file, 55 lines added)
@@ -0,0 +1,55 @@
import { createReadStream, createWriteStream } from "fs";
import { stat } from "fs/promises";
import path from "path";
import { pipeline } from "stream/promises";
import { childLogger } from "../util/logger.js";
const log = childLogger("split");
/** 2GB in bytes — Telegram's file size limit */
const MAX_PART_SIZE = 2n * 1024n * 1024n * 1024n;
/**
 * Split a file into ≤2GB parts using byte-level splitting.
 * Returns paths to the split parts. If the file is already ≤2GB, returns the original path.
 */
export async function byteLevelSplit(filePath) {
    const stats = await stat(filePath);
    const fileSize = BigInt(stats.size);
    if (fileSize <= MAX_PART_SIZE) {
        return [filePath];
    }
    const dir = path.dirname(filePath);
    const baseName = path.basename(filePath);
    const partSize = Number(MAX_PART_SIZE);
    const totalParts = Math.ceil(Number(fileSize) / partSize);
    const parts = [];
    log.info({ filePath, fileSize: Number(fileSize), totalParts }, "Splitting file");
    for (let i = 0; i < totalParts; i++) {
        const partNum = String(i + 1).padStart(3, "0");
        const partPath = path.join(dir, `${baseName}.${partNum}`);
        const start = i * partSize;
        const end = Math.min(start + partSize - 1, Number(fileSize) - 1);
        await pipeline(createReadStream(filePath, { start, end }), createWriteStream(partPath));
        parts.push(partPath);
    }
    log.info({ filePath, parts: parts.length }, "File split complete");
    return parts;
}
/**
 * Concatenate multiple files into a single output file by streaming
 * each input sequentially. Used for repacking multipart archives
 * that have oversized parts (>2GB) before re-splitting.
 */
export async function concatenateFiles(inputPaths, outputPath) {
    const out = createWriteStream(outputPath);
    for (let i = 0; i < inputPaths.length; i++) {
        log.info({ part: i + 1, total: inputPaths.length, file: path.basename(inputPaths[i]) }, "Concatenating part");
        await pipeline(createReadStream(inputPaths[i]), out, { end: false });
    }
    // Close the output stream
    await new Promise((resolve, reject) => {
        out.end(() => resolve());
        out.on("error", reject);
    });
    const stats = await stat(outputPath);
    log.info({ outputPath, totalBytes: stats.size, parts: inputPaths.length }, "Concatenation complete");
}
//# sourceMappingURL=split.js.map
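For orientation, a hypothetical caller of these helpers might look like the following. The file paths, sizes, and the relative import path are illustrative assumptions, not part of the commit.

// Hypothetical usage of the helpers above; paths and sizes are made up.
import { byteLevelSplit, concatenateFiles } from "./archive/split.js";

// A file over 2 GB comes back as .001/.002/... parts, each at most 2 GB;
// a file already at or under the limit is returned unchanged.
const parts = await byteLevelSplit("/tmp/backup.tar.zst");

// Oversized parts can later be stitched back together before re-splitting.
await concatenateFiles(parts, "/tmp/backup.tar.zst.joined");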