Commit cb235644dc by CHEVALLIER Abel, 2025-11-13 16:23:22 +01:00 (parent de9c515a47)
34924 changed files with 3811102 additions and 0 deletions

node_modules/piscina/src/abort.ts (generated, vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
interface AbortSignalEventTargetAddOptions {
once: boolean;
}
export interface AbortSignalEventTarget {
addEventListener: (
name: 'abort',
listener: () => void,
options?: AbortSignalEventTargetAddOptions
) => void;
removeEventListener: (name: 'abort', listener: () => void) => void;
aborted?: boolean;
reason?: unknown;
}
export interface AbortSignalEventEmitter {
off: (name: 'abort', listener: () => void) => void;
once: (name: 'abort', listener: () => void) => void;
}
export type AbortSignalAny = AbortSignalEventTarget | AbortSignalEventEmitter;
export class AbortError extends Error {
constructor (reason?: AbortSignalEventTarget['reason']) {
// TS does not recognize the `cause` option
// @ts-expect-error
super('The task has been aborted', { cause: reason });
}
get name () {
return 'AbortError';
}
}
export function onabort (abortSignal: AbortSignalAny, listener: () => void) {
if ('addEventListener' in abortSignal) {
abortSignal.addEventListener('abort', listener, { once: true });
} else {
abortSignal.once('abort', listener);
}
}
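
A quick usage sketch (not part of the commit): onabort branches on the presence of addEventListener, so it accepts both a DOM-style AbortSignal and an EventEmitter standing in for one; the relative import path is assumed.

import { EventEmitter } from 'node:events';
import { onabort, AbortError } from './abort';

// DOM-style signal: matched via the 'addEventListener' branch,
// registered with { once: true } so the listener fires at most once.
const controller = new AbortController();
onabort(controller.signal, () => {
  console.error(new AbortError(controller.signal.reason));
});
controller.abort('shutting down');

// EventEmitter-based signal: falls through to .once('abort', ...).
const emitterSignal = new EventEmitter();
onabort(emitterSignal, () => console.log('emitter signal aborted'));
emitterSignal.emit('abort');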

node_modules/piscina/src/common.ts (generated, vendored, new file, 61 lines)

@@ -0,0 +1,61 @@
import { fileURLToPath, URL } from 'node:url';
import { availableParallelism } from 'node:os';
import { kMovable, kTransferable, kValue } from './symbols';
// Signals whether the worker is ready to receive tasks
export const READY = '_WORKER_READY';
/**
* True if the object implements the Transferable interface
*
* @export
* @param {unknown} value
* @return {*} {boolean}
*/
export function isTransferable (value: unknown): boolean {
return (
value != null &&
typeof value === 'object' &&
kTransferable in value &&
kValue in value
);
}
/**
* True if the object implements Transferable and has been returned
* by the Piscina.move() function
*
* TODO: narrow down the type of value
* @export
* @param {(unknown & PiscinaMovable)} value
* @return {*} {boolean}
*/
export function isMovable (value: any): boolean {
return isTransferable(value) && value[kMovable] === true;
}
export function markMovable (value: {}): void {
Object.defineProperty(value, kMovable, {
enumerable: false,
configurable: true,
writable: true,
value: true
});
}
// State of Piscina pool
export const commonState = {
isWorkerThread: false,
workerData: undefined
};
export function maybeFileURLToPath (filename : string) : string {
return filename.startsWith('file:')
? fileURLToPath(new URL(filename))
: filename;
}
export function getAvailableParallelism () : number {
return availableParallelism();
}
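
As a minimal illustration (import paths assumed), an object becomes transferable once it exposes the two well-known symbols, and movable only after markMovable() tags it, which per the comments above is what Piscina.move() does internally:

import { isTransferable, isMovable, markMovable, maybeFileURLToPath } from './common';
import { kTransferable, kValue } from './symbols';

const buf = new ArrayBuffer(8);
const wrapped = {
  get [kTransferable] () { return buf; }, // what lands on the transferList
  get [kValue] () { return buf; } // what the task function actually receives
};

console.log(isTransferable(wrapped)); // true: both symbols are present
console.log(isMovable(wrapped)); // false: not yet marked
markMovable(wrapped);
console.log(isMovable(wrapped)); // true

// file: URLs are converted to filesystem paths; plain paths pass through.
console.log(maybeFileURLToPath('file:///tmp/worker.js')); // /tmp/worker.js
console.log(maybeFileURLToPath('/tmp/worker.js')); // /tmp/worker.js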

node_modules/piscina/src/errors.ts (generated, vendored, new file, 9 lines)

@@ -0,0 +1,9 @@
export const Errors = {
ThreadTermination: () => new Error('Terminating worker thread'),
FilenameNotProvided: () =>
new Error('filename must be provided to run() or in options object'),
TaskQueueAtLimit: () => new Error('Task queue is at limit'),
NoTaskQueueAvailable: () =>
new Error('No task queue available and all Workers are busy'),
CloseTimeout: () => new Error('Close operation timed out')
};

node_modules/piscina/src/histogram.ts (generated, vendored, new file, 104 lines)

@@ -0,0 +1,104 @@
import { RecordableHistogram, createHistogram } from 'node:perf_hooks';
export type PiscinaHistogramSummary = {
average: number;
mean: number;
stddev: number;
min: number;
max: number;
p0_001: number;
p0_01: number;
p0_1: number;
p1: number;
p2_5: number;
p10: number;
p25: number;
p50: number;
p75: number;
p90: number;
p97_5: number;
p99: number;
p99_9: number;
p99_99: number;
p99_999: number;
};
export type PiscinaHistogram = {
runTime: PiscinaHistogramSummary;
waitTime: PiscinaHistogramSummary;
resetRunTime(): void;
resetWaitTime(): void;
};
export class PiscinaHistogramHandler {
#runTime: RecordableHistogram;
#waitTime: RecordableHistogram;
constructor() {
this.#runTime = createHistogram();
this.#waitTime = createHistogram();
}
get runTimeSummary(): PiscinaHistogramSummary {
return PiscinaHistogramHandler.createHistogramSummary(this.#runTime);
}
get waitTimeSummary(): PiscinaHistogramSummary {
return PiscinaHistogramHandler.createHistogramSummary(this.#waitTime);
}
get runTimeCount(): number {
return this.#runTime.count;
}
recordRunTime(value: number) {
this.#runTime.record(PiscinaHistogramHandler.toHistogramIntegerNano(value));
}
recordWaitTime(value: number) {
this.#waitTime.record(
PiscinaHistogramHandler.toHistogramIntegerNano(value)
);
}
resetWaitTime(): void {
this.#waitTime.reset();
}
resetRunTime(): void {
this.#runTime.reset();
}
static createHistogramSummary(
histogram: RecordableHistogram
): PiscinaHistogramSummary {
const { mean, stddev, min, max } = histogram;
return {
average: mean / 1000,
mean: mean / 1000,
stddev,
min: min / 1000,
max: max / 1000,
p0_001: histogram.percentile(0.001) / 1000,
p0_01: histogram.percentile(0.01) / 1000,
p0_1: histogram.percentile(0.1) / 1000,
p1: histogram.percentile(1) / 1000,
p2_5: histogram.percentile(2.5) / 1000,
p10: histogram.percentile(10) / 1000,
p25: histogram.percentile(25) / 1000,
p50: histogram.percentile(50) / 1000,
p75: histogram.percentile(75) / 1000,
p90: histogram.percentile(90) / 1000,
p97_5: histogram.percentile(97.5) / 1000,
p99: histogram.percentile(99) / 1000,
p99_9: histogram.percentile(99.9) / 1000,
p99_99: histogram.percentile(99.99) / 1000,
p99_999: histogram.percentile(99.999) / 1000,
};
}
static toHistogramIntegerNano(milliseconds: number): number {
return Math.max(1, Math.trunc(milliseconds * 1000));
}
}
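
A short sketch of the unit handling (import path assumed): recordRunTime() takes milliseconds, toHistogramIntegerNano() stores them as integers scaled by 1000 (clamping tiny values to 1), and createHistogramSummary() divides by 1000 again, so the summary fields read as milliseconds.

import { PiscinaHistogramHandler } from './histogram';

const handler = new PiscinaHistogramHandler();
handler.recordRunTime(12.5); // milliseconds in; stored as the integer 12500
handler.recordRunTime(3.2);
handler.recordRunTime(0.0001); // sub-resolution values are clamped to 1

const summary = handler.runTimeSummary;
console.log(summary.mean, summary.p50, summary.max); // milliseconds again
handler.resetRunTime(); // clears the underlying RecordableHistogram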

node_modules/piscina/src/index.ts (generated, vendored, new file, 1013 lines)

File diff suppressed because it is too large.

node_modules/piscina/src/main.ts (generated, vendored, new file, 4 lines)

@@ -0,0 +1,4 @@
import Piscina from './index';
// Used as the require() entry point to maintain existing behavior
export = Piscina;

node_modules/piscina/src/symbols.ts (generated, vendored, new file, 10 lines)

@@ -0,0 +1,10 @@
// Internal symbol used to mark Transferable objects returned
// by the Piscina.move() function
export const kMovable = Symbol('Piscina.kMovable');
export const kWorkerData = Symbol('Piscina.kWorkerData');
export const kTransferable = Symbol.for('Piscina.transferable');
export const kValue = Symbol.for('Piscina.valueOf');
export const kQueueOptions = Symbol.for('Piscina.queueOptions');
export const kRequestCountField = 0;
export const kResponseCountField = 1;
export const kFieldCount = 2;
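
The split between Symbol() and Symbol.for() above is deliberate: kMovable stays private to this module, while the Symbol.for() keys resolve identically in any copy of the code, so user code can interoperate. A self-contained demonstration:

// Symbol.for() consults the global symbol registry, so two independent
// lookups (e.g. from different copies of piscina) yield the same symbol.
const a = Symbol.for('Piscina.transferable');
const b = Symbol.for('Piscina.transferable');
console.log(a === b); // true

// Symbol() never does: each call mints a fresh, unequal symbol.
console.log(Symbol('Piscina.kMovable') === Symbol('Piscina.kMovable')); // false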

node_modules/piscina/src/task_queue/array_queue.ts (generated, vendored, new file, 25 lines)

@@ -0,0 +1,25 @@
import assert from 'node:assert';
import type { TaskQueue, Task } from '.';
export class ArrayTaskQueue implements TaskQueue {
tasks: Task[] = []
get size () {
return this.tasks.length;
}
shift (): Task | null {
return this.tasks.shift() ?? null;
}
push (task: Task): void {
this.tasks.push(task);
}
remove (task: Task): void {
const index = this.tasks.indexOf(task);
assert.notStrictEqual(index, -1);
this.tasks.splice(index, 1);
}
}
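
A minimal exercise of the queue contract, assuming a Task object that carries the kQueueOptions key defined in ../symbols:

import { ArrayTaskQueue } from './array_queue';
import { kQueueOptions } from '../symbols';
import type { Task } from '.';

const makeTask = (id: number): Task => ({ [kQueueOptions]: { id } });

const queue = new ArrayTaskQueue();
const first = makeTask(1);
const second = makeTask(2);
queue.push(first);
queue.push(second);
console.log(queue.size); // 2
queue.remove(second); // asserts the task is actually queued
console.log(queue.shift() === first); // true, FIFO
console.log(queue.shift()); // null once empty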

node_modules/piscina/src/task_queue/common.ts (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
import type { kQueueOptions } from '../symbols';
export interface TaskQueue {
readonly size: number;
shift(): Task | null;
remove(task: Task): void;
push(task: Task): void;
}
// Public Interface
export interface PiscinaTask extends Task {
taskId: number;
filename: string;
name: string;
created: number;
isAbortable: boolean;
}
export interface Task {
readonly [kQueueOptions]: object | null
};

node_modules/piscina/src/task_queue/fixed_queue.ts (generated, vendored, new file, 177 lines)

@@ -0,0 +1,177 @@
/*
* Modified Fixed Queue Implementation based on the one from Node.js Project
* License: MIT License
* Source: https://github.com/nodejs/node/blob/de7b37880f5a541d5f874c1c2362a65a4be76cd0/lib/internal/fixed_queue.js
*/
import assert from 'node:assert';
import type { Task } from './common';
import { TaskQueue } from '.';
// Currently optimal queue size, tested on V8 6.0 - 6.6. Must be a power of two.
const kSize = 2048;
const kMask = kSize - 1;
// The FixedQueue is implemented as a singly-linked list of fixed-size
// circular buffers. It looks something like this:
//
// head tail
// | |
// v v
// +-----------+ <-----\ +-----------+ <------\ +-----------+
// | [null] | \----- | next | \------- | next |
// +-----------+ +-----------+ +-----------+
// | item | <-- bottom | item | <-- bottom | [empty] |
// | item | | item | | [empty] |
// | item | | item | | [empty] |
// | item | | item | | [empty] |
// | item | | item | bottom --> | item |
// | item | | item | | item |
// | ... | | ... | | ... |
// | item | | item | | item |
// | item | | item | | item |
// | [empty] | <-- top | item | | item |
// | [empty] | | item | | item |
// | [empty] | | [empty] | <-- top top --> | [empty] |
// +-----------+ +-----------+ +-----------+
//
// Or, if there is only one circular buffer, it looks something
// like either of these:
//
// head tail head tail
// | | | |
// v v v v
// +-----------+ +-----------+
// | [null] | | [null] |
// +-----------+ +-----------+
// | [empty] | | item |
// | [empty] | | item |
// | item | <-- bottom top --> | [empty] |
// | item | | [empty] |
// | [empty] | <-- top bottom --> | item |
// | [empty] | | item |
// +-----------+ +-----------+
//
// Adding a value means moving `top` forward by one, removing means
// moving `bottom` forward by one. After reaching the end, the queue
// wraps around.
//
// When `top === bottom` the current queue is empty and when
// `top + 1 === bottom` it's full. This wastes a single space of storage
// but allows much quicker checks.
class FixedCircularBuffer {
bottom: number = 0
top: number = 0
list: Array<Task | undefined> = new Array(kSize)
next: FixedCircularBuffer | null = null
isEmpty () {
return this.top === this.bottom;
}
isFull () {
return ((this.top + 1) & kMask) === this.bottom;
}
push (data:Task) {
this.list[this.top] = data;
this.top = (this.top + 1) & kMask;
}
shift () {
const nextItem = this.list[this.bottom];
if (nextItem === undefined) { return null; }
this.list[this.bottom] = undefined;
this.bottom = (this.bottom + 1) & kMask;
return nextItem;
}
remove (task: Task) {
const indexToRemove = this.list.indexOf(task);
assert.notStrictEqual(indexToRemove, -1);
let curr = indexToRemove;
while (true) {
const next = (curr + 1) & kMask;
this.list[curr] = this.list[next];
if (this.list[curr] === undefined) {
break;
}
if (next === indexToRemove) {
this.list[curr] = undefined;
break;
}
curr = next;
}
this.top = (this.top - 1) & kMask;
}
}
export class FixedQueue implements TaskQueue {
head: FixedCircularBuffer
tail: FixedCircularBuffer
#size: number = 0
constructor () {
this.head = this.tail = new FixedCircularBuffer();
}
isEmpty () {
return this.head.isEmpty();
}
push (data:Task) {
if (this.head.isFull()) {
// Head is full: Creates a new queue, sets the old queue's `.next` to it,
// and sets it as the new main queue.
this.head = this.head.next = new FixedCircularBuffer();
}
this.head.push(data);
this.#size++;
}
shift (): Task | null {
const tail = this.tail;
const next = tail.shift();
if (next !== null) this.#size--;
if (tail.isEmpty() && tail.next !== null) {
// If there is another queue, it forms the new tail.
this.tail = tail.next;
tail.next = null;
}
return next;
}
remove (task: Task) {
let prev: FixedCircularBuffer | null = null;
let buffer = this.tail;
while (true) {
if (buffer.list.includes(task)) {
buffer.remove(task);
this.#size--;
break;
}
if (buffer.next === null) break;
prev = buffer;
buffer = buffer.next;
}
if (buffer.isEmpty()) {
// removing tail
if (prev === null) {
// if tail is not the last buffer
if (buffer.next !== null) this.tail = buffer.next;
} else {
// removing head
if (buffer.next === null) {
this.head = prev;
} else {
// removing buffer from middle
prev.next = buffer.next;
}
}
}
}
get size () {
return this.#size;
}
};
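
The buffer chaining pictured in the diagram can be exercised directly: each FixedCircularBuffer holds kSize - 1 = 2047 items, so pushing 3000 tasks links a second buffer, and shift() still drains in FIFO order across the boundary (a sketch with the same assumed Task shape as above):

import { FixedQueue } from './fixed_queue';
import { kQueueOptions } from '../symbols';
import type { Task } from './common';

const queue = new FixedQueue();
const tasks: Task[] = Array.from({ length: 3000 }, (_, i) => ({
  [kQueueOptions]: { i }
}));

// 3000 > 2047 usable slots, so push() allocates a second circular buffer.
for (const task of tasks) queue.push(task);
console.log(queue.size); // 3000

// FIFO order is preserved across the buffer boundary.
console.log(queue.shift() === tasks[0]); // true
queue.remove(tasks[10]); // compacts within the owning buffer
console.log(queue.size); // 2998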

node_modules/piscina/src/task_queue/index.ts (generated, vendored, new file, 148 lines)

@@ -0,0 +1,148 @@
import type { MessagePort } from 'node:worker_threads';
import { performance } from 'node:perf_hooks';
import { AsyncResource } from 'node:async_hooks';
import type { WorkerInfo } from '../worker_pool';
import type { AbortSignalAny, AbortSignalEventEmitter } from '../abort';
import { isMovable } from '../common';
import { kTransferable, kValue, kQueueOptions } from '../symbols';
import type { Task, TaskQueue, PiscinaTask } from './common';
export { ArrayTaskQueue } from './array_queue';
export { FixedQueue } from './fixed_queue';
export type TaskCallback = (err: Error, result: any) => void
// Grab the type of `transferList` off `MessagePort`. At the time of writing,
// only ArrayBuffer and MessagePort are valid, but let's avoid having to update
// our types here every time Node.js adds support for more objects.
export type TransferList = MessagePort extends {
postMessage: (value: any, transferList: infer T) => any
}
? T
: never
export type TransferListItem = TransferList extends Array<infer T> ? T : never
/**
* Verifies if a given TaskQueue is valid
*
* @export
* @param {*} value
* @return {*} {boolean}
*/
export function isTaskQueue (value: TaskQueue): boolean {
return (
typeof value === 'object' &&
value !== null &&
'size' in value &&
typeof value.shift === 'function' &&
typeof value.remove === 'function' &&
typeof value.push === 'function'
);
}
let taskIdCounter = 0;
// Extend AsyncResource so that async relations between posting a task and
// receiving its result are visible to diagnostic tools.
export class TaskInfo extends AsyncResource implements Task {
callback : TaskCallback;
task : any;
transferList : TransferList;
filename : string;
name : string;
taskId : number;
abortSignal : AbortSignalAny | null;
// abortListener : (() => void) | null = null;
workerInfo : WorkerInfo | null = null;
created : number;
started : number;
aborted = false;
_abortListener: (() => void) | null = null;
constructor (
task : any,
transferList : TransferList,
filename : string,
name : string,
callback : TaskCallback,
abortSignal : AbortSignalAny | null,
triggerAsyncId : number) {
super('Piscina.Task', { requireManualDestroy: true, triggerAsyncId });
this.callback = callback;
this.task = task;
this.transferList = transferList;
// If the task is a Transferable returned by
// Piscina.move(), then add it to the transferList
// automatically
if (isMovable(task)) {
// This condition should never be hit, but TypeScript
// complains if we don't do the check.
/* istanbul ignore if */
if (this.transferList == null) {
this.transferList = [];
}
this.transferList =
this.transferList.concat(task[kTransferable]);
this.task = task[kValue];
}
this.filename = filename;
this.name = name;
// TODO: This should not be global
this.taskId = taskIdCounter++;
this.abortSignal = abortSignal;
this.created = performance.now();
this.started = 0;
}
// TODO: improve this handling - ideally should be extended
set abortListener (value: (() => void)) {
this._abortListener = () => {
this.aborted = true;
value();
};
}
get abortListener (): (() => void) | null {
return this._abortListener;
}
releaseTask () : any {
const ret = this.task;
this.task = null;
return ret;
}
done (err : Error | null, result? : any) : void {
this.runInAsyncScope(this.callback, null, err, result);
this.emitDestroy(); // `TaskInfo`s are used only once.
// If an abort signal was used, remove the listener from it when
// done to make sure we do not accidentally leak.
if (this.abortSignal && this.abortListener) {
if ('removeEventListener' in this.abortSignal && this.abortListener) {
this.abortSignal.removeEventListener('abort', this.abortListener);
} else {
(this.abortSignal as AbortSignalEventEmitter).off(
'abort', this.abortListener);
}
}
}
get [kQueueOptions] () : {} | null {
return this.task?.[kQueueOptions] ?? null;
}
get interface (): PiscinaTask {
return {
taskId: this.taskId,
filename: this.filename,
name: this.name,
created: this.created,
isAbortable: this.abortSignal !== null,
[kQueueOptions]: this[kQueueOptions]
};
}
}
export { Task, TaskQueue, PiscinaTask };
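
isTaskQueue() only duck-types the contract, so any object exposing size plus shift/remove/push functions passes; a hypothetical array-backed queue illustrates (import paths assumed):

import { isTaskQueue, FixedQueue } from '.';
import type { Task, TaskQueue } from '.';

const items: Task[] = [];

// A trivial array-backed queue; shape-compatible with TaskQueue.
const custom: TaskQueue = {
  get size () { return items.length; },
  shift: () => items.shift() ?? null,
  push: (task: Task) => { items.push(task); },
  remove: (task: Task) => { items.splice(items.indexOf(task), 1); }
};

console.log(isTaskQueue(custom)); // true: duck-typed, not instanceof
console.log(isTaskQueue(new FixedQueue())); // true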

node_modules/piscina/src/types.ts (generated, vendored, new file, 52 lines)

@@ -0,0 +1,52 @@
import type { MessagePort, Worker } from 'node:worker_threads';
import type { READY } from './common';
import type { kTransferable, kValue } from './symbols';
export interface StartupMessage {
filename: string | null
name: string
port: MessagePort
sharedBuffer: Int32Array
atomics: 'async' | 'sync' | 'disabled'
niceIncrement: number
}
export interface RequestMessage {
taskId: number
task: any
filename: string
name: string
histogramEnabled: number
}
export interface ReadyMessage {
[READY]: true
}
export interface ResponseMessage {
taskId: number
result: any
error: Error | null
time: number | null
}
export const commonState = {
isWorkerThread: false,
workerData: undefined
};
export interface Transferable {
readonly [kTransferable]: object;
readonly [kValue]: object;
}
export type ResourceLimits = Worker extends {
resourceLimits?: infer T;
}
? T
: {};
export type EnvSpecifier = typeof Worker extends {
new (filename: never, options?: { env: infer T }): Worker;
}
? T
: never;
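
The conditional types for ResourceLimits and EnvSpecifier lean on infer to track whatever the installed worker_threads typings declare, rather than restating them. The same trick in isolation (the type names here are illustrative):

import type { Worker } from 'node:worker_threads';

// Pull the type of `resourceLimits` off Worker without restating it here.
type InferredResourceLimits = Worker extends { resourceLimits?: infer T } ? T : {};

// The same trick against the constructor signature, for the env option.
type InferredEnv = typeof Worker extends {
  new (filename: never, options?: { env: infer T }): Worker;
} ? T : never;

// Tracks the installed typings: valid fields come from the real ResourceLimits.
const limits: InferredResourceLimits = { maxOldGenerationSizeMb: 64 };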

node_modules/piscina/src/worker.ts (generated, vendored, new file, 239 lines)

@@ -0,0 +1,239 @@
import { parentPort, MessagePort, receiveMessageOnPort, workerData } from 'node:worker_threads';
import { pathToFileURL } from 'node:url';
import { performance } from 'node:perf_hooks';
import type {
ReadyMessage,
RequestMessage,
ResponseMessage,
StartupMessage
} from './types';
import {
kResponseCountField,
kRequestCountField,
kTransferable,
kValue
} from './symbols';
import {
READY,
commonState,
isMovable
} from './common';
commonState.isWorkerThread = true;
commonState.workerData = workerData;
/* c8 ignore next */
function noop (): void {}
const handlerCache : Map<string, Function> = new Map();
let useAtomics : boolean = process.env.PISCINA_DISABLE_ATOMICS !== '1';
let useAsyncAtomics : boolean = process.env.PISCINA_ENABLE_ASYNC_ATOMICS === '1';
// Get `import(x)` as a function that isn't transpiled to `require(x)` by
// TypeScript for dual ESM/CJS support.
// Load this lazily, so that there is no warning about the ESM loader being
// experimental (on Node v12.x) until we actually try to use it.
let importESMCached : (specifier : string) => Promise<any> | undefined;
function getImportESM () {
if (importESMCached === undefined) {
// eslint-disable-next-line no-new-func
importESMCached = new Function('specifier', 'return import(specifier)') as typeof importESMCached;
}
return importESMCached;
}
// Look up the handler function that we call when a task is posted.
// This is either going to be "the" export from a file, or the default export.
async function getHandler (filename : string, name : string) : Promise<Function | null> {
let handler = handlerCache.get(`${filename}/${name}`);
if (handler != null) {
return handler;
}
try {
// With our current set of TypeScript options, this is transpiled to
// `require(filename)`.
handler = await import(filename);
if (typeof handler !== 'function') {
handler = await ((handler as any)[name]);
}
} catch {}
if (typeof handler !== 'function') {
handler = await getImportESM()(pathToFileURL(filename).href);
if (typeof handler !== 'function') {
handler = await ((handler as any)[name]);
}
}
if (typeof handler !== 'function') {
return null;
}
// Limit the handler cache size. This should not usually be an issue and is
// only provided for pathological cases.
/* c8 ignore next */
if (handlerCache.size > 1000) {
const [[key]] = handlerCache;
handlerCache.delete(key);
}
handlerCache.set(`${filename}/${name}`, handler);
return handler;
}
// We should only receive this message once, when the Worker starts. It gives
// us the MessagePort used for receiving tasks, a SharedArrayBuffer for fast
// communication using Atomics, and the name of the default filename for tasks
// (so we can pre-load and cache the handler).
parentPort!.on('message', async (message: StartupMessage) => {
const { port, sharedBuffer, filename, name, niceIncrement } = message;
if (niceIncrement !== 0) {
(await import('@napi-rs/nice').catch(noop))?.nice(niceIncrement);
}
try {
if (filename != null) {
await getHandler(filename, name);
}
const readyMessage : ReadyMessage = { [READY]: true };
useAtomics = useAtomics !== false && message.atomics !== 'disabled';
useAsyncAtomics = useAtomics !== false && (useAsyncAtomics || message.atomics === 'async');
parentPort!.postMessage(readyMessage);
port.on('message', onMessage.bind(null, port, sharedBuffer));
if (useAtomics) {
const res = atomicsWaitLoop(port, sharedBuffer);
if (res?.then != null) await res;
}
} catch (error) {
throwInNextTick(error as Error);
}
});
let currentTasks : number = 0;
let lastSeenRequestCount : number = 0;
function atomicsWaitLoop (port : MessagePort, sharedBuffer : Int32Array) {
// This function is entered either after receiving the startup message, or
// when we are done with a task. In those situations, the *only* thing we
// expect to happen next is a 'message' on `port`.
// That call would come with the overhead of a C++ → JS boundary crossing,
// including async tracking. So, instead, if there is no task currently
// running, we wait for a signal from the parent thread using Atomics.wait(),
// and read the message from the port instead of generating an event,
// in order to avoid that overhead.
// The one catch is that this stops asynchronous operations that are still
// running from proceeding. Generally, tasks should not spawn asynchronous
// operations without waiting for them to finish, though.
if (useAsyncAtomics === true) {
// @ts-expect-error - for some reason not supported by TS
const { async, value } = Atomics.waitAsync(sharedBuffer, kRequestCountField, lastSeenRequestCount);
// The outcome of the wait itself is intentionally not checked
/* c8 ignore start */
return async === true && value.then(() => {
lastSeenRequestCount = Atomics.load(sharedBuffer, kRequestCountField);
// We have to read messages *after* updating lastSeenRequestCount in order
// to avoid race conditions.
let entry;
while ((entry = receiveMessageOnPort(port)) !== undefined) {
onMessage(port, sharedBuffer, entry.message);
}
});
/* c8 ignore stop */
}
while (currentTasks === 0) {
// Check whether there are new messages by testing whether the current
// number of requests posted by the parent thread matches the number of
// requests received.
// The return value of Atomics.wait() is intentionally ignored
Atomics.wait(sharedBuffer, kRequestCountField, lastSeenRequestCount);
lastSeenRequestCount = Atomics.load(sharedBuffer, kRequestCountField);
// We have to read messages *after* updating lastSeenRequestCount in order
// to avoid race conditions.
let entry;
while ((entry = receiveMessageOnPort(port)) !== undefined) {
onMessage(port, sharedBuffer, entry.message);
}
}
}
async function onMessage (
port : MessagePort,
sharedBuffer : Int32Array,
message : RequestMessage) {
currentTasks++;
const { taskId, task, filename, name } = message;
let response : ResponseMessage;
let transferList : any[] = [];
const start = message.histogramEnabled === 1 ? performance.now() : null;
try {
const handler = await getHandler(filename, name);
if (handler === null) {
throw new Error(`No handler function exported from ${filename}`);
}
let result = await handler(task);
if (isMovable(result)) {
transferList = transferList.concat(result[kTransferable]);
result = result[kValue];
}
response = {
taskId,
result,
error: null,
time: start == null ? null : Math.round(performance.now() - start)
};
if (useAtomics && !useAsyncAtomics) {
// If the task used e.g. console.log(), wait for the stream to drain
// before potentially entering the `Atomics.wait()` loop, and before
// returning the result so that messages will always be printed even
// if the process would otherwise be ready to exit.
if (process.stdout.writableLength > 0) {
await new Promise((resolve) => process.stdout.write('', resolve));
}
if (process.stderr.writableLength > 0) {
await new Promise((resolve) => process.stderr.write('', resolve));
}
}
} catch (error) {
response = {
taskId,
result: null,
// It may be worth taking a look at the error cloning algorithm we
// use in Node.js core here, it's quite a bit more flexible
error: <Error>error,
time: start == null ? null : Math.round(performance.now() - start)
};
}
currentTasks--;
try {
// Post the response to the parent thread, and let it know that we have
// an additional message available. If possible, use Atomics.wait()
// to wait for the next message.
port.postMessage(response, transferList);
Atomics.add(sharedBuffer, kResponseCountField, 1);
if (useAtomics) {
const res = atomicsWaitLoop(port, sharedBuffer);
if (res?.then != null) await res;
}
} catch (error) {
throwInNextTick(error as Error);
}
}
function throwInNextTick (error : Error) {
queueMicrotask(() => { throw error; });
}
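
The Atomics handshake that atomicsWaitLoop() relies on can be shown in isolation: a worker blocks in Atomics.wait() on a shared counter, and the parent wakes it with Atomics.add() plus Atomics.notify(), the same pair WorkerInfo.postTask() issues on the other side. A self-contained sketch, not Piscina's public API:

import { Worker } from 'node:worker_threads';

// One shared int32 acts as the request counter.
const shared = new Int32Array(new SharedArrayBuffer(4));

const worker = new Worker(`
  const { workerData, parentPort } = require('node:worker_threads');
  const shared = workerData;
  // Block until the counter moves past the last value we saw,
  // mirroring the Atomics.wait() call in atomicsWaitLoop().
  Atomics.wait(shared, 0, 0);
  parentPort.postMessage('woke at count ' + Atomics.load(shared, 0));
`, { eval: true, workerData: shared });

worker.on('message', (msg) => {
  console.log(msg); // "woke at count 1"
  void worker.terminate();
});

// Parent side: bump the counter and wake one waiter, the same pair of
// calls WorkerInfo.postTask() makes after port.postMessage().
setTimeout(() => {
  Atomics.add(shared, 0, 1);
  Atomics.notify(shared, 0, 1);
}, 100);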

node_modules/piscina/src/worker_pool/balancer/index.ts (generated, vendored, new file, 39 lines)

@@ -0,0 +1,39 @@
import type { PiscinaTask } from '../../task_queue';
import type { PiscinaWorker } from '..';
export type PiscinaLoadBalancer = (
task: PiscinaTask,
workers: PiscinaWorker[]
) => PiscinaWorker | null; // Returning a worker makes it the balancer's pick; returning null leaves the task queued.
export type LeastBusyBalancerOptions = {
maximumUsage: number;
};
export function LeastBusyBalancer (
opts: LeastBusyBalancerOptions
): PiscinaLoadBalancer {
const { maximumUsage } = opts;
return (task, workers) => {
let candidate: PiscinaWorker | null = null;
let checkpoint = maximumUsage;
for (const worker of workers) {
if (worker.currentUsage === 0) {
candidate = worker;
break;
}
if (worker.isRunningAbortableTask) continue;
if (
!task.isAbortable &&
(worker.currentUsage < checkpoint)
) {
candidate = worker;
checkpoint = worker.currentUsage;
}
}
return candidate;
};
}
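
Because PiscinaLoadBalancer is just a function type, custom policies are plain functions. A hypothetical round-robin balancer that honors the same constraints as LeastBusyBalancer (skip saturated workers and workers running an abortable task):

import type { PiscinaLoadBalancer } from '.';

export function RoundRobinBalancer (maximumUsage: number): PiscinaLoadBalancer {
  let next = 0;
  return (_task, workers) => {
    for (let i = 0; i < workers.length; i++) {
      const worker = workers[(next + i) % workers.length];
      if (worker.isRunningAbortableTask) continue;
      if (worker.currentUsage >= maximumUsage) continue;
      next = (next + i + 1) % workers.length; // resume after the pick
      return worker;
    }
    return null; // no eligible worker; the task stays queued
  };
}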

node_modules/piscina/src/worker_pool/base.ts (generated, vendored, new file, 126 lines)

@@ -0,0 +1,126 @@
import assert from 'node:assert';
export abstract class AsynchronouslyCreatedResource {
onreadyListeners : (() => void)[] | null = [];
ondestroyListeners : (() => void)[] | null = [];
markAsReady () : void {
const listeners = this.onreadyListeners;
assert(listeners !== null);
this.onreadyListeners = null;
for (const listener of listeners) {
listener();
}
}
isReady () : boolean {
return this.onreadyListeners === null;
}
onReady (fn : () => void) {
if (this.onreadyListeners === null) {
fn(); // Zalgo is okay here.
return;
}
this.onreadyListeners.push(fn);
}
onDestroy (fn : () => void) {
if (this.ondestroyListeners === null) {
return;
}
this.ondestroyListeners.push(fn);
}
markAsDestroyed () {
const listeners = this.ondestroyListeners;
assert(listeners !== null);
this.ondestroyListeners = null;
for (const listener of listeners) {
listener();
}
}
isDestroyed () {
return this.ondestroyListeners === null;
}
abstract currentUsage() : number;
}
// TODO: this will eventually become a scheduler
export class AsynchronouslyCreatedResourcePool<
T extends AsynchronouslyCreatedResource> {
pendingItems = new Set<T>();
readyItems = new Set<T>();
maximumUsage : number;
onAvailableListeners : ((item : T) => void)[];
onTaskDoneListeners : ((item : T) => void)[];
constructor (maximumUsage : number) {
this.maximumUsage = maximumUsage;
this.onAvailableListeners = [];
this.onTaskDoneListeners = [];
}
add (item : T) {
this.pendingItems.add(item);
item.onReady(() => {
/* istanbul ignore else */
if (this.pendingItems.has(item)) {
this.pendingItems.delete(item);
this.readyItems.add(item);
this.maybeAvailable(item);
}
});
}
delete (item : T) {
this.pendingItems.delete(item);
this.readyItems.delete(item);
}
* [Symbol.iterator] () {
yield * this.pendingItems;
yield * this.readyItems;
}
get size () {
return this.pendingItems.size + this.readyItems.size;
}
maybeAvailable (item : T) {
/* istanbul ignore else */
if (item.currentUsage() < this.maximumUsage) {
for (const listener of this.onAvailableListeners) {
listener(item);
}
}
}
onAvailable (fn : (item : T) => void) {
this.onAvailableListeners.push(fn);
}
taskDone (item : T) {
for (let i = 0; i < this.onTaskDoneListeners.length; i++) {
this.onTaskDoneListeners[i](item);
}
}
onTaskDone (fn : (item : T) => void) {
this.onTaskDoneListeners.push(fn);
}
getCurrentUsage (): number {
let inFlight = 0;
for (const worker of this.readyItems) {
const currentUsage = worker.currentUsage();
if (Number.isFinite(currentUsage)) inFlight += currentUsage;
}
return inFlight;
}
}
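
A hypothetical minimal subclass shows the ready/destroy lifecycle the pool tracks; note that onReady() deliberately runs the callback synchronously when the resource is already ready ("Zalgo is okay here"):

import { AsynchronouslyCreatedResource, AsynchronouslyCreatedResourcePool } from './base';

class FakeResource extends AsynchronouslyCreatedResource {
  usage = 0;
  currentUsage () : number { return this.usage; }
}

const pool = new AsynchronouslyCreatedResourcePool<FakeResource>(2);
pool.onAvailable((item) => console.log('available, usage', item.currentUsage()));

const res = new FakeResource();
pool.add(res); // parked in pendingItems until it signals readiness
res.markAsReady(); // moves to readyItems and fires onAvailable
console.log(pool.size); // 1
console.log([...pool].length); // iterates pending items, then ready items
res.markAsDestroyed();
pool.delete(res);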

node_modules/piscina/src/worker_pool/index.ts (generated, vendored, new file, 200 lines)

@@ -0,0 +1,200 @@
import { Worker, MessagePort, receiveMessageOnPort } from 'node:worker_threads';
import { createHistogram, RecordableHistogram } from 'node:perf_hooks';
import assert from 'node:assert';
import { RequestMessage, ResponseMessage } from '../types';
import { Errors } from '../errors';
import { TaskInfo } from '../task_queue';
import { kFieldCount, kRequestCountField, kResponseCountField, kWorkerData } from '../symbols';
import { PiscinaHistogramHandler, PiscinaHistogramSummary } from '../histogram';
import { AsynchronouslyCreatedResource, AsynchronouslyCreatedResourcePool } from './base';
export * from './balancer';
type ResponseCallback = (response : ResponseMessage) => void;
export type PiscinaWorker = {
id: number;
currentUsage: number;
isRunningAbortableTask: boolean;
histogram: PiscinaHistogramSummary | null;
terminating: boolean;
destroyed: boolean;
[kWorkerData]: WorkerInfo;
}
export class WorkerInfo extends AsynchronouslyCreatedResource {
worker : Worker;
taskInfos : Map<number, TaskInfo>;
idleTimeout : NodeJS.Timeout | null = null;
port : MessagePort;
sharedBuffer : Int32Array;
lastSeenResponseCount : number = 0;
onMessage : ResponseCallback;
histogram: RecordableHistogram | null;
terminating = false;
destroyed = false;
constructor (
worker : Worker,
port : MessagePort,
onMessage : ResponseCallback,
enableHistogram: boolean
) {
super();
this.worker = worker;
this.port = port;
this.port.on('message',
(message : ResponseMessage) => this._handleResponse(message));
this.onMessage = onMessage;
this.taskInfos = new Map();
this.sharedBuffer = new Int32Array(
new SharedArrayBuffer(kFieldCount * Int32Array.BYTES_PER_ELEMENT));
this.histogram = enableHistogram ? createHistogram() : null;
}
get id (): number {
return this.worker.threadId;
}
destroy () : void {
if (this.terminating || this.destroyed) return;
this.terminating = true;
this.clearIdleTimeout();
this.worker.terminate();
this.port.close();
for (const taskInfo of this.taskInfos.values()) {
taskInfo.done(Errors.ThreadTermination());
}
this.taskInfos.clear();
this.terminating = false;
this.destroyed = true;
this.markAsDestroyed();
}
clearIdleTimeout () : void {
if (this.idleTimeout != null) {
clearTimeout(this.idleTimeout);
this.idleTimeout = null;
}
}
ref () : WorkerInfo {
this.port.ref();
return this;
}
unref () : WorkerInfo {
// Note: Do not call ref()/unref() on the Worker itself since that may cause
// a hard crash, see https://github.com/nodejs/node/pull/33394.
this.port.unref();
return this;
}
_handleResponse (message : ResponseMessage) : void {
if (message.time != null) {
this.histogram?.record(PiscinaHistogramHandler.toHistogramIntegerNano(message.time));
}
this.onMessage(message);
if (this.taskInfos.size === 0) {
// No more tasks running on this Worker means it should not keep the
// process running.
this.unref();
}
}
postTask (taskInfo : TaskInfo) {
assert(!this.taskInfos.has(taskInfo.taskId));
assert(!this.terminating && !this.destroyed);
const message : RequestMessage = {
task: taskInfo.releaseTask(),
taskId: taskInfo.taskId,
filename: taskInfo.filename,
name: taskInfo.name,
histogramEnabled: this.histogram != null ? 1 : 0
};
try {
this.port.postMessage(message, taskInfo.transferList);
} catch (err) {
// This would mostly happen if e.g. message contains unserializable data
// or transferList is invalid.
taskInfo.done(<Error>err);
return;
}
taskInfo.workerInfo = this;
this.taskInfos.set(taskInfo.taskId, taskInfo);
queueMicrotask(() => this.clearIdleTimeout());
this.ref();
// Inform the worker that there are new messages posted, and wake it up
// if it is waiting for one.
Atomics.add(this.sharedBuffer, kRequestCountField, 1);
Atomics.notify(this.sharedBuffer, kRequestCountField, 1);
}
processPendingMessages () {
if (this.destroyed) return;
// If we *know* that there are more messages than we have received using
// 'message' events yet, then try to load and handle them synchronously,
// without the need to wait for more expensive events on the event loop.
// This would usually break async tracking, but in our case, we already have
// the extra TaskInfo/AsyncResource layer that rectifies that situation.
const actualResponseCount =
Atomics.load(this.sharedBuffer, kResponseCountField);
if (actualResponseCount !== this.lastSeenResponseCount) {
this.lastSeenResponseCount = actualResponseCount;
let entry;
while ((entry = receiveMessageOnPort(this.port)) !== undefined) {
this._handleResponse(entry.message);
}
}
}
isRunningAbortableTask () : boolean {
// At most one abortable task runs per Worker at a time.
if (this.taskInfos.size !== 1) return false;
const [[, task]] = this.taskInfos;
return task.abortSignal !== null;
}
currentUsage () : number {
if (this.isRunningAbortableTask()) return Infinity;
return this.taskInfos.size;
}
get interface (): PiscinaWorker {
const worker = this;
return {
get id () {
return worker.worker.threadId;
},
get currentUsage () {
return worker.currentUsage();
},
get isRunningAbortableTask () {
return worker.isRunningAbortableTask();
},
get histogram () {
return worker.histogram != null ? PiscinaHistogramHandler.createHistogramSummary(worker.histogram) : null;
},
get terminating () {
return worker.terminating;
},
get destroyed () {
return worker.destroyed;
},
[kWorkerData]: worker
};
}
}
export { AsynchronouslyCreatedResourcePool };
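
The interface getter above is a small but deliberate pattern: instead of snapshotting fields, it returns an object of getters closing over the live WorkerInfo, so load balancers always observe current usage. The same idea reduced to a toy class:

// A mutable backing object and a read-only live view over it.
class Counter {
  value = 0;
  get interface () {
    const self = this;
    return {
      get value () { return self.value; } // reads through to live state
    };
  }
}

const counter = new Counter();
const view = counter.interface;
counter.value = 42;
console.log(view.value); // 42: a live view, not a stale snapshot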