os#cpus TypeScript Examples

The following examples show how to use cpus() from the Node.js os module in TypeScript. Each example is an excerpt from an open-source project; the source file and license are noted above each snippet.
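cpus() returns one descriptor per logical core, so cpus().length is the usual starting point for sizing worker pools, fork counts, and build parallelism. Before the project excerpts, a minimal orientation sketch (the helper name defaultParallelism is illustrative, not taken from any project below):

import { cpus } from 'os';

// cpus() returns one entry per logical core; use its length to size parallel work.
// Newer Node.js releases also expose os.availableParallelism() for the same purpose.
function defaultParallelism(): number {
  return Math.max(1, cpus().length); // guard against an empty result
}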
Example #1
Source File: parseConfig.ts    From web with MIT License
const defaultConfig: Partial<TestRunnerConfig> = {
  rootDir: process.cwd(),
  protocol: 'http:',
  hostname: 'localhost',
  middleware: [],
  plugins: [],
  watch: false,
  concurrentBrowsers: 2,
  concurrency: Math.max(1, cpus().length / 2),
  browserStartTimeout: minuteMs / 2,
  testsStartTimeout: secondMs * 20,
  testsFinishTimeout: minuteMs * 2,
  browserLogs: true,
}
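Here concurrency defaults to half the logical cores reported by cpus(), but never less than 1; minuteMs and secondMs are time constants defined elsewhere in the source file. Note that cpus().length / 2 can be fractional on an odd core count; an integer-only variant (illustrative, not part of the project) would be:

import { cpus } from 'os';

// Illustrative: half the logical cores, floored, never below 1.
const concurrency = Math.max(1, Math.floor(cpus().length / 2));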
Example #2
Source File: manifest.ts    From flatpak-vscode with MIT License
    /**
     * Gets an array of commands for an autotools build
     * - If this is not a rebuild, configure with `configure`
     * - Build with `make`
     * - Install with `make install`
     * @param  {boolean}    rebuild     Whether this is a rebuild
     * @param  {string[]}   buildArgs   The build arguments
     * @param  {string[]}   configOpts  The configuration options
     */
    getAutotoolsCommands(
        rebuild: boolean,
        buildArgs: string[],
        configOpts: string[]
    ): Command[] {
        const numCPUs = cpus().length
        const commands: Command[] = []
        if (!rebuild) {
            commands.push(
                new Command(
                    'flatpak',
                    [
                        'build',
                        ...buildArgs,
                        this.repoDir,
                        './configure',
                        '--prefix=/app',
                        ...configOpts,
                    ],
                    { cwd: this.workspace },
                )
            )
        }
        commands.push(
            new Command(
                'flatpak',
                ['build', ...buildArgs, this.repoDir, 'make', '-p', '-n', '-s'],
                { cwd: this.workspace },
            )
        )

        commands.push(
            new Command(
                'flatpak',
                ['build', ...buildArgs, this.repoDir, 'make', 'V=0', `-j${numCPUs}`, 'install'],
                { cwd: this.workspace },
            )
        )
        return commands
    }
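In this example the CPU count only determines the -j value handed to make during the install step. A standalone sketch of deriving that flag (the argument list is illustrative, not copied from flatpak-vscode):

import { cpus } from 'os';

// Illustrative: let make run one job per logical core.
const jobs = cpus().length;
const makeInstallArgs = ['make', 'V=0', `-j${jobs}`, 'install'];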
Example #3
Source File: serve.ts    From graphql-mesh with MIT License
export async function serveMesh(
  { baseDir, argsPort, getBuiltMesh, logger, rawServeConfig = {}, playgroundTitle }: ServeMeshOptions,
  cliParams: GraphQLMeshCLIParams
) {
  const {
    fork,
    port: configPort,
    hostname = platform() === 'win32' ||
    // is WSL?
    release().toLowerCase().includes('microsoft')
      ? '127.0.0.1'
      : '0.0.0.0',
    cors: corsConfig,
    handlers,
    staticFiles,
    playground: playgroundEnabled = process.env.NODE_ENV !== 'production',
    sslCredentials,
    endpoint: graphqlPath = '/graphql',
    browser,
    trustProxy = 'loopback',
  } = rawServeConfig;
  const port = argsPort || parseInt(process.env.PORT) || configPort || 4000;

  const protocol = sslCredentials ? 'https' : 'http';
  const serverUrl = `${protocol}://${hostname}:${port}`;
  if (!playgroundTitle) {
    playgroundTitle = rawServeConfig?.playgroundTitle || cliParams.playgroundTitle;
  }
  if (!cluster.isWorker && Boolean(fork)) {
    const forkNum = fork > 0 && typeof fork === 'number' ? fork : cpus().length;
    for (let i = 0; i < forkNum; i++) {
      const worker = cluster.fork();
      registerTerminateHandler(eventName => worker.kill(eventName));
    }
    logger.info(`${cliParams.serveMessage}: ${serverUrl} in ${forkNum} forks`);
  } else {
    logger.info(`Generating the unified schema...`);
    let readyFlag = false;
    const mesh$: Promise<MeshInstance> = getBuiltMesh()
      .then(mesh => {
        readyFlag = true;
        dnscache({
          enable: true,
          cache: function CacheCtor({ ttl }: { ttl: number }) {
            return {
              get: (key: string, callback: CallableFunction) =>
                mesh.cache
                  .get(key)
                  .then(value => callback(null, value))
                  .catch(e => callback(e)),
              set: (key: string, value: string, callback: CallableFunction) =>
                mesh.cache
                  .set(key, value, { ttl })
                  .then(() => callback())
                  .catch(e => callback(e)),
            };
          },
        });
        logger.info(`${cliParams.serveMessage}: ${serverUrl}`);
        registerTerminateHandler(eventName => {
          const eventLogger = logger.child(`${eventName}`);
          eventLogger.info(`Destroying the server`);
          mesh.destroy();
        });
        return mesh;
      })
      .catch(e => handleFatalError(e, logger));
    const app = express();
    app.set('trust proxy', trustProxy);
    let httpServer: Server;

    if (sslCredentials) {
      const [key, cert] = await Promise.all([
        fs.promises.readFile(sslCredentials.key, 'utf-8'),
        fs.promises.readFile(sslCredentials.cert, 'utf-8'),
      ]);
      httpServer = createHTTPSServer({ key, cert }, app);
    } else {
      httpServer = createHTTPServer(app);
    }

    registerTerminateHandler(eventName => {
      const eventLogger = logger.child(`${eventName}`);
      eventLogger.debug(`Stopping HTTP Server`);
      httpServer.close(error => {
        if (error) {
          eventLogger.debug(`HTTP Server couldn't be stopped: `, error);
        } else {
          eventLogger.debug(`HTTP Server has been stopped`);
        }
      });
    });

    if (corsConfig) {
      app.use(cors(corsConfig));
    }

    app.use(cookieParser());

    const wsServer = new ws.Server({
      path: graphqlPath,
      server: httpServer,
    });

    registerTerminateHandler(eventName => {
      const eventLogger = logger.child(`${eventName}`);
      eventLogger.debug(`Stopping WebSocket Server`);
      wsServer.close(error => {
        if (error) {
          eventLogger.debug(`WebSocket Server couldn't be stopped: `, error);
        } else {
          eventLogger.debug(`WebSocket Server has been stopped`);
        }
      });
    });

    const { dispose: stopGraphQLWSServer } = useServer(
      {
        onSubscribe: async ({ connectionParams, extra: { request } }, msg) => {
          // spread connectionParams.headers to upgrade request headers.
          // we completely ignore the root connectionParams because
          // [@graphql-tools/url-loader adds the headers inside the "headers" field](https://github.com/ardatan/graphql-tools/blob/9a13357c4be98038c645f6efb26f0584828177cf/packages/loaders/url/src/index.ts#L597)
          for (const [key, value] of Object.entries(connectionParams.headers ?? {})) {
            // dont overwrite existing upgrade headers due to security reasons
            if (!(key.toLowerCase() in request.headers)) {
              request.headers[key.toLowerCase()] = value;
            }
          }
          const { getEnveloped } = await mesh$;
          const { schema, execute, subscribe, contextFactory, parse, validate } = getEnveloped(request);

          const args = {
            schema,
            operationName: msg.payload.operationName,
            document: parse(msg.payload.query),
            variableValues: msg.payload.variables,
            contextValue: await contextFactory(),
            execute,
            subscribe,
          };

          const errors = validate(args.schema, args.document);
          if (errors.length) return errors;

          return args;
        },
        execute: (args: any) => args.execute(args),
        subscribe: (args: any) => args.subscribe(args),
      },
      wsServer
    );

    registerTerminateHandler(eventName => {
      const eventLogger = logger.child(`${eventName}`);
      eventLogger.debug(`Stopping GraphQL WS`);
      Promise.resolve()
        .then(() => stopGraphQLWSServer())
        .then(() => {
          eventLogger.debug(`GraphQL WS has been stopped`);
        })
        .catch(error => {
          eventLogger.debug(`GraphQL WS couldn't be stopped: `, error);
        });
    });

    const pubSubHandler: RequestHandler = (req, _res, next) => {
      mesh$
        .then(({ pubsub }) => {
          req['pubsub'] = pubsub;
          next();
        })
        .catch(e => handleFatalError(e, logger));
    };
    app.use(pubSubHandler);

    const registeredPaths = new Set<string>();
    await Promise.all(
      handlers?.map(async handlerConfig => {
        registeredPaths.add(handlerConfig.path);
        let handlerFn: any;
        const handlerLogger = logger.child(handlerConfig.path);
        if ('handler' in handlerConfig) {
          handlerFn = await loadFromModuleExportExpression<RequestHandler>(handlerConfig.handler, {
            cwd: baseDir,
            defaultExportName: 'default',
            importFn: defaultImportFn,
          });
        } else if ('pubsubTopic' in handlerConfig) {
          handlerFn = (req: any, res: any) => {
            let payload = req.body;
            handlerLogger.debug(`Payload received;`, payload);
            if (handlerConfig.payload) {
              payload = lodashGet(payload, handlerConfig.payload);
              handlerLogger.debug([`Extracting ${handlerConfig.payload};`, payload]);
            }
            const interpolationData = {
              req,
              res,
              payload,
            };
            handlerLogger.debug(`Interpolating ${handlerConfig.pubsubTopic} with `, interpolationData);
            const pubsubTopic = stringInterpolator.parse(handlerConfig.pubsubTopic, interpolationData);
            req['pubsub'].publish(pubsubTopic, payload);
            handlerLogger.debug(`Payload sent to ${pubsubTopic}`);
            res.end();
          };
        }
        app[handlerConfig.method?.toLowerCase() || 'use'](handlerConfig.path, handlerFn);
      }) || []
    );

    app.get('/healthcheck', (_req, res) => res.sendStatus(200));
    app.get('/readiness', (_req, res) => res.sendStatus(readyFlag ? 200 : 500));

    if (staticFiles) {
      app.use(express.static(staticFiles));
      const indexPath = path.join(baseDir, staticFiles, 'index.html');
      if (await pathExists(indexPath)) {
        app.get('/', (_req, res) => res.sendFile(indexPath));
      }
    }

    app.use(graphqlPath, graphqlHandler(mesh$, playgroundTitle, playgroundEnabled));

    app.get('/', (req, res, next) => {
      if (staticFiles) {
        next();
      } else {
        res.redirect(graphqlPath);
      }
    });

    httpServer
      .listen(parseInt(port.toString()), hostname, () => {
        const shouldntOpenBrowser = process.env.NODE_ENV?.toLowerCase() === 'production' || browser === false;
        if (!shouldntOpenBrowser) {
          open(serverUrl, typeof browser === 'string' ? { app: browser } : undefined).catch(() => {});
        }
      })
      .on('error', handleFatalError);

    return mesh$.then(mesh => ({
      mesh,
      httpServer,
      app,
      readyFlag,
      logger,
    }));
  }
  return null;
}
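When fork is enabled but is not a positive number, serveMesh falls back to one worker per logical CPU via cluster.fork(). A minimal sketch of that fork-per-core pattern using only Node's built-in cluster and os modules (the worker branch is left as a placeholder):

import cluster from 'cluster';
import { cpus } from 'os';

if (cluster.isPrimary) {
  // Illustrative: spawn one worker per logical core, as the fork branch above does.
  for (let i = 0; i < cpus().length; i++) {
    cluster.fork();
  }
} else {
  // Each worker process would start its own HTTP server here.
}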
Example #4
Source File: prerender.ts    From vite-plugin-ssr with MIT License
/**
 * Render your pages (e.g. for deploying to a static host).
 * @param partial Allow only a subset of pages to be pre-rendered.
 * @param root The root directory of your project (where `vite.config.js` lives) (default: `process.cwd()`).
 * @param outDir The build directory of your project (default: `dist`).
 */
async function prerender({
  onPagePrerender = null,
  pageContextInit = {},
  partial = false,
  noExtraDir = false,
  root = process.cwd(),
  outDir = 'dist',
  parallel = cpus().length || 1,
  base,
}: {
  onPagePrerender?: Function | null
  pageContextInit?: Record<string, unknown>
  partial?: boolean
  noExtraDir?: boolean
  root?: string
  outDir?: string
  base?: string
  parallel?: number
} = {}) {
  assertArguments({ partial, noExtraDir, base, root, outDir, parallel })
  assert(base === undefined)
  if (!onPagePrerender) {
    console.log(`${cyan(`vite-plugin-ssr ${projectInfo.projectVersion}`)} ${green('pre-rendering HTML...')}`)
  }

  setProductionEnvVar()

  const ssrEnv = {
    isProduction: true as const,
    root,
    outDir,
    viteDevServer: undefined,
    baseUrl: '/',
    baseAssets: null,
  }
  setSsrEnv(ssrEnv)

  const { pluginManifest, pluginManifestPath, outDirPath } = getViteManifest()
  assertPluginManifest(pluginManifest, pluginManifestPath, outDirPath)

  ssrEnv.baseUrl = pluginManifest.base
  setSsrEnv(ssrEnv)

  const concurrencyLimit = pLimit(parallel)

  const globalContext = await getGlobalContext()
  objectAssign(globalContext, {
    _isPreRendering: true as const,
    _usesClientRouter: pluginManifest.usesClientRouter,
    prerenderPageContexts: [] as PageContext[],
  })

  objectAssign(globalContext, pageContextInit)

  const doNotPrerenderList: DoNotPrerenderList = []

  await callPrerenderHooks(globalContext, doNotPrerenderList, concurrencyLimit)

  await handlePagesWithStaticRoutes(globalContext, doNotPrerenderList, concurrencyLimit)

  await callOnBeforePrerenderHook(globalContext)

  const prerenderPageIds: PrerenderedPageIds = {}
  const htmlFiles: HtmlFile[] = []
  await routeAndPrerender(globalContext, htmlFiles, prerenderPageIds, concurrencyLimit, noExtraDir)

  warnContradictoryNoPrerenderList(prerenderPageIds, doNotPrerenderList)

  await prerender404Page(htmlFiles, globalContext)

  if (!onPagePrerender) {
    console.log(`${green(`✓`)} ${htmlFiles.length} HTML documents pre-rendered.`)
  }

  await Promise.all(
    htmlFiles.map((htmlFile) =>
      writeHtmlFile(htmlFile, root, outDir, doNotPrerenderList, concurrencyLimit, onPagePrerender),
    ),
  )

  warnMissingPages(prerenderPageIds, doNotPrerenderList, globalContext, partial)
}
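prerender() defaults parallel to the logical CPU count (falling back to 1 if cpus() reports nothing) and throttles page rendering through pLimit. A minimal sketch of the same throttling pattern, assuming the p-limit npm package; pages and renderPage are hypothetical placeholders:

import { cpus } from 'os';
import pLimit from 'p-limit';

// Illustrative: allow at most one in-flight render per logical core (at least 1).
const limit = pLimit(cpus().length || 1);

async function prerenderAll(pages: string[], renderPage: (page: string) => Promise<void>): Promise<void> {
  // p-limit queues the callbacks so no more than the configured number run at once.
  await Promise.all(pages.map((page) => limit(() => renderPage(page))));
}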
Example #5
Source File: Admiral.ts    From eris-fleet with MIT License
	/**
	 * Creates the sharding manager
	 * @param options Options to configure the sharding manager
	 */
	public constructor(options: Options) {
		super();
		this.objectLogging = options.objectLogging ?? false;
		this.path = options.path;
		this.BotWorker = options.BotWorker;
		this.token = options.token.startsWith("Bot ") ? options.token : `Bot ${options.token}`;
		this.guildsPerShard = options.guildsPerShard ?? "auto";
		this.shardCount = options.shards ?? "auto";
		this.clusterCount = options.clusters ?? "auto";
		this.clientOptions = options.clientOptions ?? {intents: Eris.Constants.Intents.allNonPrivileged};
		this.clusterTimeout = options.clusterTimeout ?? 5e3;
		this.serviceTimeout = options.serviceTimeout ?? 0;
		this.killTimeout = options.killTimeout ?? 10e3;
		this.erisClient = options.customClient ?? Eris.Client;
		this.useCentralRequestHandler = options.useCentralRequestHandler ?? false;
		this.nodeArgs = options.nodeArgs;
		this.statsInterval = options.statsInterval ?? 60e3;
		this.firstShardID = options.firstShardID ?? 0;
		this.lastShardID = options.lastShardID ?? 0;
		this.fetchTimeout = options.fetchTimeout ?? 10e3;
		this.loadClusterCodeImmediately = options.loadCodeImmediately ?? false;
		this.overrideConsole = options.overrideConsole ?? true;
		this.startServicesTogether = options.startServicesTogether ?? false;
		this.maxConcurrencyOverride = options.maxConcurrencyOverride;
		this.maxConcurrency = this.maxConcurrencyOverride ?? 1;
		this.shutdownTogether = options.shutdownTogether ?? false;
		this.broadcastAdmiralEvents = options.broadcastAdmiralEvents ?? true;
		this.maxRestarts = options.maxRestarts ?? 5;
		this.resharding = false;
		this.statsStarted = false;
		if (options.startingStatus) this.startingStatus = options.startingStatus;
		// Deals with needed components
		if (!options.token) throw "No token!";
		if (!options.path && !options.BotWorker) {
			throw "No BotWorker path or class!";
		}
		if (options.path && options.BotWorker) {
			throw "Your options has both a path and BotWorker class! Please use one!";
		}
		if (options.path) {
			if (!path.isAbsolute(options.path)) throw "The path needs to be absolute!";
		}
		if (options.services) {
			options.services.forEach((e) => {
				if (!e.path && !e.ServiceWorker) {
					throw `No path or class for service ${e.name}!`;
				}
				if (e.path && e.ServiceWorker) {
					throw `Service ${e.name} has both a path and class specified! Please only specify one!`;
				}
				if (e.path) {
					if (!path.isAbsolute(e.path)) {
						throw `Path for service ${e.name} needs to be absolute!`;
					}
				}
				if (options.services!.filter((s) => s.name === e.name).length > 1) {
					throw `Duplicate service names for service ${e.name}!`;
				}
			});
		}

		if (options.timeout) this.clientOptions.connectionTimeout = options.timeout;

		const allLogOptions = [
			"gateway_shards",
			"admiral_start",
			"shards_spread",
			"stats_update",
			"all_clusters_launched",
			"all_services_launched",
			"cluster_launch",
			"service_launch",
			"cluster_start",
			"service_start",
			"service_ready",
			"cluster_ready",
			"code_loaded",
			"shard_connect",
			"shard_ready",
			"shard_disconnect",
			"shard_resume",
			"service_restart",
			"cluster_restart",
			"service_shutdown",
			"cluster_shutdown",
			"total_shutdown",
			"resharding_transition_complete",
			"resharding_transition",
			"resharding_worker_killed",
			"concurrency_group_starting"
		] as LoggingOptions[];
		this.whatToLog = allLogOptions;
		if (options.lessLogging) {
			this.whatToLog = [
				"gateway_shards",
				"admiral_start",
				"shard_disconnect",
				"shard_resume",
				"cluster_ready",
				"service_ready",
				"cluster_start",
				"service_start",
				"all_services_launched",
				"all_clusters_launched",
				"total_shutdown",
				"cluster_shutdown",
				"service_shutdown",
				"resharding_transition_complete",
				"concurrency_group_starting"
			];
		}

		if (options.whatToLog) {
			if (options.whatToLog.blacklist) {
				options.whatToLog.blacklist.forEach((t: LoggingOptions) => {
					if (this.whatToLog.includes(t)) {
						this.whatToLog.splice(this.whatToLog.indexOf(t), 1);
					}
				});
			} else if (options.whatToLog.whitelist) {
				this.whatToLog = options.whatToLog.whitelist;
			}
		}
		if (options.services) this.servicesToCreate = options.services;

		this.services = new Collection();
		this.clusters = new Collection();
		this.launchingWorkers = new Collection();
		this.queue = new Queue();
		this.softKills = new Map();
		this.fetches = new Map();
		this.launchingManager = new Map();
		this.connectedClusterGroups = new Map();
		this.clustersSequentialFailedRestarts = new Map();
		this.servicesSequentialFailedRestarts = new Map();
		this.centralStore = new Map();
		// Admiral's simulated ipc
		this.ipc = new IPC({
			fetchTimeout: this.fetchTimeout,
			messageHandler: (initialMessage) => {
				const fakeWorker = {
					id: "master",
					send: (replyMessage: any) => {
						if (replyMessage.op !== "return") return;
						this.ipc.emit(replyMessage.id, replyMessage.value);
					}
				} as FakeWorker;
				this.ipcMessageHandler(fakeWorker, initialMessage);
			}
		});

		if (this.statsInterval !== "disable") {
			this.stats = {
				guilds: 0,
				users: 0,
				members: 0,
				clustersRam: 0,
				servicesRam: 0,
				masterRam: 0,
				totalRam: 0,
				voice: 0,
				largeGuilds: 0,
				shardCount: 0,
				clusters: [],
				services: [],
				timestamp: new Date().getTime(),
				centralRequestHandlerLatencyRef: undefined
			};
		}

		if (this.clusterCount === "auto") this.clusterCount = cpus().length;

		this.eris = new this.erisClient(this.token);

		this.launch();

		if (master.isMaster) {
			master.on("message", (worker, message) => {
				if (message.op) {
					switch (message.op) {
					case "launched": {
						const lr = this.launchingManager.get(worker.id);
						if (lr) {
							if (lr !== "launched") lr.waiting();
							this.launchingManager.delete(worker.id);
						} else {
							this.launchingManager.set(worker.id, "launched");
						}

						break;
					}
					case "connected": {
						const launchedWorker = this.launchingWorkers.get(worker.id);
						if (!launchedWorker) {
							this.error(new Error("launchedWorker is undefined"));
							return;
						}
						if (launchedWorker.cluster) {
							// don't change cluster map if it hasn't restarted yet
							if (!this.softKills.get(worker.id)) {
								this.clusters.set(launchedWorker.cluster.clusterID, {
									workerID: worker.id,
									clusterID: launchedWorker.cluster.clusterID,
									firstShardID: launchedWorker.cluster.firstShardID,
									lastShardID: launchedWorker.cluster.lastShardID,
								});
							}
							this.fetches.forEach((fetch) => {
								process.nextTick(() => worker.send(fetch));
							});
							// Emit a cluster is ready
							this.emit("clusterReady", launchedWorker.cluster);
							if (this.broadcastAdmiralEvents) this.broadcast("clusterReady", launchedWorker.cluster);
						} else if (launchedWorker.service) {
							if (!this.softKills.get(worker.id)) {
								this.services.set(launchedWorker.service.serviceName, {
									workerID: worker.id,
									serviceName: launchedWorker.service.serviceName,
									path: launchedWorker.service.path,
								});
							}
							// Emit a service is ready
							this.emit("serviceReady", launchedWorker.service);
							if (this.broadcastAdmiralEvents) this.broadcast("serviceReady", launchedWorker.service);
						}
						this.launchingWorkers.delete(worker.id);
						if (!this.resharding && !this.softKills.get(worker.id)) {
							worker.send({op: "loadCode"});
						}
						if (this.softKills.get(worker.id)) {
							this.softKills.get(worker.id)?.fn();
						}
						if (this.queue.queue[1]) {
							if (this.queue.queue[1].type === "cluster" && this.queue.queue[0].type === "cluster") {
								const clusterToGroupMap = this.chunkConcurrencyGroups();
								const clusterGroupID = clusterToGroupMap.get(launchedWorker.cluster!.clusterID);
								if (!clusterGroupID && clusterGroupID !== 0) {
									this.error("Error in starting cluster: invalid cluster group ID");
									return;
								}
								const groupConnectedTotal = (this.connectedClusterGroups.get(clusterGroupID) ?? 0) + 1;
								this.connectedClusterGroups.set(clusterGroupID, groupConnectedTotal);
								const groupConnectedMax = Object.entries(clusterToGroupMap).filter(([/*clusterID*/, groupID]) => groupID === clusterGroupID).length;
								if (groupConnectedTotal >= groupConnectedMax) {
									if (this.whatToLog.includes("concurrency_group_starting") && this.maxConcurrency > 1) this.log(`Starting concurrency cluster group ${clusterGroupID + 1}`, "Admiral");
									setTimeout(() => this.queue.execute(), this.clusterTimeout);
								}
								//setTimeout(() => this.queue.execute(), this.clusterTimeout);
							} else if (this.startServicesTogether && this.queue.queue[1].type === "cluster" && this.queue.queue[0].type === "service") {
								// check concurrency for services
								if (this.servicesToCreate) {
									if (this.services.size >= this.servicesToCreate.length) {
										this.queue.execute();
									}
								}
							} else {
								this.queue.execute();
							}
						} else {
							this.queue.execute();
							this.emit("ready");
							if (this.broadcastAdmiralEvents) this.broadcast("ready");
							// clear the connected groups values
							this.connectedClusterGroups.clear();
							// After all clusters and services are ready
							if (this.stats && this.pauseStats) {
								if (!this.resharding) {
									if (!this.statsStarted) this.startStats();
								} else {
									this.pauseStats = false;
								}
							}
						}
						break;
					}
					case "codeLoaded": {
						const cluster = this.clusters.find((c: ClusterCollection) => c.workerID === worker.id);
						const service = this.services.find((s: ServiceCollection) => s.workerID === worker.id);
						if (cluster) {
							this.clustersSequentialFailedRestarts.delete(cluster.clusterID);
							if (this.whatToLog.includes("code_loaded")) this.log("Cluster code loaded", `Cluster ${cluster.clusterID}`);
						} else if (service) {
							this.servicesSequentialFailedRestarts.delete(service.serviceName);
							if (this.whatToLog.includes("service_ready")) this.log(`Service ${service.serviceName} is ready!`);
						}
						break;
					}
					case "shutdown": {
						const workerID = this.queue.queue[0].workerID;
						if (this.softKills.get(workerID)) {
							this.softKills.get(workerID)?.fn();
						}
						// if (!this.queue.queue[1]) this.emit("ready");
						break;
					}
					case "collectStats": {
						if (this.prelimStats && !this.pauseStats) {
							const receivedTimestamp = new Date().getTime();
							const cluster = this.clusters.find((c: ClusterCollection) => c.workerID === worker.id);
							const service = this.services.find((s: ServiceCollection) => s.workerID === worker.id);
							if (cluster) {
								this.prelimStats.guilds += message.stats.guilds;
								this.prelimStats.users += message.stats.users;
								this.prelimStats.members += message.stats.members;
								this.prelimStats.voice += message.stats.voice;
								this.prelimStats.clustersRam += message.stats.ram;
								this.prelimStats.largeGuilds += message.stats.largeGuilds;
								this.prelimStats.shardCount += message.stats.shardStats.length;

								this.prelimStats.clusters.push(
									Object.assign(message.stats, {id: cluster.clusterID, ipcLatency: receivedTimestamp - message.stats.ipcLatency}),
								);
								if (typeof this.statsWorkersCounted === "number") this.statsWorkersCounted++;
							} else if (service) {
								this.prelimStats.servicesRam += message.stats.ram;
								this.prelimStats.services.push(
									Object.assign(message.stats, {name: service.serviceName, ipcLatency: receivedTimestamp - message.stats.ipcLatency}),
								);
								if (typeof this.statsWorkersCounted === "number") this.statsWorkersCounted++;
							}
							this.prelimStats.totalRam += message.stats.ram;
						}
						if (this.statsWorkersCounted === this.clusters.size + this.services.size) {
								this.prelimStats!.masterRam = process.memoryUsage().rss / 1e6;
								this.prelimStats!.totalRam += this.prelimStats!.masterRam;
								const compare = (a: ClusterStats, b: ClusterStats) => {
									if (a.id < b.id) return -1;
									if (a.id > b.id) return 1;
									return 0;
								};
								this.stats = Object.assign(this.prelimStats, {
									clusters: this.prelimStats!.clusters.sort(compare),
									timestamp: new Date().getTime()
								});
								this.collectingStats = false;
								
								if (this.whatToLog.includes("stats_update")) {
									this.log("Stats updated.", "Admiral");
								}

								// Sends the clusters the latest stats
								this.emit("stats", this.stats);
								if (this.broadcastAdmiralEvents) this.broadcast("stats", this.stats);
						}

						break;
					}
					case "centralApiRequest": {
						const data = parseJSON(message.request.dataSerialized);
						this.centralApiRequest(worker, message.request.UUID, data);
						break;
					}
					case "shardUpdate": {
						let liveCluster = true;
						const cluster = this.clusters.get(message.clusterID);
						if (cluster) {
							if (cluster.workerID !== worker.id) {
								liveCluster = false;
							}
						}
						const shardEmit: ShardUpdate = {
							clusterID: message.clusterID,
							shardID: message.shardID,
							liveCluster
						};
						switch (message.type) {
						case "shardReady": {
							if (this.whatToLog.includes("shard_ready")) this.ipcLog("log", `Shard ${message.shardID} is ready!`, worker);
							break;
						}
						case "shardConnect": {
							if (this.whatToLog.includes("shard_connect")) this.ipcLog("log", `Shard ${message.shardID} connected!`, worker);
							break;
						}
						case "shardDisconnect": {
							if (this.whatToLog.includes("shard_disconnect")) this.ipcLog("log", `Shard ${message.shardID} disconnected with error ${message.err}`, worker);
							break;
						}
						case "shardResume": {
							if (this.whatToLog.includes("shard_resume")) this.ipcLog("log", `Shard ${message.shardID} resumed!`, worker);
							break;
						}
						}
						this.emit(message.type, shardEmit);
						if (this.broadcastAdmiralEvents) this.broadcast(message.type, shardEmit);
						break;
					}
					default: {
						this.ipcMessageHandler(worker, message);
						break;
					}
					}
				}
			});

			master.on("disconnect", (worker) => {
				const cluster = this.clusters.find((c: ClusterCollection) => c.workerID === worker.id);
				const service = this.services.find((s: ServiceCollection) => s.workerID === worker.id);
				if (cluster) {
					this.warn(`Cluster ${cluster.clusterID} disconnected :(`, "Admiral");
				} else if (service) {
					this.warn(`Service ${service.serviceName} disconnected :(`, "Admiral");
				}
			});

			master.on("exit", (worker/*, code, signal*/) => {
				const cluster = this.clusters.find((c: ClusterCollection) => c.workerID === worker.id);
				const service = this.services.find((s: ServiceCollection) => s.workerID === worker.id);
				const name = () => {
					if (cluster) {
						return "Cluster " + cluster.clusterID;
					} else if (service) {
						return "Service " + service.serviceName;
					} else {
						return "Worker " + worker.id;
					}
				};

				if (this.softKills.get(worker.id)) {
					this.warn(name() + " died during a soft kill.", "Admiral");
					this.queue.execute();
					this.softKills.get(worker.id)?.fn(true);
				} else {
					// manage failed attempts
					if (this.maxRestarts !== -1) {
						if (cluster) {
							const totalRestarts = this.clustersSequentialFailedRestarts.get(cluster.clusterID) ?? 0;
							if (totalRestarts >= this.maxRestarts) {
								this.warn(`Cluster ${cluster.clusterID} has reached the maximum number of sequential restarts`, "Admiral");
								this.clustersSequentialFailedRestarts.delete(cluster.clusterID);
								// execute queue if the item in 0 failed
								if (this.queue.queue[0].workerID === worker.id) {
									this.queue.execute();
								}
								return;
							}
							this.clustersSequentialFailedRestarts.set(cluster.clusterID, totalRestarts + 1);
						} else if (service) {
							const totalRestarts = this.servicesSequentialFailedRestarts.get(service.serviceName) ?? 0;
							if (totalRestarts >= this.maxRestarts) {
								this.warn(`Service ${service.serviceName} has reached the maximum number of sequential restarts`, "Admiral");
								this.servicesSequentialFailedRestarts.delete(service.serviceName);
								// execute queue if the item in 0 failed
								if (this.queue.queue[0].workerID === worker.id) {
									this.queue.execute();
								}
								return;
							}
							this.servicesSequentialFailedRestarts.set(service.serviceName, totalRestarts + 1);
						}
					}
					
					const restartItem = this.restartWorker(worker);
					if (restartItem) this.queue.item(restartItem);
				}
			});

			this.queue.on("execute", (item: QueueItem/*, prevItem?: QueueItem*/) => {
				const worker = master.workers[item.workerID];
				if (worker) {
					if (item.message.op === "connect") {
						const concurrency = () => {
							if (item.type === "service" && this.startServicesTogether && this.queue.queue[1]) {
								// start services together
								if (this.queue.queue[1].type === "service") {
									const currentServiceName = (item.message as ServiceConnectMessage).serviceName;
									const nextServiceName = (this.queue.queue[1].message as ServiceConnectMessage).serviceName;
									if (currentServiceName !== nextServiceName) {
										this.queue.execute();
									}
								}
							} else if (item.type === "cluster" && this.queue.queue[1]) {
								// start clusters together
								if (this.queue.queue[1].type === "cluster") {
									const currentClusterID = (item.message as ClusterConnectMessage).clusterID;
									const nextClusterID = (this.queue.queue[1].message as ClusterConnectMessage).clusterID;
									const clusterToGroupMap = this.chunkConcurrencyGroups();
									const currentClusterGroup = clusterToGroupMap.get(currentClusterID);
									const nextClusterGroup = clusterToGroupMap.get(nextClusterID);
									if ((currentClusterID & this.maxConcurrency) === 0) {
										if (currentClusterGroup === 0) {
											if (this.whatToLog.includes("concurrency_group_starting") && this.maxConcurrency > 1) this.log(`Starting concurrency cluster group ${currentClusterGroup}`, "Admiral");
										}
									}
									if (currentClusterGroup === nextClusterGroup) {
										this.queue.execute();
									}
								}
							}
						};
						const lr = this.launchingManager.get(item.workerID);
						if (lr) {
							worker.send(item.message);
							this.launchingManager.delete(item.workerID);
							concurrency();
						} else {
							this.launchingManager.set(item.workerID, {
								waiting: () => {
									worker.send(item.message);
									concurrency();
								},
							});
						}
					} else if (item.message.op === "shutdown") {
						worker.send(item.message);
						setTimeout(() => {
							if (this.queue.queue[0]) if (this.queue.queue[0].workerID === item.workerID) {
								const worker = master.workers[item.workerID];
								if (worker) {
									worker.kill();
									const name = () => {
										const cluster = this.clusters.find((c: ClusterCollection) => c.workerID === item.workerID);
										const service = this.services.find((s: ServiceCollection) => s.workerID === item.workerID);
										if (cluster) {
											return "Cluster " + cluster.clusterID;
										} else if (service) {
											return "Service " + service.serviceName;
										} else {
											return "Worker " + item.workerID;
										}
									};
									this.warn("Safe shutdown failed for " + name() + ". Preformed hard shutdown instead.", "Admiral");
									if (this.softKills.get(item.workerID)) {
										this.softKills.get(item.workerID)?.fn(true);
									}
								}
							}
						}, this.killTimeout);
					} else {
						worker.send(item.message);
					}
				}
			});
		}
	}
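As in the other examples, the CPU count is only a default: when clusters is "auto", Admiral spawns one cluster per logical core (this.clusterCount = cpus().length). A minimal sketch of that resolution step (the helper name is illustrative):

import { cpus } from "os";

// Illustrative: resolve an "auto" cluster count to the logical CPU count.
function resolveClusterCount(clusters: number | "auto"): number {
	return clusters === "auto" ? cpus().length : clusters;
}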