Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
91 changes: 50 additions & 41 deletions apps/api/src/controllers/healthcheck.controller.ts
Original file line number Diff line number Diff line change
@@ -1,65 +1,74 @@
import { tryCatch } from '@openpanel/common';
import { chQuery, db } from '@openpanel/db';
import { getRedisCache } from '@openpanel/redis';
import type { FastifyReply, FastifyRequest } from 'fastify';
import { isShuttingDown } from '@/utils/graceful-shutdown';

// For docker compose healthcheck
export async function healthcheck(
request: FastifyRequest,
reply: FastifyReply
) {
try {
const redisRes = await getRedisCache().ping();
const dbRes = await db.$executeRaw`SELECT 1`;
const chRes = await chQuery('SELECT 1');
const status = redisRes && dbRes && chRes ? 200 : 503;

reply.status(status).send({
ready: status === 200,
redis: redisRes === 'PONG',
db: !!dbRes,
ch: chRes && chRes.length > 0,
const [redisResult, dbResult, chResult] = await Promise.all([
tryCatch(async () => (await getRedisCache().ping()) === 'PONG'),
tryCatch(async () => !!(await db.$executeRaw`SELECT 1`)),
tryCatch(async () => (await chQuery('SELECT 1')).length > 0),
]);

const dependencies = {
redis: redisResult.ok && redisResult.data,
db: dbResult.ok && dbResult.data,
ch: chResult.ok && chResult.data,
};
const dependencyErrors = {
redis: redisResult.error?.message,
db: dbResult.error?.message,
ch: chResult.error?.message,
};

const failedDependencies = Object.entries(dependencies)
.filter(([, ok]) => !ok)
.map(([name]) => name);
const workingDependencies = Object.entries(dependencies)
.filter(([, ok]) => ok)
.map(([name]) => name);

const status = failedDependencies.length === 0 ? 200 : 503;

if (status === 200) {
request.log.debug('healthcheck passed', {
workingDependencies,
failedDependencies,
dependencies,
});
} catch (error) {
request.log.warn('healthcheck failed', { error });
return reply.status(503).send({
ready: false,
reason: 'dependencies not ready',
} else {
request.log.warn('healthcheck failed', {
workingDependencies,
failedDependencies,
dependencies,
dependencyErrors,
});
}

return reply.status(status).send({
ready: status === 200,
...dependencies,
failedDependencies,
workingDependencies,
});
}

// Kubernetes - Liveness probe - returns 200 if process is alive
export async function liveness(request: FastifyRequest, reply: FastifyReply) {
// Kubernetes liveness — shallow, event loop only.
export async function liveness(_request: FastifyRequest, reply: FastifyReply) {
return reply.status(200).send({ live: true });
}

// Kubernetes - Readiness probe - returns 200 only when accepting requests, 503 during shutdown
export async function readiness(request: FastifyRequest, reply: FastifyReply) {
// Kubernetes readiness — shallow + shutdown-aware. Dependency health lives on
// /healthcheck so a downstream blip cannot trigger mass pod restarts.
export async function readiness(_request: FastifyRequest, reply: FastifyReply) {
if (isShuttingDown()) {
return reply.status(503).send({ ready: false, reason: 'shutting down' });
}

// Perform lightweight dependency checks for readiness
const redisRes = await getRedisCache().ping();
const dbRes = await db.$executeRaw`SELECT 1`;
const chRes = await chQuery('SELECT 1');

const isReady = redisRes;

if (!isReady) {
const res = {
redis: redisRes === 'PONG',
db: !!dbRes,
ch: chRes && chRes.length > 0,
};
request.log.warn('dependencies not ready', res);
return reply.status(503).send({
ready: false,
reason: 'dependencies not ready',
...res,
});
}

return reply.status(200).send({ ready: true });
}
17 changes: 17 additions & 0 deletions apps/worker/src/boot-workers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,11 @@ import { miscJob } from './jobs/misc';
import { notificationJob } from './jobs/notification';
import { sessionsJob } from './jobs/sessions';
import { eventsGroupJobDuration } from './metrics';
import { setShuttingDown } from './utils/graceful-shutdown';
import {
enableEventsHeartbeat,
markEventsActivity,
} from './utils/worker-heartbeat';
import { logger } from './utils/logger';

const workerOptions: WorkerOptions = {
Expand Down Expand Up @@ -117,6 +122,10 @@ export function bootWorkers() {
}
}

if (eventQueuesToStart.length > 0) {
enableEventsHeartbeat();
}

for (const index of eventQueuesToStart) {
const queue = eventsGroupQueues[index];
if (!queue) {
Expand All @@ -141,6 +150,13 @@ export function bootWorkers() {
},
});

// Consumer-loop heartbeat for the readiness probe. `completed` fires after
// each processed job. NOTE(review): groupmq's Worker is only documented to
// emit `ready`/`completed`/`failed`/`error`/`closed`/`graceful-timeout` — no
// `drained`. If `drained` does not exist, the heartbeat refreshes only while
// jobs flow, and an idle consumer goes stale; confirm the event, or add a
// periodic idle refresh shorter than the readiness threshold.
worker.on('completed', markEventsActivity);
worker.on('drained', markEventsActivity);
Comment on lines +157 to +158
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# Inspect groupmq's GroupWorker implementation for emitted events.
fd -t d '^groupmq$' node_modules 2>/dev/null | head -5
rg -nP "emit\s*\(\s*['\"](completed|drained)" --glob '**/groupmq/**' -C2 2>/dev/null || \
  rg -nP "emit\s*\(\s*['\"](completed|drained)" -C2

Repository: Openpanel-dev/openpanel

Length of output: 49


🌐 Web query:

groupmq 2.0.0-next.4 Worker events completed drained

💡 Result:

For “groupmq 2.0.0-next.4 Worker events completed drained” there isn’t enough evidence in the sources I found to answer what GroupMQ emits for “completed” and “drained” worker events in version 2.0.0-next.4 specifically. What I could verify: 1) GroupMQ is a Redis-backed per-group FIFO queue for Node/TypeScript, with a BullMQ-compatible API (site + README). It provides a Worker class and worker options (including concurrency), but the sources I found do not describe any “worker events” named “completed” or “drained”, nor do they document event payload semantics. 2) The only “completed/drained” event behavior I found is for BullMQ (different library). In BullMQ, there is a “drained” event and it has been discussed/changed regarding when it emits (e.g., only once when the worker is idle; and older versions emitting more often). This does not establish GroupMQ’s 2.0.0-next.4 behavior. 3) I located GroupMQ releases up to 2.0.0-next.1 (2026-03-12). I did not find a 2.0.0-next.4 release page/changelog entry in the searched results, so I couldn’t corroborate “2.0.0-next.4” content. Conclusion: I cannot confidently state what “completed” and “drained” worker events are in groupmq 2.0.0-next.4, or whether “drained” exists / how it fires. If you want, paste the exact code snippet where you attach the listeners (e.g., worker.on('completed'...) and worker.on('drained'...)) or share the groupmq 2.0.0-next.4 changelog / worker source file path, and I can re-run a targeted search against the exact implementation and produce a definitive, version-specific answer.

Citations:


🏁 Script executed:

#!/bin/bash
# 1. Find package files and check groupmq version
fd -type f -name "package*.json" | head -20 | xargs grep -l "groupmq" 2>/dev/null | head -3

# 2. Extract groupmq version from lock/package files
fd -type f -name "package*.json" | xargs cat 2>/dev/null | grep -A 2 '"groupmq"' | head -15

Repository: Openpanel-dev/openpanel

Length of output: 408


🏁 Script executed:

#!/bin/bash
# 3. Read the boot-workers.ts file mentioned in review to see context
cat -n apps/worker/src/boot-workers.ts | sed -n '140,170p'

Repository: Openpanel-dev/openpanel

Length of output: 1416


🏁 Script executed:

#!/bin/bash
# 4. Search for groupmq package in node_modules and find its main export
find node_modules -maxdepth 2 -type d -name groupmq 2>/dev/null | head -1

Repository: Openpanel-dev/openpanel

Length of output: 49


🏁 Script executed:

#!/bin/bash
# 5. Look for GroupWorker class definition in repository
rg -n "class GroupWorker" --type ts --type tsx --type js --type jsx

Repository: Openpanel-dev/openpanel

Length of output: 94


🏁 Script executed:

#!/bin/bash
# Check for groupmq in package locks and version
cat package-lock.json 2>/dev/null | jq '.packages | to_entries[] | select(.key | contains("groupmq")) | {key, version: .value.version}' 2>/dev/null || \
cat pnpm-lock.yaml 2>/dev/null | grep -A 2 "groupmq" | head -10 || \
grep "groupmq" yarn.lock 2>/dev/null | head -5

Repository: Openpanel-dev/openpanel

Length of output: 49


🏁 Script executed:

#!/bin/bash
# Find groupmq location (likely in node_modules)
find node_modules -maxdepth 2 -name "groupmq" -type d 2>/dev/null

Repository: Openpanel-dev/openpanel

Length of output: 49


🏁 Script executed:

#!/bin/bash
# Search for GroupWorker class definition - use correct rg syntax
rg -n "class GroupWorker" -t ts -t js

Repository: Openpanel-dev/openpanel

Length of output: 49


🏁 Script executed:

#!/bin/bash
# Search for groupmq mentions anywhere in the repo
rg "groupmq" -i | head -20

Repository: Openpanel-dev/openpanel

Length of output: 2333


🏁 Script executed:

#!/bin/bash
# Check package.json structure
ls -la package.json 2>/dev/null && head -30 package.json

Repository: Openpanel-dev/openpanel

Length of output: 1319


🏁 Script executed:

#!/bin/bash
# Find where GroupWorker is imported from
rg "GroupWorker" -B 2 -A 2 | head -40

Repository: Openpanel-dev/openpanel

Length of output: 1407


🏁 Script executed:

#!/bin/bash
# Check if groupmq is a workspace package (internal) or external dependency
ls -la packages/groupmq 2>/dev/null || echo "Not found in packages"
cat pnpm-workspace.yaml | grep -A 20 "packages:"

Repository: Openpanel-dev/openpanel

Length of output: 385


🏁 Script executed:

#!/bin/bash
# Check the actual groupmq source - look for it in node_modules or as workspace package
find . -path ./node_modules -prune -o -type f -name "*.ts" -print | xargs grep -l "class GroupWorker\|export.*GroupWorker" 2>/dev/null | head -5

Repository: Openpanel-dev/openpanel

Length of output: 49


🌐 Web query:

Openpanel-dev groupmq 2.0.0-next.4 Worker class events completed drained emit

💡 Result:

For Openpanel-dev/groupmq, the “drained” and “completed” worker/job lifecycle events you mention are not found in the official GroupMQ docs/pages returned by the research. The only worker events explicitly documented in the Openpanel-dev/groupmq README/docs are: ready, completed, failed, error, closed, and graceful-timeout. Additionally, GroupMQ’s documented job completion flow includes moving a successfully processed job into a :completed retention sorted set and unlocking the group lock, but this is internal state management—not an exposed “drained” event. So, based on the sources located for this query, there is no evidence of a “Worker class events … drained … completed … emit” sequence in groupmq 2.0.0-next.4. What is evidenced: - Worker emits documented events including completed (and other events), and drained is not listed. - Job completion flow marks the job as completed in the queue’s :completed sorted set and unlocks the group. If you can share the exact file/snippet from groupmq 2.0.0-next.4 you’re looking at (or the expected event names), I can re-check precisely whether drained exists under a different name, is part of QueueEvents (not Worker), or was removed/changed in the 2.0.0-next.x line.

Citations:


🏁 Script executed:

#!/bin/bash
# Look at node_modules to find groupmq source
ls -la node_modules/groupmq 2>/dev/null | head -20
# If it exists, check main entry point
cat node_modules/groupmq/package.json 2>/dev/null | jq '.main, .exports'

Repository: Openpanel-dev/openpanel

Length of output: 49


🏁 Script executed:

#!/bin/bash
# Check the actual event listeners being used in boot-workers.ts
grep -n "worker.on\|markEventsActivity" apps/worker/src/boot-workers.ts | head -20

Repository: Openpanel-dev/openpanel

Length of output: 193


🏁 Script executed:

#!/bin/bash
# Try to find groupmq Worker implementation in node_modules
find node_modules/groupmq -name "*.js" -o -name "*.ts" 2>/dev/null | head -20

Repository: Openpanel-dev/openpanel

Length of output: 49


🌐 Web query:

site:github.com/Openpanel-dev/groupmq Worker class emit events implementation

💡 Result:

In libraries like GroupMQ (a BullMQ-inspired Redis job queue for Node.js/TypeScript), the Worker class emits events such as 'ready', 'completed', 'failed', 'error', 'closed', and 'graceful-timeout'. These are listened to using standard Node.js EventEmitter syntax: worker.on('event', handler). Example implementation from GroupMQ documentation: worker.on('ready', => { console.log('Worker is ready'); }); worker.on('completed', (job) => { console.log('Job completed:', job.id); }); worker.on('failed', (job) => { console.log('Job failed:', job.id, job.failedReason); }); worker.on('error', (error) => { console.error('Worker error:', error); }); worker.on('closed', => { console.log('Worker closed'); }); worker.on('graceful-timeout', (job) => { console.log('Job exceeded graceful timeout:', job.id); }); // Remove listeners worker.off('completed', handler); worker.removeAllListeners; The Worker class extends Node.js EventEmitter, allowing it to emit these lifecycle and job status events for monitoring and handling in queue processing systems.

Citations:


🏁 Script executed:

#!/bin/bash
# Check the actual GroupMQ source code for Worker.emit calls
# GroupMQ is at catalog version 2.0.0-next.4, search for its source on GitHub
# Let's look at the local boot-workers more carefully to understand the context
cat -n apps/worker/src/boot-workers.ts | sed -n '1,50p'

Repository: Openpanel-dev/openpanel

Length of output: 2182


🏁 Script executed:

#!/bin/bash
# Check what markEventsActivity does and how often it's called
rg -n "markEventsActivity" -B 5 -A 5 apps/worker/src/

Repository: Openpanel-dev/openpanel

Length of output: 2485


🌐 Web query:

site:github.com/Openpanel-dev/groupmq "drained" event Worker emit

💡 Result:

No evidence found of a "drained" event emitted by a Worker in any job queue library, including BullMQ, groupmq, Bee-Queue, or others. Searches across official documentation and code repositories (primarily groupmq README and BullMQ-inspired libraries) show Worker events like 'ready', 'completed', 'failed', 'error', 'closed', and 'graceful-timeout', but no 'drained' event. The query may refer to a custom implementation, a different library, or a misremembered event name. In Bee-Queue, 'drained' exists but as a queue event when no jobs remain, not emitted by Worker.

Citations:


🏁 Script executed:

#!/bin/bash
# Let's check the actual GroupMQ source from the commit referenced in web results
# Look for any emit calls in groupmq that mention "drained"
rg "emit.*drained" --type ts --type js 2>/dev/null || echo "No 'drained' emit found"

# Also check what events ARE emitted by the Worker class
rg "this\.emit|worker\.emit" apps/worker/src/boot-workers.ts

Repository: Openpanel-dev/openpanel

Length of output: 90


🌐 Web query:

site:github.com/Openpanel-dev/groupmq Worker.emit all events 2.0.0-next

💡 Result:

In the groupmq library (a Redis-backed per-group FIFO queue for Node.js/TypeScript), Worker instances extend TypedEventEmitter and emit the following events: - 'ready': Emitted when the worker is ready. - 'completed': Emitted when a job completes, with the Job object. - 'failed': Emitted when a job fails, with the Job object and failedReason. - 'error': Emitted on worker errors, with the Error. - 'closed': Emitted when the worker closes. - 'graceful-timeout': Emitted when a job exceeds graceful timeout, with the Job object. No version 2.0.0-next or Cloudflare Workers context found matching the query. Latest groupmq version is v1.1.0 (Nov 2025), with next tags like 1.1.1-next.1. Use worker.on(event, handler) to listen and worker.off or worker.removeAllListeners to remove.

Citations:


Remove the worker.on('drained') listener — the event does not exist in groupmq 2.0.0-next.4.

The groupmq Worker class emits only: 'ready', 'completed', 'failed', 'error', 'closed', and 'graceful-timeout'. There is no 'drained' event. This means the heartbeat will only refresh when jobs complete; during idle periods (no jobs in queue), markEventsActivity() never fires, and the readiness probe will report 503 after 60 seconds.

Either implement a separate idle heartbeat mechanism (e.g., a periodic timer that calls markEventsActivity() at shorter intervals) or accept that the readiness probe will cycle to unhealthy during sustained idle periods.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@apps/worker/src/boot-workers.ts` around lines 157 - 158, Remove the
non-existent drained listener (the worker.on('drained', markEventsActivity)
line) and instead implement a periodic idle heartbeat: set up a timer (e.g.,
setInterval) that calls markEventsActivity at a safe interval shorter than the
readiness timeout (e.g., 30s) so idle workers stay healthy; start this timer
when the Worker (groupmq Worker) is initialized and ensure you clear it on
terminal events such as 'closed', 'error' or 'graceful-timeout' to avoid leaks,
leaving the existing worker.on('completed', markEventsActivity) intact for job
completion updates.


worker.run();
workers.push(worker);
logger.info(`Started worker for ${queueName}`, { concurrency });
Expand Down Expand Up @@ -337,6 +353,7 @@ export function bootWorkers() {
['uncaughtException', 'unhandledRejection', 'SIGTERM', 'SIGINT'].forEach(
(evt) => {
process.on(evt, (code) => {
setShuttingDown(true);
exitHandler(evt, code);
});
}
Expand Down
84 changes: 81 additions & 3 deletions apps/worker/src/index.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import { createBullBoard } from '@bull-board/api';
import { BullMQAdapter } from '@bull-board/api/bullMQAdapter';
import { ExpressAdapter } from '@bull-board/express';
import { createInitialSalts } from '@openpanel/db';
import { tryCatch } from '@openpanel/common';
import { chQuery, createInitialSalts, db } from '@openpanel/db';
import {
cohortComputeQueue,
cronQueue,
Expand All @@ -13,14 +14,19 @@ import {
notificationQueue,
sessionsQueue,
} from '@openpanel/queue';
import { getRedisCache } from '@openpanel/redis';
import express from 'express';
import { BullBoardGroupMQAdapter } from 'groupmq';
import client from 'prom-client';
import sourceMapSupport from 'source-map-support';
import { bootCron } from './boot-cron';
import { bootWorkers } from './boot-workers';
import { register } from './metrics';
import { isShuttingDown } from './utils/graceful-shutdown';
import { logger } from './utils/logger';
import { getEventsHeartbeat } from './utils/worker-heartbeat';

const EVENTS_HEARTBEAT_STALE_MS = 60_000;

sourceMapSupport.install();

Expand Down Expand Up @@ -69,8 +75,80 @@ async function start() {
});
});

app.get('/healthcheck', (req, res) => {
res.json({ status: 'ok' });
app.get('/healthcheck', async (req, res) => {
const [redisResult, dbResult, chResult] = await Promise.all([
tryCatch(async () => (await getRedisCache().ping()) === 'PONG'),
tryCatch(async () => !!(await db.$executeRaw`SELECT 1`)),
tryCatch(async () => (await chQuery('SELECT 1')).length > 0),
]);

const dependencies = {
redis: redisResult.ok && redisResult.data,
db: dbResult.ok && dbResult.data,
ch: chResult.ok && chResult.data,
};
const dependencyErrors = {
redis: redisResult.error?.message,
db: dbResult.error?.message,
ch: chResult.error?.message,
};

const failedDependencies = Object.entries(dependencies)
.filter(([, ok]) => !ok)
.map(([name]) => name);
const workingDependencies = Object.entries(dependencies)
.filter(([, ok]) => ok)
.map(([name]) => name);

const status = failedDependencies.length === 0 ? 200 : 503;

if (status !== 200) {
logger.warn('healthcheck failed', {
workingDependencies,
failedDependencies,
dependencies,
dependencyErrors,
});
}

res.status(status).json({
ready: status === 200,
...dependencies,
failedDependencies,
workingDependencies,
});
});

// Kubernetes liveness — shallow, event loop only.
app.get('/healthz/live', (req, res) => {
res.status(200).json({ live: true });
});

// Kubernetes readiness — shallow + shutdown-aware. When events workers run
// on this instance, also require the events consumer-loop heartbeat to be
// fresh (refreshed on each `completed`/`drained` event). If events are not
// enabled here, the heartbeat check is skipped.
app.get('/healthz/ready', (req, res) => {
if (isShuttingDown()) {
res.status(503).json({ ready: false, reason: 'shutting down' });
return;
}

const { enabled, lastActivityAt } = getEventsHeartbeat();
if (enabled) {
const idleMs = Date.now() - lastActivityAt;
if (idleMs > EVENTS_HEARTBEAT_STALE_MS) {
res.status(503).json({
ready: false,
reason: 'events consumer heartbeat stale',
idleMs,
thresholdMs: EVENTS_HEARTBEAT_STALE_MS,
});
return;
}
}

res.status(200).json({ ready: true });
});
Comment on lines +131 to 152
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Startup race: /healthz/ready returns 200 before workers are booted.

app.listen(PORT, …) at line 154 runs before bootWorkers() at line 159, and enableEventsHeartbeat() is only called inside bootWorkers. In the window between the HTTP server starting and bootWorkers() completing, enabled is false, so this endpoint reports ready even though no events consumer is running yet. If K8s scrapes readiness in that window it may route traffic / mark the pod ready prematurely.

Consider flipping an explicit "booted" flag after bootWorkers()/bootCron() resolve and returning 503 until then, or starting the HTTP server after boot.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@apps/worker/src/index.ts` around lines 131 - 152, The readiness endpoint
/healthz/ready can return 200 before workers are booted because app.listen(PORT,
...) starts the HTTP server prior to bootWorkers()/bootCron() calling
enableEventsHeartbeat(); change the startup sequencing so readiness is false
until boot complete: either delay app.listen until after
bootWorkers()/bootCron() resolve or add an explicit booted flag (e.g.,
bootComplete = false -> true after bootWorkers()/bootCron() finish) and update
the /healthz/ready handler (which currently uses isShuttingDown() and
getEventsHeartbeat()/EVENTS_HEARTBEAT_STALE_MS) to return 503 when bootComplete
is false; ensure the flag is set only after enableEventsHeartbeat() is active so
the endpoint never reports ready before workers are running.


app.listen(PORT, () => {
Expand Down
9 changes: 9 additions & 0 deletions apps/worker/src/utils/graceful-shutdown.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
// Process-wide shutdown flag, shared between the signal/exit handlers and
// the readiness probe so the pod stops receiving traffic while draining.
let inShutdown = false;

/** Record that a shutdown has started (or been cancelled). */
export function setShuttingDown(value: boolean) {
  inShutdown = value;
}

/** Whether this process is currently shutting down. */
export function isShuttingDown() {
  return inShutdown;
}
23 changes: 23 additions & 0 deletions apps/worker/src/utils/worker-heartbeat.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
// Consumer-loop heartbeat, scoped to the events queue.
//
// Enabled only on instances that run events workers; refreshed whenever an
// events worker reports activity (currently wired to job `completed`).
// NOTE(review): groupmq's Worker is only documented to emit `ready`,
// `completed`, `failed`, `error`, `closed` and `graceful-timeout` — there is
// no `drained` event, so an idle-but-healthy consumer does NOT refresh this
// timestamp on empty poll cycles. Pair this with a periodic idle refresh (or
// a probe threshold that tolerates idle gaps) before treating staleness as
// "consumer wedged".

// True once events workers have been booted on this instance.
let enabled = false;
// Epoch ms of the most recent observed consumer activity.
let lastActivityAt = Date.now();

/** Arm the heartbeat; call once when events workers start. */
export function enableEventsHeartbeat() {
  enabled = true;
  lastActivityAt = Date.now();
}

/** Record consumer-loop activity (attached to worker lifecycle events). */
export function markEventsActivity() {
  lastActivityAt = Date.now();
}

/** Snapshot for the readiness probe. */
export function getEventsHeartbeat(): {
  enabled: boolean;
  lastActivityAt: number;
} {
  return { enabled, lastActivityAt };
}
1 change: 1 addition & 0 deletions packages/common/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,5 @@ export * from './src/url';
export * from './src/id';
export * from './src/get-previous-metric';
export * from './src/group-by-labels';
export * from './src/try-catch';
export * from './server/get-client-ip';
15 changes: 15 additions & 0 deletions packages/common/src/try-catch.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
/**
 * Discriminated outcome of a caught async computation: `ok` is the
 * discriminant, and exactly one of `data` (success) or `error` (failure)
 * is non-null.
 */
export type TryCatchResult<T, E = Error> =
  | { ok: true; data: T; error: null }
  | { ok: false; data: null; error: E };

/**
 * Await a promise — or invoke a promise-returning thunk — and fold the
 * outcome into a {@link TryCatchResult} instead of throwing.
 */
export async function tryCatch<T, E = Error>(
  input: (() => Promise<T>) | Promise<T>,
): Promise<TryCatchResult<T, E>> {
  try {
    // Invoke the thunk inside `try` so a synchronous throw is captured too.
    const data = await (typeof input === 'function' ? input() : input);
    return { ok: true, data, error: null };
  } catch (caught) {
    return { ok: false, data: null, error: caught as E };
  }
}
16 changes: 8 additions & 8 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion pnpm-workspace.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ catalog:
"@types/react-dom": ^19.2.3
"@types/node": ^24.7.1
typescript: ^5.9.3
groupmq: 2.0.0-next.3
groupmq: 2.0.0-next.4
Loading