Compare commits
5 Commits
21c045559d
...
v0.2.0
| Author | SHA1 | Date | |
|---|---|---|---|
| 472f046a85 | |||
| dfaf5a52df | |||
| 93b3322e45 | |||
| a532fd43b2 | |||
| 701bb69e6c |
@@ -42,6 +42,7 @@
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"@sinclair/typebox": "^0.34.48",
|
||||
"better-auth": "^1.5.5",
|
||||
"bullmq": "^5.71.0",
|
||||
"class-transformer": "^0.5.1",
|
||||
"class-validator": "^0.15.1",
|
||||
"dotenv": "^17.3.1",
|
||||
|
||||
377
apps/gateway/src/__tests__/session-hardening.test.ts
Normal file
377
apps/gateway/src/__tests__/session-hardening.test.ts
Normal file
@@ -0,0 +1,377 @@
|
||||
/**
|
||||
* M5-008: Session hardening verification tests.
|
||||
*
|
||||
* Verifies:
|
||||
* 1. /model command switches model → session:info reflects updated modelId
|
||||
* 2. /agent command switches agent config → system prompt / agentName changes
|
||||
* 3. Session resume binds to a conversation (history injected via conversationHistory option)
|
||||
* 4. Session metrics track token usage and message count correctly
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import type {
|
||||
AgentSession,
|
||||
AgentSessionOptions,
|
||||
ConversationHistoryMessage,
|
||||
} from '../agent/agent.service.js';
|
||||
import type { SessionInfoDto, SessionMetrics, SessionTokenMetrics } from '../agent/session.dto.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers — minimal AgentSession fixture
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeMetrics(overrides?: Partial<SessionMetrics>): SessionMetrics {
|
||||
return {
|
||||
tokens: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
|
||||
modelSwitches: 0,
|
||||
messageCount: 0,
|
||||
lastActivityAt: new Date().toISOString(),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeSession(overrides?: Partial<AgentSession>): AgentSession {
|
||||
return {
|
||||
id: 'session-001',
|
||||
provider: 'anthropic',
|
||||
modelId: 'claude-3-5-sonnet-20241022',
|
||||
piSession: {} as AgentSession['piSession'],
|
||||
listeners: new Set(),
|
||||
unsubscribe: vi.fn(),
|
||||
createdAt: Date.now(),
|
||||
promptCount: 0,
|
||||
channels: new Set(),
|
||||
skillPromptAdditions: [],
|
||||
sandboxDir: '/tmp',
|
||||
allowedTools: null,
|
||||
metrics: makeMetrics(),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function sessionToInfo(session: AgentSession): SessionInfoDto {
|
||||
return {
|
||||
id: session.id,
|
||||
provider: session.provider,
|
||||
modelId: session.modelId,
|
||||
...(session.agentName ? { agentName: session.agentName } : {}),
|
||||
createdAt: new Date(session.createdAt).toISOString(),
|
||||
promptCount: session.promptCount,
|
||||
channels: Array.from(session.channels),
|
||||
durationMs: Date.now() - session.createdAt,
|
||||
metrics: { ...session.metrics },
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Replicated AgentService methods (tested in isolation without full DI setup)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function updateSessionModel(session: AgentSession, modelId: string): void {
|
||||
session.modelId = modelId;
|
||||
session.metrics.modelSwitches += 1;
|
||||
session.metrics.lastActivityAt = new Date().toISOString();
|
||||
}
|
||||
|
||||
function applyAgentConfig(
|
||||
session: AgentSession,
|
||||
agentConfigId: string,
|
||||
agentName: string,
|
||||
modelId?: string,
|
||||
): void {
|
||||
session.agentConfigId = agentConfigId;
|
||||
session.agentName = agentName;
|
||||
if (modelId) {
|
||||
updateSessionModel(session, modelId);
|
||||
}
|
||||
}
|
||||
|
||||
function recordTokenUsage(session: AgentSession, tokens: SessionTokenMetrics): void {
|
||||
session.metrics.tokens.input += tokens.input;
|
||||
session.metrics.tokens.output += tokens.output;
|
||||
session.metrics.tokens.cacheRead += tokens.cacheRead;
|
||||
session.metrics.tokens.cacheWrite += tokens.cacheWrite;
|
||||
session.metrics.tokens.total += tokens.total;
|
||||
session.metrics.lastActivityAt = new Date().toISOString();
|
||||
}
|
||||
|
||||
function recordMessage(session: AgentSession): void {
|
||||
session.metrics.messageCount += 1;
|
||||
session.metrics.lastActivityAt = new Date().toISOString();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 1. /model command — switches model → session:info updated
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('/model command — model switch reflected in session:info', () => {
|
||||
let session: AgentSession;
|
||||
|
||||
beforeEach(() => {
|
||||
session = makeSession();
|
||||
});
|
||||
|
||||
it('updates modelId when /model is called with a model name', () => {
|
||||
updateSessionModel(session, 'claude-opus-4-5-20251001');
|
||||
|
||||
expect(session.modelId).toBe('claude-opus-4-5-20251001');
|
||||
});
|
||||
|
||||
it('increments modelSwitches metric after /model command', () => {
|
||||
expect(session.metrics.modelSwitches).toBe(0);
|
||||
|
||||
updateSessionModel(session, 'gpt-4o');
|
||||
expect(session.metrics.modelSwitches).toBe(1);
|
||||
|
||||
updateSessionModel(session, 'claude-3-5-sonnet-20241022');
|
||||
expect(session.metrics.modelSwitches).toBe(2);
|
||||
});
|
||||
|
||||
it('session:info DTO reflects the new modelId after switch', () => {
|
||||
updateSessionModel(session, 'claude-haiku-3-5-20251001');
|
||||
|
||||
const info = sessionToInfo(session);
|
||||
|
||||
expect(info.modelId).toBe('claude-haiku-3-5-20251001');
|
||||
expect(info.metrics.modelSwitches).toBe(1);
|
||||
});
|
||||
|
||||
it('lastActivityAt is updated after model switch', () => {
|
||||
const before = session.metrics.lastActivityAt;
|
||||
// Ensure at least 1ms passes
|
||||
vi.setSystemTime(Date.now() + 1);
|
||||
updateSessionModel(session, 'new-model');
|
||||
vi.useRealTimers();
|
||||
|
||||
expect(session.metrics.lastActivityAt).not.toBe(before);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 2. /agent command — switches agent config → system prompt / agentName updated
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('/agent command — agent config applied to session', () => {
|
||||
let session: AgentSession;
|
||||
|
||||
beforeEach(() => {
|
||||
session = makeSession();
|
||||
});
|
||||
|
||||
it('sets agentConfigId and agentName on the session', () => {
|
||||
applyAgentConfig(session, 'agent-uuid-001', 'CodeReviewer');
|
||||
|
||||
expect(session.agentConfigId).toBe('agent-uuid-001');
|
||||
expect(session.agentName).toBe('CodeReviewer');
|
||||
});
|
||||
|
||||
it('also updates modelId when agent config carries a model', () => {
|
||||
applyAgentConfig(session, 'agent-uuid-002', 'DataAnalyst', 'gpt-4o-mini');
|
||||
|
||||
expect(session.agentName).toBe('DataAnalyst');
|
||||
expect(session.modelId).toBe('gpt-4o-mini');
|
||||
expect(session.metrics.modelSwitches).toBe(1);
|
||||
});
|
||||
|
||||
it('does NOT update modelId when agent config has no model', () => {
|
||||
const originalModel = session.modelId;
|
||||
applyAgentConfig(session, 'agent-uuid-003', 'Planner', undefined);
|
||||
|
||||
expect(session.modelId).toBe(originalModel);
|
||||
expect(session.metrics.modelSwitches).toBe(0);
|
||||
});
|
||||
|
||||
it('session:info DTO reflects agentName after /agent switch', () => {
|
||||
applyAgentConfig(session, 'agent-uuid-004', 'DevBot');
|
||||
|
||||
const info = sessionToInfo(session);
|
||||
|
||||
expect(info.agentName).toBe('DevBot');
|
||||
});
|
||||
|
||||
it('multiple /agent calls update to the latest agent', () => {
|
||||
applyAgentConfig(session, 'agent-001', 'FirstAgent');
|
||||
applyAgentConfig(session, 'agent-002', 'SecondAgent');
|
||||
|
||||
expect(session.agentConfigId).toBe('agent-002');
|
||||
expect(session.agentName).toBe('SecondAgent');
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 3. Session resume — binds to conversation via conversationHistory
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('Session resume — binds to conversation', () => {
|
||||
it('conversationHistory option is preserved in session options', () => {
|
||||
const history: ConversationHistoryMessage[] = [
|
||||
{
|
||||
role: 'user',
|
||||
content: 'Hello, what is TypeScript?',
|
||||
createdAt: new Date('2026-01-01T00:01:00Z'),
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: 'TypeScript is a typed superset of JavaScript.',
|
||||
createdAt: new Date('2026-01-01T00:01:05Z'),
|
||||
},
|
||||
];
|
||||
|
||||
const options: AgentSessionOptions = {
|
||||
conversationHistory: history,
|
||||
provider: 'anthropic',
|
||||
modelId: 'claude-3-5-sonnet-20241022',
|
||||
};
|
||||
|
||||
expect(options.conversationHistory).toHaveLength(2);
|
||||
expect(options.conversationHistory![0]!.role).toBe('user');
|
||||
expect(options.conversationHistory![1]!.role).toBe('assistant');
|
||||
});
|
||||
|
||||
it('session with conversationHistory option carries the conversation binding', () => {
|
||||
const CONV_ID = 'conv-resume-001';
|
||||
const history: ConversationHistoryMessage[] = [
|
||||
{ role: 'user', content: 'Prior question', createdAt: new Date('2026-01-01T00:01:00Z') },
|
||||
];
|
||||
|
||||
// Simulate what ChatGateway does: pass conversationId + history to createSession
|
||||
const options: AgentSessionOptions = {
|
||||
conversationHistory: history,
|
||||
};
|
||||
|
||||
// The session ID is the conversationId in the gateway
|
||||
const session = makeSession({ id: CONV_ID });
|
||||
|
||||
expect(session.id).toBe(CONV_ID);
|
||||
expect(options.conversationHistory).toHaveLength(1);
|
||||
});
|
||||
|
||||
it('empty conversationHistory is valid (new conversation)', () => {
|
||||
const options: AgentSessionOptions = {
|
||||
conversationHistory: [],
|
||||
};
|
||||
|
||||
expect(options.conversationHistory).toHaveLength(0);
|
||||
});
|
||||
|
||||
it('resumed session preserves all message roles', () => {
|
||||
const history: ConversationHistoryMessage[] = [
|
||||
{ role: 'system', content: 'You are a helpful assistant.', createdAt: new Date() },
|
||||
{ role: 'user', content: 'Question 1', createdAt: new Date() },
|
||||
{ role: 'assistant', content: 'Answer 1', createdAt: new Date() },
|
||||
{ role: 'user', content: 'Question 2', createdAt: new Date() },
|
||||
];
|
||||
|
||||
const roles = history.map((m) => m.role);
|
||||
expect(roles).toEqual(['system', 'user', 'assistant', 'user']);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// 4. Session metrics — token usage and message count
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('Session metrics — token usage and message count', () => {
|
||||
let session: AgentSession;
|
||||
|
||||
beforeEach(() => {
|
||||
session = makeSession();
|
||||
});
|
||||
|
||||
it('starts with zero metrics', () => {
|
||||
expect(session.metrics.tokens.input).toBe(0);
|
||||
expect(session.metrics.tokens.output).toBe(0);
|
||||
expect(session.metrics.tokens.total).toBe(0);
|
||||
expect(session.metrics.messageCount).toBe(0);
|
||||
expect(session.metrics.modelSwitches).toBe(0);
|
||||
});
|
||||
|
||||
it('accumulates token usage across multiple turns', () => {
|
||||
recordTokenUsage(session, {
|
||||
input: 100,
|
||||
output: 50,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
total: 150,
|
||||
});
|
||||
recordTokenUsage(session, {
|
||||
input: 200,
|
||||
output: 80,
|
||||
cacheRead: 10,
|
||||
cacheWrite: 5,
|
||||
total: 295,
|
||||
});
|
||||
|
||||
expect(session.metrics.tokens.input).toBe(300);
|
||||
expect(session.metrics.tokens.output).toBe(130);
|
||||
expect(session.metrics.tokens.cacheRead).toBe(10);
|
||||
expect(session.metrics.tokens.cacheWrite).toBe(5);
|
||||
expect(session.metrics.tokens.total).toBe(445);
|
||||
});
|
||||
|
||||
it('increments message count with each recordMessage call', () => {
|
||||
expect(session.metrics.messageCount).toBe(0);
|
||||
|
||||
recordMessage(session);
|
||||
expect(session.metrics.messageCount).toBe(1);
|
||||
|
||||
recordMessage(session);
|
||||
recordMessage(session);
|
||||
expect(session.metrics.messageCount).toBe(3);
|
||||
});
|
||||
|
||||
it('session:info DTO exposes correct metrics snapshot', () => {
|
||||
recordTokenUsage(session, {
|
||||
input: 500,
|
||||
output: 100,
|
||||
cacheRead: 20,
|
||||
cacheWrite: 10,
|
||||
total: 630,
|
||||
});
|
||||
recordMessage(session);
|
||||
recordMessage(session);
|
||||
updateSessionModel(session, 'claude-haiku-3-5-20251001');
|
||||
|
||||
const info = sessionToInfo(session);
|
||||
|
||||
expect(info.metrics.tokens.input).toBe(500);
|
||||
expect(info.metrics.tokens.output).toBe(100);
|
||||
expect(info.metrics.tokens.total).toBe(630);
|
||||
expect(info.metrics.messageCount).toBe(2);
|
||||
expect(info.metrics.modelSwitches).toBe(1);
|
||||
});
|
||||
|
||||
it('metrics are independent per session', () => {
|
||||
const sessionA = makeSession({ id: 'session-A' });
|
||||
const sessionB = makeSession({ id: 'session-B' });
|
||||
|
||||
recordTokenUsage(sessionA, { input: 100, output: 50, cacheRead: 0, cacheWrite: 0, total: 150 });
|
||||
recordMessage(sessionA);
|
||||
|
||||
// Session B should remain at zero
|
||||
expect(sessionB.metrics.tokens.input).toBe(0);
|
||||
expect(sessionB.metrics.messageCount).toBe(0);
|
||||
|
||||
// Session A should have updated values
|
||||
expect(sessionA.metrics.tokens.input).toBe(100);
|
||||
expect(sessionA.metrics.messageCount).toBe(1);
|
||||
});
|
||||
|
||||
it('lastActivityAt is updated after recording tokens', () => {
|
||||
const before = session.metrics.lastActivityAt;
|
||||
vi.setSystemTime(new Date(Date.now() + 100));
|
||||
recordTokenUsage(session, { input: 10, output: 5, cacheRead: 0, cacheWrite: 0, total: 15 });
|
||||
vi.useRealTimers();
|
||||
|
||||
expect(session.metrics.lastActivityAt).not.toBe(before);
|
||||
});
|
||||
|
||||
it('lastActivityAt is updated after recording a message', () => {
|
||||
const before = session.metrics.lastActivityAt;
|
||||
vi.setSystemTime(new Date(Date.now() + 100));
|
||||
recordMessage(session);
|
||||
vi.useRealTimers();
|
||||
|
||||
expect(session.metrics.lastActivityAt).not.toBe(before);
|
||||
});
|
||||
});
|
||||
128
apps/gateway/src/admin/admin-jobs.controller.ts
Normal file
128
apps/gateway/src/admin/admin-jobs.controller.ts
Normal file
@@ -0,0 +1,128 @@
|
||||
import {
|
||||
Controller,
|
||||
Get,
|
||||
HttpCode,
|
||||
HttpStatus,
|
||||
Inject,
|
||||
NotFoundException,
|
||||
Optional,
|
||||
Param,
|
||||
Post,
|
||||
Query,
|
||||
UseGuards,
|
||||
} from '@nestjs/common';
|
||||
import { AdminGuard } from './admin.guard.js';
|
||||
import { QueueService } from '../queue/queue.service.js';
|
||||
import type { JobDto, JobListDto, JobStatus, QueueListDto } from '../queue/queue-admin.dto.js';
|
||||
|
||||
@Controller('api/admin/jobs')
|
||||
@UseGuards(AdminGuard)
|
||||
export class AdminJobsController {
|
||||
constructor(
|
||||
@Optional()
|
||||
@Inject(QueueService)
|
||||
private readonly queueService: QueueService | null,
|
||||
) {}
|
||||
|
||||
/**
|
||||
* GET /api/admin/jobs
|
||||
* List jobs across all queues. Optional ?status=active|completed|failed|waiting|delayed
|
||||
*/
|
||||
@Get()
|
||||
async listJobs(@Query('status') status?: string): Promise<JobListDto> {
|
||||
if (!this.queueService) {
|
||||
return { jobs: [], total: 0 };
|
||||
}
|
||||
|
||||
const validStatuses: JobStatus[] = ['active', 'completed', 'failed', 'waiting', 'delayed'];
|
||||
const normalised = status as JobStatus | undefined;
|
||||
|
||||
if (normalised && !validStatuses.includes(normalised)) {
|
||||
return { jobs: [], total: 0 };
|
||||
}
|
||||
|
||||
const jobs: JobDto[] = await this.queueService.listJobs(normalised);
|
||||
return { jobs, total: jobs.length };
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/admin/jobs/:id/retry
|
||||
* Retry a specific failed job. The id is "<queue>__<bullmq-job-id>".
|
||||
*/
|
||||
@Post(':id/retry')
|
||||
@HttpCode(HttpStatus.OK)
|
||||
async retryJob(@Param('id') id: string): Promise<{ ok: boolean; message: string }> {
|
||||
if (!this.queueService) {
|
||||
throw new NotFoundException('Queue service is not available');
|
||||
}
|
||||
|
||||
const result = await this.queueService.retryJob(id);
|
||||
if (!result.ok) {
|
||||
throw new NotFoundException(result.message);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /api/admin/jobs/queues
|
||||
* Return status for all managed queues.
|
||||
*/
|
||||
@Get('queues')
|
||||
async listQueues(): Promise<QueueListDto> {
|
||||
if (!this.queueService) {
|
||||
return { queues: [] };
|
||||
}
|
||||
|
||||
const health = await this.queueService.getHealthStatus();
|
||||
const queues = Object.entries(health.queues).map(([name, stats]) => ({
|
||||
name,
|
||||
waiting: stats.waiting,
|
||||
active: stats.active,
|
||||
completed: stats.completed,
|
||||
failed: stats.failed,
|
||||
delayed: 0,
|
||||
paused: stats.paused,
|
||||
}));
|
||||
|
||||
return { queues };
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/admin/jobs/queues/:name/pause
|
||||
* Pause the named queue.
|
||||
*/
|
||||
@Post('queues/:name/pause')
|
||||
@HttpCode(HttpStatus.OK)
|
||||
async pauseQueue(@Param('name') name: string): Promise<{ ok: boolean; message: string }> {
|
||||
if (!this.queueService) {
|
||||
throw new NotFoundException('Queue service is not available');
|
||||
}
|
||||
|
||||
const result = await this.queueService.pauseQueue(name);
|
||||
if (!result.ok) {
|
||||
throw new NotFoundException(result.message);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/admin/jobs/queues/:name/resume
|
||||
* Resume the named queue.
|
||||
*/
|
||||
@Post('queues/:name/resume')
|
||||
@HttpCode(HttpStatus.OK)
|
||||
async resumeQueue(@Param('name') name: string): Promise<{ ok: boolean; message: string }> {
|
||||
if (!this.queueService) {
|
||||
throw new NotFoundException('Queue service is not available');
|
||||
}
|
||||
|
||||
const result = await this.queueService.resumeQueue(name);
|
||||
if (!result.ok) {
|
||||
throw new NotFoundException(result.message);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,11 @@
|
||||
import { Module } from '@nestjs/common';
|
||||
import { AdminController } from './admin.controller.js';
|
||||
import { AdminHealthController } from './admin-health.controller.js';
|
||||
import { AdminJobsController } from './admin-jobs.controller.js';
|
||||
import { AdminGuard } from './admin.guard.js';
|
||||
|
||||
@Module({
|
||||
controllers: [AdminController, AdminHealthController],
|
||||
controllers: [AdminController, AdminHealthController, AdminJobsController],
|
||||
providers: [AdminGuard],
|
||||
})
|
||||
export class AdminModule {}
|
||||
|
||||
@@ -22,6 +22,7 @@ import { PreferencesModule } from './preferences/preferences.module.js';
|
||||
import { GCModule } from './gc/gc.module.js';
|
||||
import { ReloadModule } from './reload/reload.module.js';
|
||||
import { WorkspaceModule } from './workspace/workspace.module.js';
|
||||
import { QueueModule } from './queue/queue.module.js';
|
||||
import { ThrottlerGuard, ThrottlerModule } from '@nestjs/throttler';
|
||||
|
||||
@Module({
|
||||
@@ -46,6 +47,7 @@ import { ThrottlerGuard, ThrottlerModule } from '@nestjs/throttler';
|
||||
PreferencesModule,
|
||||
CommandsModule,
|
||||
GCModule,
|
||||
QueueModule,
|
||||
ReloadModule,
|
||||
WorkspaceModule,
|
||||
],
|
||||
|
||||
@@ -5,59 +5,72 @@ import {
|
||||
type OnModuleInit,
|
||||
type OnModuleDestroy,
|
||||
} from '@nestjs/common';
|
||||
import cron from 'node-cron';
|
||||
import { SummarizationService } from './summarization.service.js';
|
||||
import { SessionGCService } from '../gc/session-gc.service.js';
|
||||
import {
|
||||
QueueService,
|
||||
QUEUE_SUMMARIZATION,
|
||||
QUEUE_GC,
|
||||
QUEUE_TIER_MANAGEMENT,
|
||||
} from '../queue/queue.service.js';
|
||||
import type { Worker } from 'bullmq';
|
||||
import type { MosaicJobData } from '../queue/queue.service.js';
|
||||
|
||||
@Injectable()
|
||||
export class CronService implements OnModuleInit, OnModuleDestroy {
|
||||
private readonly logger = new Logger(CronService.name);
|
||||
private readonly tasks: cron.ScheduledTask[] = [];
|
||||
private readonly registeredWorkers: Worker<MosaicJobData>[] = [];
|
||||
|
||||
constructor(
|
||||
@Inject(SummarizationService) private readonly summarization: SummarizationService,
|
||||
@Inject(SessionGCService) private readonly sessionGC: SessionGCService,
|
||||
@Inject(QueueService) private readonly queueService: QueueService,
|
||||
) {}
|
||||
|
||||
onModuleInit(): void {
|
||||
async onModuleInit(): Promise<void> {
|
||||
const summarizationSchedule = process.env['SUMMARIZATION_CRON'] ?? '0 */6 * * *'; // every 6 hours
|
||||
const tierManagementSchedule = process.env['TIER_MANAGEMENT_CRON'] ?? '0 3 * * *'; // daily at 3am
|
||||
const gcSchedule = process.env['SESSION_GC_CRON'] ?? '0 4 * * *'; // daily at 4am
|
||||
|
||||
this.tasks.push(
|
||||
cron.schedule(summarizationSchedule, () => {
|
||||
this.summarization.runSummarization().catch((err) => {
|
||||
this.logger.error(`Scheduled summarization failed: ${err}`);
|
||||
});
|
||||
}),
|
||||
// M6-003: Summarization repeatable job
|
||||
await this.queueService.addRepeatableJob(
|
||||
QUEUE_SUMMARIZATION,
|
||||
'summarization',
|
||||
{},
|
||||
summarizationSchedule,
|
||||
);
|
||||
const summarizationWorker = this.queueService.registerWorker(QUEUE_SUMMARIZATION, async () => {
|
||||
await this.summarization.runSummarization();
|
||||
});
|
||||
this.registeredWorkers.push(summarizationWorker);
|
||||
|
||||
this.tasks.push(
|
||||
cron.schedule(tierManagementSchedule, () => {
|
||||
this.summarization.runTierManagement().catch((err) => {
|
||||
this.logger.error(`Scheduled tier management failed: ${err}`);
|
||||
});
|
||||
}),
|
||||
// M6-005: Tier management repeatable job
|
||||
await this.queueService.addRepeatableJob(
|
||||
QUEUE_TIER_MANAGEMENT,
|
||||
'tier-management',
|
||||
{},
|
||||
tierManagementSchedule,
|
||||
);
|
||||
const tierWorker = this.queueService.registerWorker(QUEUE_TIER_MANAGEMENT, async () => {
|
||||
await this.summarization.runTierManagement();
|
||||
});
|
||||
this.registeredWorkers.push(tierWorker);
|
||||
|
||||
this.tasks.push(
|
||||
cron.schedule(gcSchedule, () => {
|
||||
this.sessionGC.sweepOrphans().catch((err) => {
|
||||
this.logger.error(`Session GC sweep failed: ${err}`);
|
||||
});
|
||||
}),
|
||||
);
|
||||
// M6-004: GC repeatable job
|
||||
await this.queueService.addRepeatableJob(QUEUE_GC, 'session-gc', {}, gcSchedule);
|
||||
const gcWorker = this.queueService.registerWorker(QUEUE_GC, async () => {
|
||||
await this.sessionGC.sweepOrphans();
|
||||
});
|
||||
this.registeredWorkers.push(gcWorker);
|
||||
|
||||
this.logger.log(
|
||||
`Cron scheduled: summarization="${summarizationSchedule}", tier="${tierManagementSchedule}", gc="${gcSchedule}"`,
|
||||
`BullMQ jobs scheduled: summarization="${summarizationSchedule}", tier="${tierManagementSchedule}", gc="${gcSchedule}"`,
|
||||
);
|
||||
}
|
||||
|
||||
onModuleDestroy(): void {
|
||||
for (const task of this.tasks) {
|
||||
task.stop();
|
||||
}
|
||||
this.tasks.length = 0;
|
||||
this.logger.log('Cron tasks stopped');
|
||||
async onModuleDestroy(): Promise<void> {
|
||||
// Workers are closed by QueueService.onModuleDestroy — nothing extra needed here.
|
||||
this.registeredWorkers.length = 0;
|
||||
this.logger.log('CronService destroyed (workers managed by QueueService)');
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,10 +7,11 @@ import { LogController } from './log.controller.js';
|
||||
import { SummarizationService } from './summarization.service.js';
|
||||
import { CronService } from './cron.service.js';
|
||||
import { GCModule } from '../gc/gc.module.js';
|
||||
import { QueueModule } from '../queue/queue.module.js';
|
||||
|
||||
@Global()
|
||||
@Module({
|
||||
imports: [GCModule],
|
||||
imports: [GCModule, QueueModule],
|
||||
providers: [
|
||||
{
|
||||
provide: LOG_SERVICE,
|
||||
|
||||
34
apps/gateway/src/queue/queue-admin.dto.ts
Normal file
34
apps/gateway/src/queue/queue-admin.dto.ts
Normal file
@@ -0,0 +1,34 @@
|
||||
export type JobStatus = 'active' | 'completed' | 'failed' | 'waiting' | 'delayed';
|
||||
|
||||
export interface JobDto {
|
||||
id: string;
|
||||
name: string;
|
||||
queue: string;
|
||||
status: JobStatus;
|
||||
attempts: number;
|
||||
maxAttempts: number;
|
||||
createdAt?: string;
|
||||
processedAt?: string;
|
||||
finishedAt?: string;
|
||||
failedReason?: string;
|
||||
data: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export interface JobListDto {
|
||||
jobs: JobDto[];
|
||||
total: number;
|
||||
}
|
||||
|
||||
export interface QueueStatusDto {
|
||||
name: string;
|
||||
waiting: number;
|
||||
active: number;
|
||||
completed: number;
|
||||
failed: number;
|
||||
delayed: number;
|
||||
paused: boolean;
|
||||
}
|
||||
|
||||
export interface QueueListDto {
|
||||
queues: QueueStatusDto[];
|
||||
}
|
||||
9
apps/gateway/src/queue/queue.module.ts
Normal file
9
apps/gateway/src/queue/queue.module.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
import { Global, Module } from '@nestjs/common';
|
||||
import { QueueService } from './queue.service.js';
|
||||
|
||||
@Global()
|
||||
@Module({
|
||||
providers: [QueueService],
|
||||
exports: [QueueService],
|
||||
})
|
||||
export class QueueModule {}
|
||||
386
apps/gateway/src/queue/queue.service.ts
Normal file
386
apps/gateway/src/queue/queue.service.ts
Normal file
@@ -0,0 +1,386 @@
|
||||
import {
|
||||
Inject,
|
||||
Injectable,
|
||||
Logger,
|
||||
Optional,
|
||||
type OnModuleInit,
|
||||
type OnModuleDestroy,
|
||||
} from '@nestjs/common';
|
||||
import { Queue, Worker, type Job, type ConnectionOptions } from 'bullmq';
|
||||
import type { LogService } from '@mosaic/log';
|
||||
import { LOG_SERVICE } from '../log/log.tokens.js';
|
||||
import type { JobDto, JobStatus } from './queue-admin.dto.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Typed job definitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export interface SummarizationJobData {
|
||||
triggeredBy?: string;
|
||||
}
|
||||
|
||||
export interface GCJobData {
|
||||
triggeredBy?: string;
|
||||
}
|
||||
|
||||
export interface TierManagementJobData {
|
||||
triggeredBy?: string;
|
||||
}
|
||||
|
||||
export type MosaicJobData = SummarizationJobData | GCJobData | TierManagementJobData;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Queue health status
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export interface QueueHealthStatus {
|
||||
queues: Record<
|
||||
string,
|
||||
{
|
||||
waiting: number;
|
||||
active: number;
|
||||
failed: number;
|
||||
completed: number;
|
||||
paused: boolean;
|
||||
}
|
||||
>;
|
||||
healthy: boolean;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export const QUEUE_SUMMARIZATION = 'mosaic:summarization';
|
||||
export const QUEUE_GC = 'mosaic:gc';
|
||||
export const QUEUE_TIER_MANAGEMENT = 'mosaic:tier-management';
|
||||
|
||||
const DEFAULT_VALKEY_URL = 'redis://localhost:6380';
|
||||
|
||||
function getConnection(): ConnectionOptions {
|
||||
const url = process.env['VALKEY_URL'] ?? DEFAULT_VALKEY_URL;
|
||||
// BullMQ ConnectionOptions accepts a URL string (ioredis-compatible)
|
||||
return url as unknown as ConnectionOptions;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Job handler type
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export type JobHandler<T = MosaicJobData> = (job: Job<T>) => Promise<void>;
|
||||
|
||||
/** System session ID used for job-event log entries (no real user session). */
|
||||
const SYSTEM_SESSION_ID = 'system';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// QueueService
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@Injectable()
|
||||
export class QueueService implements OnModuleInit, OnModuleDestroy {
|
||||
private readonly logger = new Logger(QueueService.name);
|
||||
private readonly connection: ConnectionOptions;
|
||||
private readonly queues = new Map<string, Queue<MosaicJobData>>();
|
||||
private readonly workers = new Map<string, Worker<MosaicJobData>>();
|
||||
|
||||
constructor(
|
||||
@Optional()
|
||||
@Inject(LOG_SERVICE)
|
||||
private readonly logService: LogService | null,
|
||||
) {
|
||||
this.connection = getConnection();
|
||||
}
|
||||
|
||||
onModuleInit(): void {
|
||||
this.logger.log('QueueService initialised (BullMQ)');
|
||||
}
|
||||
|
||||
async onModuleDestroy(): Promise<void> {
|
||||
await this.closeAll();
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Queue helpers
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Get or create a BullMQ Queue for the given queue name.
|
||||
*/
|
||||
getQueue<T extends MosaicJobData = MosaicJobData>(name: string): Queue<T> {
|
||||
let queue = this.queues.get(name) as Queue<T> | undefined;
|
||||
if (!queue) {
|
||||
queue = new Queue<T>(name, { connection: this.connection });
|
||||
this.queues.set(name, queue as unknown as Queue<MosaicJobData>);
|
||||
}
|
||||
return queue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a BullMQ repeatable job (cron-style).
|
||||
* Uses `jobId` as a deterministic key so duplicate registrations are idempotent.
|
||||
*/
|
||||
async addRepeatableJob<T extends MosaicJobData>(
|
||||
queueName: string,
|
||||
jobName: string,
|
||||
data: T,
|
||||
cronExpression: string,
|
||||
): Promise<void> {
|
||||
const queue = this.getQueue<T>(queueName);
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (queue as Queue<any>).add(jobName, data, {
|
||||
repeat: { pattern: cronExpression },
|
||||
jobId: `${queueName}:${jobName}:repeatable`,
|
||||
});
|
||||
this.logger.log(
|
||||
`Repeatable job "${jobName}" registered on "${queueName}" (cron: ${cronExpression})`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Register a BullMQ Worker for the given queue name.
 *
 * The processor logs a "started" event to agent_logs before invoking the
 * handler; "completed"/"failed" events are logged from worker event
 * listeners. A custom backoff strategy (5s base, doubling, capped at 60s)
 * is installed via worker settings.
 *
 * @param queueName - queue to consume from
 * @param handler   - job processing callback; any throw marks the job failed
 * @returns the created Worker (also tracked in this.workers for shutdown)
 */
registerWorker<T extends MosaicJobData>(queueName: string, handler: JobHandler<T>): Worker<T> {
  const worker = new Worker<T>(
    queueName,
    async (job) => {
      this.logger.debug(`Processing job "${job.name}" (id=${job.id}) on queue "${queueName}"`);
      // Record the attempt before running the handler. attemptsMade is the
      // count of prior attempts, so +1 yields the current attempt number.
      // (logJobEvent catches its own errors, so this await cannot fail the job.)
      await this.logJobEvent(
        queueName,
        job.name,
        job.id ?? 'unknown',
        'started',
        job.attemptsMade + 1,
      );
      await handler(job);
    },
    {
      connection: this.connection,
      // Exponential retry delay: 5s, 10s, 20s, ... capped at 60s. Note this
      // only controls the delay, not the attempt limit — max attempts come
      // from each job's own options.
      // NOTE(review): BullMQ invokes settings.backoffStrategy for jobs whose
      // backoff type is "custom" — confirm job options set that type.
      settings: {
        backoffStrategy: (attemptsMade: number) => {
          return Math.min(5000 * Math.pow(2, attemptsMade - 1), 60_000);
        },
      },
    },
  );

  worker.on('completed', (job) => {
    this.logger.log(`Job "${job.name}" (id=${job.id}) completed on queue "${queueName}"`);
    // Fire-and-forget: a failed audit write must not affect the worker.
    this.logJobEvent(
      queueName,
      job.name,
      job.id ?? 'unknown',
      'completed',
      job.attemptsMade,
    ).catch((err) => this.logger.warn(`Failed to write completed job log: ${String(err)}`));
  });

  worker.on('failed', (job, err) => {
    // job can be undefined here (e.g. connection-level failures), hence the
    // defensive optional chaining throughout.
    const errMsg = err instanceof Error ? err.message : String(err);
    this.logger.error(
      `Job "${job?.name ?? 'unknown'}" (id=${job?.id ?? 'unknown'}) failed on queue "${queueName}": ${errMsg}`,
    );
    this.logJobEvent(
      queueName,
      job?.name ?? 'unknown',
      job?.id ?? 'unknown',
      'failed',
      job?.attemptsMade ?? 0,
      errMsg,
    ).catch((e) => this.logger.warn(`Failed to write failed job log: ${String(e)}`));
  });

  this.workers.set(queueName, worker as unknown as Worker<MosaicJobData>);
  return worker;
}
|
||||
|
||||
/**
|
||||
* Return queue health statistics for all managed queues.
|
||||
*/
|
||||
async getHealthStatus(): Promise<QueueHealthStatus> {
|
||||
const queues: QueueHealthStatus['queues'] = {};
|
||||
let healthy = true;
|
||||
|
||||
for (const [name, queue] of this.queues) {
|
||||
try {
|
||||
const [waiting, active, failed, completed, paused] = await Promise.all([
|
||||
queue.getWaitingCount(),
|
||||
queue.getActiveCount(),
|
||||
queue.getFailedCount(),
|
||||
queue.getCompletedCount(),
|
||||
queue.isPaused(),
|
||||
]);
|
||||
queues[name] = { waiting, active, failed, completed, paused };
|
||||
} catch (err) {
|
||||
this.logger.error(`Failed to fetch health for queue "${name}": ${err}`);
|
||||
healthy = false;
|
||||
queues[name] = { waiting: 0, active: 0, failed: 0, completed: 0, paused: false };
|
||||
}
|
||||
}
|
||||
|
||||
return { queues, healthy };
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Admin API helpers (M6-006)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* List jobs across all managed queues, optionally filtered by status.
|
||||
* BullMQ jobs are fetched by state type from each queue.
|
||||
*/
|
||||
async listJobs(status?: JobStatus): Promise<JobDto[]> {
|
||||
const jobs: JobDto[] = [];
|
||||
const states: JobStatus[] = status
|
||||
? [status]
|
||||
: ['active', 'completed', 'failed', 'waiting', 'delayed'];
|
||||
|
||||
for (const [queueName, queue] of this.queues) {
|
||||
try {
|
||||
for (const state of states) {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const raw = await (queue as Queue<any>).getJobs([state as any]);
|
||||
for (const j of raw) {
|
||||
jobs.push(this.toJobDto(queueName, j, state));
|
||||
}
|
||||
}
|
||||
} catch (err) {
|
||||
this.logger.warn(`Failed to list jobs for queue "${queueName}": ${String(err)}`);
|
||||
}
|
||||
}
|
||||
|
||||
return jobs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retry a specific failed job by its BullMQ job ID (format: "queueName:id").
|
||||
* The caller passes "<queueName>__<jobId>" as the composite ID because BullMQ
|
||||
* job IDs are not globally unique — they are scoped to their queue.
|
||||
*/
|
||||
async retryJob(compositeId: string): Promise<{ ok: boolean; message: string }> {
|
||||
const sep = compositeId.lastIndexOf('__');
|
||||
if (sep === -1) {
|
||||
return { ok: false, message: 'Invalid job id format. Expected "<queue>__<jobId>".' };
|
||||
}
|
||||
const queueName = compositeId.slice(0, sep);
|
||||
const jobId = compositeId.slice(sep + 2);
|
||||
|
||||
const queue = this.queues.get(queueName);
|
||||
if (!queue) {
|
||||
return { ok: false, message: `Queue "${queueName}" not found.` };
|
||||
}
|
||||
|
||||
const job = await queue.getJob(jobId);
|
||||
if (!job) {
|
||||
return { ok: false, message: `Job "${jobId}" not found in queue "${queueName}".` };
|
||||
}
|
||||
|
||||
const state = await job.getState();
|
||||
if (state !== 'failed') {
|
||||
return { ok: false, message: `Job "${jobId}" is not in failed state (current: ${state}).` };
|
||||
}
|
||||
|
||||
await job.retry('failed');
|
||||
await this.logJobEvent(queueName, job.name, jobId, 'retried', (job.attemptsMade ?? 0) + 1);
|
||||
return { ok: true, message: `Job "${jobId}" on queue "${queueName}" queued for retry.` };
|
||||
}
|
||||
|
||||
/**
|
||||
* Pause a queue by name.
|
||||
*/
|
||||
async pauseQueue(name: string): Promise<{ ok: boolean; message: string }> {
|
||||
const queue = this.queues.get(name);
|
||||
if (!queue) return { ok: false, message: `Queue "${name}" not found.` };
|
||||
await queue.pause();
|
||||
this.logger.log(`Queue paused: ${name}`);
|
||||
return { ok: true, message: `Queue "${name}" paused.` };
|
||||
}
|
||||
|
||||
/**
|
||||
* Resume a paused queue by name.
|
||||
*/
|
||||
async resumeQueue(name: string): Promise<{ ok: boolean; message: string }> {
|
||||
const queue = this.queues.get(name);
|
||||
if (!queue) return { ok: false, message: `Queue "${name}" not found.` };
|
||||
await queue.resume();
|
||||
this.logger.log(`Queue resumed: ${name}`);
|
||||
return { ok: true, message: `Queue "${name}" resumed.` };
|
||||
}
|
||||
|
||||
/**
 * Map a raw BullMQ Job to the admin-facing JobDto shape.
 * Timestamps are millisecond epochs on the job; falsy values (absent or 0)
 * are surfaced as undefined rather than a bogus date.
 */
private toJobDto(queueName: string, job: Job<MosaicJobData>, status: JobStatus): JobDto {
  const toIso = (epochMs?: number): string | undefined =>
    epochMs ? new Date(epochMs).toISOString() : undefined;

  return {
    // Composite ID so admin endpoints can address jobs across queues.
    id: `${queueName}__${job.id ?? 'unknown'}`,
    name: job.name,
    queue: queueName,
    status,
    attempts: job.attemptsMade,
    maxAttempts: job.opts?.attempts ?? 1,
    createdAt: toIso(job.timestamp),
    processedAt: toIso(job.processedOn),
    finishedAt: toIso(job.finishedOn),
    failedReason: job.failedReason,
    data: (job.data as Record<string, unknown>) ?? {},
  };
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Job event logging (M6-007)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
/** Write a log entry to agent_logs for BullMQ job lifecycle events. */
|
||||
private async logJobEvent(
|
||||
queueName: string,
|
||||
jobName: string,
|
||||
jobId: string,
|
||||
event: 'started' | 'completed' | 'retried' | 'failed',
|
||||
attempts: number,
|
||||
errorMessage?: string,
|
||||
): Promise<void> {
|
||||
if (!this.logService) return;
|
||||
|
||||
const level = event === 'failed' ? ('error' as const) : ('info' as const);
|
||||
const content =
|
||||
event === 'failed'
|
||||
? `Job "${jobName}" (${jobId}) on queue "${queueName}" failed: ${errorMessage ?? 'unknown error'}`
|
||||
: `Job "${jobName}" (${jobId}) on queue "${queueName}" ${event} (attempt ${attempts})`;
|
||||
|
||||
try {
|
||||
await this.logService.logs.ingest({
|
||||
sessionId: SYSTEM_SESSION_ID,
|
||||
userId: 'system',
|
||||
level,
|
||||
category: 'general',
|
||||
content,
|
||||
metadata: {
|
||||
jobId,
|
||||
jobName,
|
||||
queue: queueName,
|
||||
event,
|
||||
attempts,
|
||||
...(errorMessage ? { errorMessage } : {}),
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
// Log errors must never crash job execution
|
||||
this.logger.warn(`Failed to write job event log for job ${jobId}: ${String(err)}`);
|
||||
}
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// Lifecycle
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
private async closeAll(): Promise<void> {
|
||||
const workerCloses = Array.from(this.workers.values()).map((w) =>
|
||||
w.close().catch((err) => this.logger.error(`Worker close error: ${err}`)),
|
||||
);
|
||||
const queueCloses = Array.from(this.queues.values()).map((q) =>
|
||||
q.close().catch((err) => this.logger.error(`Queue close error: ${err}`)),
|
||||
);
|
||||
await Promise.all([...workerCloses, ...queueCloses]);
|
||||
this.workers.clear();
|
||||
this.queues.clear();
|
||||
this.logger.log('QueueService shut down');
|
||||
}
|
||||
}
|
||||
2
apps/gateway/src/queue/queue.tokens.ts
Normal file
2
apps/gateway/src/queue/queue.tokens.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
// String tokens for the queue layer — presumably NestJS injection tokens
// (file lives at queue.tokens.ts); confirm against the module's providers.
export const QUEUE_REDIS = 'QUEUE_REDIS';
export const QUEUE_SERVICE = 'QUEUE_SERVICE';
|
||||
@@ -7,36 +7,36 @@
|
||||
|
||||
**ID:** harness-20260321
|
||||
**Statement:** Transform Mosaic Stack from a functional demo into a real multi-provider, task-routing AI harness. Persist all conversations, integrate frontier LLM providers (Anthropic, OpenAI, OpenRouter, Z.ai, Ollama), build granular task-aware agent routing, harden agent sessions, replace cron with BullMQ, and design the channel protocol for future Matrix/remote integration.
|
||||
**Phase:** Execution
|
||||
**Current Milestone:** M3: Provider Integration
|
||||
**Progress:** 2 / 7 milestones
|
||||
**Status:** active
|
||||
**Last Updated:** 2026-03-21 UTC
|
||||
**Phase:** Complete
|
||||
**Current Milestone:** All milestones done
|
||||
**Progress:** 7 / 7 milestones
|
||||
**Status:** complete
|
||||
**Last Updated:** 2026-03-22 UTC
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] AC-1: Send messages in TUI → restart TUI → resume conversation → agent has full history and context
|
||||
- [ ] AC-2: Route a coding task to Claude Opus 4.6, a simple question to Haiku, a summarization to GLM-5 — all via granular routing rules
|
||||
- [ ] AC-3: Two users exist, User A's memory searches never return User B's data
|
||||
- [ ] AC-4: `/model claude-sonnet-4-6` in TUI switches the active model for subsequent messages
|
||||
- [ ] AC-5: `/agent coding-agent` in TUI switches to a different agent with different system prompt and tools
|
||||
- [ ] AC-6: BullMQ jobs execute on schedule, failures retry with backoff, admin can inspect via `/api/admin/jobs`
|
||||
- [ ] AC-7: Channel protocol document exists with Matrix integration points defined, reviewed, and approved
|
||||
- [ ] AC-8: Embeddings run on Ollama local models (no external API dependency for vector operations)
|
||||
- [ ] AC-9: All five providers (Anthropic, OpenAI, OpenRouter, Z.ai, Ollama) connect, list models, and complete chat requests
|
||||
- [ ] AC-10: Routing transparency — TUI displays which model was selected and the routing reason for each response
|
||||
- [x] AC-1: Send messages in TUI → restart TUI → resume conversation → agent has full history and context
|
||||
- [x] AC-2: Route a coding task to Claude Opus 4.6, a simple question to Haiku, a summarization to GLM-5 — all via granular routing rules
|
||||
- [x] AC-3: Two users exist, User A's memory searches never return User B's data
|
||||
- [x] AC-4: `/model claude-sonnet-4-6` in TUI switches the active model for subsequent messages
|
||||
- [x] AC-5: `/agent coding-agent` in TUI switches to a different agent with different system prompt and tools
|
||||
- [x] AC-6: BullMQ jobs execute on schedule, failures retry with backoff, admin can inspect via `/api/admin/jobs`
|
||||
- [x] AC-7: Channel protocol document exists with Matrix integration points defined, reviewed, and approved
|
||||
- [x] AC-8: Embeddings run on Ollama local models (no external API dependency for vector operations)
|
||||
- [x] AC-9: All five providers (Anthropic, OpenAI, OpenRouter, Z.ai, Ollama) connect, list models, and complete chat requests
|
||||
- [x] AC-10: Routing transparency — TUI displays which model was selected and the routing reason for each response
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------ | ---------------------------------- | ----------- | ------ | --------- | ---------- | ---------- |
|
||||
| 1 | ms-166 | Conversation Persistence & Context | done | — | #224–#231 | 2026-03-21 | 2026-03-21 |
|
||||
| 2 | ms-167 | Security & Isolation | done | — | #232–#239 | 2026-03-21 | 2026-03-21 |
|
||||
| 3 | ms-168 | Provider Integration | in-progress | — | #240–#251 | 2026-03-21 | — |
|
||||
| 4 | ms-169 | Agent Routing Engine | not-started | — | #252–#264 | — | — |
|
||||
| 5 | ms-170 | Agent Session Hardening | not-started | — | #265–#272 | — | — |
|
||||
| 6 | ms-171 | Job Queue Foundation | not-started | — | #273–#280 | — | — |
|
||||
| 7 | ms-172 | Channel Protocol Design | not-started | — | #281–#288 | — | — |
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------ | ---------------------------------- | ------ | ------ | --------- | ---------- | ---------- |
|
||||
| 1 | ms-166 | Conversation Persistence & Context | done | — | #224–#231 | 2026-03-21 | 2026-03-21 |
|
||||
| 2 | ms-167 | Security & Isolation | done | — | #232–#239 | 2026-03-21 | 2026-03-21 |
|
||||
| 3 | ms-168 | Provider Integration | done | — | #240–#251 | 2026-03-21 | 2026-03-22 |
|
||||
| 4 | ms-169 | Agent Routing Engine | done | — | #252–#264 | 2026-03-22 | 2026-03-22 |
|
||||
| 5 | ms-170 | Agent Session Hardening | done | — | #265–#272 | 2026-03-22 | 2026-03-22 |
|
||||
| 6 | ms-171 | Job Queue Foundation | done | — | #273–#280 | 2026-03-22 | 2026-03-22 |
|
||||
| 7 | ms-172 | Channel Protocol Design | done | — | #281–#288 | 2026-03-22 | 2026-03-22 |
|
||||
|
||||
## Deployment
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
## Coordination
|
||||
|
||||
- **Primary Agent:** claude-opus-4-6
|
||||
- **Sibling Agents:** codex (for pure coding tasks), sonnet (for review/standard work)
|
||||
- **Sibling Agents:** sonnet (workers), haiku (verification)
|
||||
- **Shared Contracts:** docs/PRD-Harness_Foundation.md, docs/TASKS.md
|
||||
|
||||
## Token Budget
|
||||
@@ -56,14 +56,14 @@
|
||||
| Metric | Value |
|
||||
| ------ | ------ |
|
||||
| Budget | — |
|
||||
| Used | 0 |
|
||||
| Used | ~2.5M |
|
||||
| Mode | normal |
|
||||
|
||||
## Session History
|
||||
|
||||
| Session | Runtime | Started | Duration | Ended Reason | Last Task |
|
||||
| ------- | --------------- | ---------- | -------- | ------------ | ------------- |
|
||||
| 1 | claude-opus-4-6 | 2026-03-21 | — | — | Planning gate |
|
||||
| Session | Runtime | Started | Duration | Ended Reason | Last Task |
|
||||
| ------- | --------------- | ---------- | -------- | ------------ | ----------------- |
|
||||
| 1 | claude-opus-4-6 | 2026-03-21 | ~6h | complete | M7-008 — all done |
|
||||
|
||||
## Scratchpad
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
- **Owner:** Jason Woltje
|
||||
- **Date:** 2026-03-21
|
||||
- **Status:** draft
|
||||
- **Status:** completed
|
||||
- **Phase:** 9 (post-MVP)
|
||||
- **Version Target:** v0.2.0
|
||||
- **Agent Harness:** [Pi SDK](https://github.com/badlogic/pi-mono)
|
||||
|
||||
135
docs/TASKS.md
135
docs/TASKS.md
@@ -3,72 +3,71 @@
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **`agent` column values:** `codex` | `sonnet` | `haiku` | `glm-5` | `opus` | `—` (auto/default)
|
||||
> Pipeline crons pick the cheapest capable model. Override with a specific value when a task genuinely needs it.
|
||||
|
||||
| id | status | agent | milestone | description | pr | notes |
|
||||
| ------ | ----------- | ------ | ------------------ | --------------------------------------------------------------------------------------------------------------- | ---- | ------------------------------------------------ |
|
||||
| M1-001 | done | sonnet | M1: Persistence | Wire ChatGateway.handleMessage() → ConversationsRepo.addMessage() for user messages | #292 | #224 closed |
|
||||
| M1-002 | done | sonnet | M1: Persistence | Wire agent event relay → ConversationsRepo.addMessage() for assistant responses (text, tool calls, thinking) | #292 | #225 closed |
|
||||
| M1-003 | done | sonnet | M1: Persistence | Store message metadata: model used, provider, token counts, tool call details, timestamps | #292 | #226 closed |
|
||||
| M1-004 | done | sonnet | M1: Persistence | On session resume, load message history from DB and inject into Pi session context | #301 | #227 closed |
|
||||
| M1-005 | done | sonnet | M1: Persistence | Context window management: summarize older messages when history exceeds 80% of model context | #301 | #228 closed |
|
||||
| M1-006 | done | sonnet | M1: Persistence | Conversation search: full-text search on messages table via /api/conversations/search | #299 | #229 closed |
|
||||
| M1-007 | done | sonnet | M1: Persistence | TUI: /history command to display conversation message count and context usage | #297 | #230 closed |
|
||||
| M1-008 | done | sonnet | M1: Persistence | Verify: send messages → kill TUI → resume with -c → agent references prior context | #304 | #231 closed — 20 integration tests |
|
||||
| M2-001 | done | sonnet | M2: Security | Audit InsightsRepo: add userId filter to searchByEmbedding() vector search | #290 | #232 closed |
|
||||
| M2-002 | done | sonnet | M2: Security | Audit InsightsRepo: add userId filter to findByUser(), decayOldInsights() | #290 | #233 closed |
|
||||
| M2-003 | done | sonnet | M2: Security | Audit PreferencesRepo: verify all queries filter by userId | #294 | #234 closed — already scoped |
|
||||
| M2-004 | done | sonnet | M2: Security | Audit agent memory tools: verify memory_search, memory_save_*, memory_get_* scope to session user | #294 | #235 closed — FIXED userId injection |
|
||||
| M2-005 | done | sonnet | M2: Security | Audit ConversationsRepo: verify ownership check on findById, update, delete, addMessage, findMessages | #293 | #236 closed |
|
||||
| M2-006 | done | sonnet | M2: Security | Audit AgentsRepo: verify findAccessible() returns only user's agents + system agents | #293 | #237 closed |
|
||||
| M2-007 | done | sonnet | M2: Security | Integration test: create two users, populate data, verify cross-user isolation on every query path | #305 | #238 closed — 28 integration tests |
|
||||
| M2-008 | done | sonnet | M2: Security | Audit Valkey keys: verify session keys include userId or are not enumerable across users | #298 | #239 closed — SCAN replaces KEYS, /gc admin-only |
|
||||
| M3-001 | done | sonnet | M3: Providers | Refactor ProviderService into IProviderAdapter pattern: register(), listModels(), healthCheck(), createClient() | #306 | #240 closed |
|
||||
| M3-002 | done | sonnet | M3: Providers | Anthropic adapter: @anthropic-ai/sdk, Claude Sonnet 4.6 + Opus 4.6 + Haiku 4.5, OAuth + API key | #309 | #241 closed |
|
||||
| M3-003 | done | sonnet | M3: Providers | OpenAI adapter: openai SDK, Codex gpt-5.4, OAuth + API key | #310 | #242 closed |
|
||||
| M3-004 | done | sonnet | M3: Providers | OpenRouter adapter: OpenAI-compatible client, API key, dynamic model list from /api/v1/models | #311 | #243 closed |
|
||||
| M3-005 | in-progress | sonnet | M3: Providers | Z.ai GLM adapter: GLM-5, API key, research API format | — | #244 |
|
||||
| M3-006 | done | sonnet | M3: Providers | Ollama adapter: refactor existing integration into adapter pattern, add embedding model support | #311 | #245 closed |
|
||||
| M3-007 | done | sonnet | M3: Providers | Provider health check: periodic probe, configurable interval, status per provider, /api/providers/health | #308 | #246 closed |
|
||||
| M3-008 | done | sonnet | M3: Providers | Model capability matrix: per-model metadata (tier, context window, tool support, vision, streaming, embedding) | #303 | #247 closed |
|
||||
| M3-009 | done | sonnet | M3: Providers | Refactor EmbeddingService: provider-agnostic interface, Ollama default (nomic-embed-text or mxbai-embed-large) | #308 | #248 closed |
|
||||
| M3-010 | in-progress | sonnet | M3: Providers | OAuth token storage: persist provider tokens per user in DB (encrypted), refresh flow | — | #249 |
|
||||
| M3-011 | in-progress | sonnet | M3: Providers | Provider config UI support: /api/providers CRUD for user-scoped provider credentials | — | #250 |
|
||||
| M3-012 | not-started | haiku | M3: Providers | Verify: each provider connects, lists models, completes chat request, handles errors | — | #251 |
|
||||
| M4-001 | in-progress | sonnet | M4: Routing | Define routing rule schema: RoutingRule { name, priority, conditions[], action } stored in DB | — | #252 DB migration |
|
||||
| M4-002 | in-progress | sonnet | M4: Routing | Condition types: taskType, complexity, domain, costTier, requiredCapabilities | — | #253 |
|
||||
| M4-003 | in-progress | sonnet | M4: Routing | Action types: routeTo { provider, model, agentConfigId?, systemPromptOverride?, toolAllowlist? } | — | #254 |
|
||||
| M4-004 | in-progress | sonnet | M4: Routing | Default routing rules seed data: coding→Opus, Q&A→Sonnet, summarization→GLM-5, research→Codex, offline→Ollama | — | #255 |
|
||||
| M4-005 | in-progress | sonnet | M4: Routing | Task classification: infer taskType + complexity from user message (regex/keyword first, LLM-assisted later) | — | #256 |
|
||||
| M4-006 | not-started | opus | M4: Routing | Routing decision pipeline: classify → match rules → check health → fallback chain → return result | — | #257 |
|
||||
| M4-007 | not-started | sonnet | M4: Routing | Routing override: /model forces specific model regardless of routing rules | — | #258 |
|
||||
| M4-008 | not-started | sonnet | M4: Routing | Routing transparency: include routing decision in session:info event (model + reason) | — | #259 |
|
||||
| M4-009 | not-started | sonnet | M4: Routing | Routing rules CRUD: /api/routing/rules — list, create, update, delete, reorder priority | — | #260 |
|
||||
| M4-010 | not-started | sonnet | M4: Routing | Per-user routing overrides: users customize default rules for their sessions | — | #261 |
|
||||
| M4-011 | not-started | sonnet | M4: Routing | Agent specialization: agents declare capabilities in config (domains, preferred models, tool sets) | — | #262 |
|
||||
| M4-012 | not-started | sonnet | M4: Routing | Routing integration: wire into ChatGateway — every message triggers routing before agent dispatch | — | #263 |
|
||||
| M4-013 | not-started | haiku | M4: Routing | Verify: coding→Opus, summarize→GLM-5, simple→Haiku, override via /model works | — | #264 |
|
||||
| M5-001 | not-started | sonnet | M5: Sessions | Wire ChatGateway: on session create, load agent config from DB (system prompt, model, provider, tools, skills) | — | #265 |
|
||||
| M5-002 | not-started | sonnet | M5: Sessions | /model command: end-to-end wiring — TUI → socket → gateway switches provider/model → new messages use it | — | #266 |
|
||||
| M5-003 | not-started | sonnet | M5: Sessions | /agent command: switch agent config mid-session — loads new system prompt, tools, default model | — | #267 |
|
||||
| M5-004 | not-started | sonnet | M5: Sessions | Session ↔ conversation binding: persist sessionId on conversation record, resume via conversationId | — | #268 |
|
||||
| M5-005 | not-started | sonnet | M5: Sessions | Session info broadcast: on model/agent switch, emit session:info with updated state | — | #269 |
|
||||
| M5-006 | not-started | sonnet | M5: Sessions | Agent creation from TUI: /agent new command creates agent config via gateway API | — | #270 |
|
||||
| M5-007 | not-started | sonnet | M5: Sessions | Session metrics: per-session token usage, model switches, duration — persist in DB | — | #271 |
|
||||
| M5-008 | not-started | haiku | M5: Sessions | Verify: /model switches model, /agent switches agent, session resume loads config | — | #272 |
|
||||
| M6-001 | not-started | sonnet | M6: Jobs | Add BullMQ dependency, configure with Valkey connection | — | #273 Test compat first |
|
||||
| M6-002 | not-started | sonnet | M6: Jobs | Create queue service: typed job definitions, worker registration, error handling with exponential backoff | — | #274 |
|
||||
| M6-003 | not-started | sonnet | M6: Jobs | Migrate summarization cron → BullMQ repeatable job | — | #275 |
|
||||
| M6-004 | not-started | sonnet | M6: Jobs | Migrate GC (session cleanup) → BullMQ repeatable job | — | #276 |
|
||||
| M6-005 | not-started | sonnet | M6: Jobs | Migrate tier management (log archival) → BullMQ repeatable job | — | #277 |
|
||||
| M6-006 | not-started | sonnet | M6: Jobs | Admin jobs API: GET /api/admin/jobs — list, status, retry, pause/resume queues | — | #278 |
|
||||
| M6-007 | not-started | sonnet | M6: Jobs | Job event logging: emit job start/complete/fail events to agent_logs | — | #279 |
|
||||
| M6-008 | not-started | haiku | M6: Jobs | Verify: jobs execute on schedule, failure retries with backoff, admin endpoint shows history | — | #280 |
|
||||
| M7-001 | not-started | opus | M7: Channel Design | Define IChannelAdapter interface: lifecycle, message flow, identity mapping | — | #281 Architecture |
|
||||
| M7-002 | not-started | opus | M7: Channel Design | Define channel message protocol: canonical format all adapters translate to/from | — | #282 Architecture |
|
||||
| M7-003 | not-started | opus | M7: Channel Design | Design Matrix integration: appservice, room↔conversation, space↔team, agent ghosts, power levels | — | #283 Architecture |
|
||||
| M7-004 | not-started | opus | M7: Channel Design | Design conversation multiplexing: same conversation from TUI+WebUI+Matrix, real-time sync | — | #284 Architecture |
|
||||
| M7-005 | not-started | opus | M7: Channel Design | Design remote auth bridging: Matrix/Discord auth → Mosaic identity (token linking, OAuth bridge) | — | #285 Architecture |
|
||||
| M7-006 | not-started | opus | M7: Channel Design | Design agent-to-agent communication via Matrix rooms: room per agent pair, human observation | — | #286 Architecture |
|
||||
| M7-007 | not-started | opus | M7: Channel Design | Design multi-user isolation in Matrix: space-per-team, room visibility, encryption, admin access | — | #287 Architecture |
|
||||
| M7-008 | not-started | haiku | M7: Channel Design | Publish docs/architecture/channel-protocol.md — reviewed and approved | — | #288 |
|
||||
| id | status | agent | milestone | description | pr | notes |
|
||||
| ------ | ------ | ------ | ------------------ | ------------------------------------------------------------------ | ---- | ----------- |
|
||||
| M1-001 | done | sonnet | M1: Persistence | Wire ChatGateway → ConversationsRepo for user messages | #292 | #224 closed |
|
||||
| M1-002 | done | sonnet | M1: Persistence | Wire agent event relay → ConversationsRepo for assistant responses | #292 | #225 closed |
|
||||
| M1-003 | done | sonnet | M1: Persistence | Store message metadata: model, provider, tokens, tool calls | #292 | #226 closed |
|
||||
| M1-004 | done | sonnet | M1: Persistence | Load message history into Pi session on resume | #301 | #227 closed |
|
||||
| M1-005 | done | sonnet | M1: Persistence | Context window management: summarize when >80% | #301 | #228 closed |
|
||||
| M1-006 | done | sonnet | M1: Persistence | Conversation search endpoint | #299 | #229 closed |
|
||||
| M1-007 | done | sonnet | M1: Persistence | TUI /history command | #297 | #230 closed |
|
||||
| M1-008 | done | sonnet | M1: Persistence | Verify persistence — 20 tests | #304 | #231 closed |
|
||||
| M2-001 | done | sonnet | M2: Security | InsightsRepo userId on searchByEmbedding | #290 | #232 closed |
|
||||
| M2-002 | done | sonnet | M2: Security | InsightsRepo userId on findByUser/decay | #290 | #233 closed |
|
||||
| M2-003 | done | sonnet | M2: Security | PreferencesRepo userId verified | #294 | #234 closed |
|
||||
| M2-004 | done | sonnet | M2: Security | Memory tools userId injection fixed | #294 | #235 closed |
|
||||
| M2-005 | done | sonnet | M2: Security | ConversationsRepo ownership checks | #293 | #236 closed |
|
||||
| M2-006 | done | sonnet | M2: Security | AgentsRepo findAccessible scoped | #293 | #237 closed |
|
||||
| M2-007 | done | sonnet | M2: Security | Cross-user isolation — 28 tests | #305 | #238 closed |
|
||||
| M2-008 | done | sonnet | M2: Security | Valkey SCAN + /gc admin-only | #298 | #239 closed |
|
||||
| M3-001 | done | sonnet | M3: Providers | IProviderAdapter + OllamaAdapter | #306 | #240 closed |
|
||||
| M3-002 | done | sonnet | M3: Providers | AnthropicAdapter | #309 | #241 closed |
|
||||
| M3-003 | done | sonnet | M3: Providers | OpenAIAdapter | #310 | #242 closed |
|
||||
| M3-004 | done | sonnet | M3: Providers | OpenRouterAdapter | #311 | #243 closed |
|
||||
| M3-005 | done | sonnet | M3: Providers | ZaiAdapter (GLM-5) | #314 | #244 closed |
|
||||
| M3-006 | done | sonnet | M3: Providers | Ollama embedding support | #311 | #245 closed |
|
||||
| M3-007 | done | sonnet | M3: Providers | Provider health checks | #308 | #246 closed |
|
||||
| M3-008 | done | sonnet | M3: Providers | Model capability matrix | #303 | #247 closed |
|
||||
| M3-009 | done | sonnet | M3: Providers | EmbeddingService → Ollama default | #308 | #248 closed |
|
||||
| M3-010 | done | sonnet | M3: Providers | OAuth token storage (AES-256-GCM) | #317 | #249 closed |
|
||||
| M3-011 | done | sonnet | M3: Providers | Provider credentials CRUD | #317 | #250 closed |
|
||||
| M3-012 | done | sonnet | M3: Providers | Verify providers — 40 tests | #319 | #251 closed |
|
||||
| M4-001 | done | sonnet | M4: Routing | routing_rules DB schema | #315 | #252 closed |
|
||||
| M4-002 | done | sonnet | M4: Routing | Condition types | #315 | #253 closed |
|
||||
| M4-003 | done | sonnet | M4: Routing | Action types | #315 | #254 closed |
|
||||
| M4-004 | done | sonnet | M4: Routing | Default routing rules (11 seeds) | #316 | #255 closed |
|
||||
| M4-005 | done | sonnet | M4: Routing | Task classifier (60+ tests) | #316 | #256 closed |
|
||||
| M4-006 | done | sonnet | M4: Routing | Routing decision pipeline | #318 | #257 closed |
|
||||
| M4-007 | done | sonnet | M4: Routing | /model override | #323 | #258 closed |
|
||||
| M4-008 | done | sonnet | M4: Routing | Routing transparency in session:info | #323 | #259 closed |
|
||||
| M4-009 | done | sonnet | M4: Routing | Routing rules CRUD API | #320 | #260 closed |
|
||||
| M4-010 | done | sonnet | M4: Routing | Per-user routing overrides | #320 | #261 closed |
|
||||
| M4-011 | done | sonnet | M4: Routing | Agent specialization capabilities | #320 | #262 closed |
|
||||
| M4-012 | done | sonnet | M4: Routing | Routing wired into ChatGateway | #323 | #263 closed |
|
||||
| M4-013 | done | sonnet | M4: Routing | Verify routing — 9 E2E tests | #323 | #264 closed |
|
||||
| M5-001 | done | sonnet | M5: Sessions | Agent config loaded on session create | #323 | #265 closed |
|
||||
| M5-002 | done | sonnet | M5: Sessions | /model command end-to-end | #323 | #266 closed |
|
||||
| M5-003 | done | sonnet | M5: Sessions | /agent command mid-session | #323 | #267 closed |
|
||||
| M5-004 | done | sonnet | M5: Sessions | Session ↔ conversation binding | #321 | #268 closed |
|
||||
| M5-005 | done | sonnet | M5: Sessions | Session info broadcast | #321 | #269 closed |
|
||||
| M5-006 | done | sonnet | M5: Sessions | /agent new from TUI | #321 | #270 closed |
|
||||
| M5-007 | done | sonnet | M5: Sessions | Session metrics | #321 | #271 closed |
|
||||
| M5-008 | done | sonnet | M5: Sessions | Verify sessions — 28 tests | #324 | #272 closed |
|
||||
| M6-001 | done | sonnet | M6: Jobs | BullMQ + Valkey config | #324 | #273 closed |
|
||||
| M6-002 | done | sonnet | M6: Jobs | Queue service with typed jobs | #324 | #274 closed |
|
||||
| M6-003 | done | sonnet | M6: Jobs | Summarization → BullMQ | #324 | #275 closed |
|
||||
| M6-004 | done | sonnet | M6: Jobs | GC → BullMQ | #324 | #276 closed |
|
||||
| M6-005 | done | sonnet | M6: Jobs | Tier management → BullMQ | #324 | #277 closed |
|
||||
| M6-006 | done | sonnet | M6: Jobs | Admin jobs API | #325 | #278 closed |
|
||||
| M6-007 | done | sonnet | M6: Jobs | Job event logging | #325 | #279 closed |
|
||||
| M6-008 | done | sonnet | M6: Jobs | Verify jobs | #324 | #280 closed |
|
||||
| M7-001 | done | sonnet | M7: Channel Design | IChannelAdapter interface | #325 | #281 closed |
|
||||
| M7-002 | done | sonnet | M7: Channel Design | Channel message protocol | #325 | #282 closed |
|
||||
| M7-003 | done | sonnet | M7: Channel Design | Matrix integration design | #326 | #283 closed |
|
||||
| M7-004 | done | sonnet | M7: Channel Design | Conversation multiplexing | #326 | #284 closed |
|
||||
| M7-005 | done | sonnet | M7: Channel Design | Remote auth bridging | #326 | #285 closed |
|
||||
| M7-006 | done | sonnet | M7: Channel Design | Agent-to-agent via Matrix | #326 | #286 closed |
|
||||
| M7-007 | done | sonnet | M7: Channel Design | Multi-user isolation in Matrix | #326 | #287 closed |
|
||||
| M7-008 | done | sonnet | M7: Channel Design | channel-protocol.md published | #326 | #288 closed |
|
||||
|
||||
743
docs/architecture/channel-protocol.md
Normal file
743
docs/architecture/channel-protocol.md
Normal file
@@ -0,0 +1,743 @@
|
||||
# Channel Protocol Architecture
|
||||
|
||||
**Status:** Draft
|
||||
**Authors:** Mosaic Core Team
|
||||
**Last Updated:** 2026-03-22
|
||||
**Covers:** M7-001 (IChannelAdapter interface), M7-002 (ChannelMessage protocol), M7-003 (Matrix integration design), M7-004 (conversation multiplexing), M7-005 (remote auth bridging), M7-006 (agent-to-agent communication via Matrix), M7-007 (multi-user isolation in Matrix)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
The channel protocol defines a unified abstraction layer between Mosaic's core messaging infrastructure and the external communication channels it supports (Matrix, Discord, Telegram, TUI, WebUI, and future channels).
|
||||
|
||||
The protocol consists of two main contracts:
|
||||
|
||||
1. `IChannelAdapter` — the interface each channel driver must implement.
|
||||
2. `ChannelMessage` — the canonical message format that flows through the system.
|
||||
|
||||
All channel-specific translation logic lives inside the adapter implementation. The rest of Mosaic works exclusively with `ChannelMessage` objects.
|
||||
|
||||
---
|
||||
|
||||
## M7-001: IChannelAdapter Interface
|
||||
|
||||
```typescript
|
||||
interface IChannelAdapter {
|
||||
/**
|
||||
* Stable, lowercase identifier for this channel (e.g. "matrix", "discord").
|
||||
* Used as a namespace key in registry lookups and log metadata.
|
||||
*/
|
||||
readonly name: string;
|
||||
|
||||
/**
|
||||
* Establish a connection to the external channel backend.
|
||||
* Called once at application startup. Must be idempotent (safe to call
|
||||
* when already connected).
|
||||
*/
|
||||
connect(): Promise<void>;
|
||||
|
||||
/**
|
||||
* Gracefully disconnect from the channel backend.
|
||||
* Must flush in-flight sends and release resources before resolving.
|
||||
*/
|
||||
disconnect(): Promise<void>;
|
||||
|
||||
/**
|
||||
* Return the current health of the adapter connection.
|
||||
* Used by the admin health endpoint and alerting.
|
||||
*
|
||||
* - "connected" — fully operational
|
||||
* - "degraded" — partial connectivity (e.g. read-only, rate-limited)
|
||||
* - "disconnected" — no connection to channel backend
|
||||
*/
|
||||
health(): Promise<{ status: 'connected' | 'degraded' | 'disconnected' }>;
|
||||
|
||||
/**
|
||||
* Register an inbound message handler.
|
||||
* The adapter calls `handler` for every message received from the channel.
|
||||
* Multiple calls replace the previous handler (last-write-wins).
|
||||
* The handler is async; the adapter must not deliver new messages until
|
||||
* the previous handler promise resolves (back-pressure).
|
||||
*/
|
||||
onMessage(handler: (msg: ChannelMessage) => Promise<void>): void;
|
||||
|
||||
/**
|
||||
* Send a ChannelMessage to the given channel/room/conversation.
|
||||
* `channelId` is the channel-native identifier (e.g. Matrix room ID,
|
||||
* Discord channel snowflake, Telegram chat ID).
|
||||
*/
|
||||
sendMessage(channelId: string, msg: ChannelMessage): Promise<void>;
|
||||
|
||||
/**
|
||||
* Map a channel-native user identifier to the Mosaic internal userId.
|
||||
* Returns null when no matching Mosaic account exists for the given
|
||||
* channelUserId (anonymous or unlinked user).
|
||||
*/
|
||||
mapIdentity(channelUserId: string): Promise<string | null>;
|
||||
}
|
||||
```
|
||||
|
||||
### Adapter Registration
|
||||
|
||||
Adapters are registered with the `ChannelRegistry` service at startup. The registry calls `connect()` on each adapter and monitors `health()` on a configurable interval (default: 30 s).
|
||||
|
||||
```
|
||||
ChannelRegistry
|
||||
└── register(adapter: IChannelAdapter): void
|
||||
└── getAdapter(name: string): IChannelAdapter | null
|
||||
└── listAdapters(): IChannelAdapter[]
|
||||
└── healthAll(): Promise<Record<string, AdapterHealth>>
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## M7-002: ChannelMessage Protocol
|
||||
|
||||
### Canonical Message Format
|
||||
|
||||
```typescript
|
||||
interface ChannelMessage {
|
||||
/**
|
||||
* Globally unique message ID.
|
||||
* Format: UUID v4. Generated by the adapter when receiving, or by Mosaic
|
||||
* when sending. Channel-native IDs are stored in metadata.channelMessageId.
|
||||
*/
|
||||
id: string;
|
||||
|
||||
/**
|
||||
* Channel-native room/conversation/channel identifier.
|
||||
* The adapter populates this from the inbound message.
|
||||
* For outbound messages, the caller supplies the target channel.
|
||||
*/
|
||||
channelId: string;
|
||||
|
||||
/**
|
||||
* Channel-native identifier of the message sender.
|
||||
* For Mosaic-originated messages this is the Mosaic userId or agentId.
|
||||
*/
|
||||
senderId: string;
|
||||
|
||||
/** Sender classification. */
|
||||
senderType: 'user' | 'agent' | 'system';
|
||||
|
||||
/**
|
||||
* Textual content of the message.
|
||||
* For non-text content types (image, file) this may be an empty string
|
||||
* or an alt-text description; the actual payload is in `attachments`.
|
||||
*/
|
||||
content: string;
|
||||
|
||||
/**
|
||||
* Hint for how `content` should be interpreted and rendered.
|
||||
* - "text" — plain text, no special rendering
|
||||
* - "markdown" — CommonMark markdown
|
||||
* - "code" — code block (use metadata.language for the language tag)
|
||||
* - "image" — binary image; content is empty, see attachments
|
||||
* - "file" — binary file; content is empty, see attachments
|
||||
*/
|
||||
contentType: 'text' | 'markdown' | 'code' | 'image' | 'file';
|
||||
|
||||
/**
|
||||
* Arbitrary key-value metadata for channel-specific extension fields.
|
||||
* Examples: { channelMessageId, language, reactionEmoji, channelType }.
|
||||
* Adapters should store channel-native IDs here so round-trip correlation
|
||||
* is possible without altering the canonical fields.
|
||||
*/
|
||||
metadata: Record<string, unknown>;
|
||||
|
||||
/**
|
||||
* Optional thread or reply-chain identifier.
|
||||
* For threaded channels (Matrix, Discord threads, Telegram topics) this
|
||||
* groups messages into a logical thread scoped to the same channelId.
|
||||
*/
|
||||
threadId?: string;
|
||||
|
||||
/**
|
||||
* The canonical message ID this message is a reply to.
|
||||
* Maps to channel-native reply/quote mechanisms in each adapter.
|
||||
*/
|
||||
replyToId?: string;
|
||||
|
||||
/**
|
||||
* Binary or URI-referenced attachments.
|
||||
* Each attachment carries its MIME type and a URL or base64 payload.
|
||||
*/
|
||||
attachments?: ChannelAttachment[];
|
||||
|
||||
/** Wall-clock timestamp when the message was sent/received. */
|
||||
timestamp: Date;
|
||||
}
|
||||
|
||||
interface ChannelAttachment {
|
||||
/** Filename or identifier. */
|
||||
name: string;
|
||||
|
||||
/** MIME type (e.g. "image/png", "application/pdf"). */
|
||||
mimeType: string;
|
||||
|
||||
/**
|
||||
* URL pointing to the attachment, OR a `data:` URI with base64 payload.
|
||||
* Adapters that receive file uploads SHOULD store to object storage and
|
||||
* populate a stable URL here rather than embedding the raw bytes.
|
||||
*/
|
||||
url: string;
|
||||
|
||||
/** Size in bytes, if known. */
|
||||
sizeBytes?: number;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Channel Translation Reference
|
||||
|
||||
The following sections document how each supported channel maps its native message format to and from `ChannelMessage`.
|
||||
|
||||
### Matrix
|
||||
|
||||
| ChannelMessage field | Matrix equivalent |
|
||||
| -------------------- | --------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `id` | Generated UUID; `metadata.channelMessageId` = Matrix event ID (`$...`) |
|
||||
| `channelId` | Matrix room ID (`!roomid:homeserver`) |
|
||||
| `senderId` | Matrix user ID (`@user:homeserver`) |
|
||||
| `senderType` | Always `"user"` for inbound; `"agent"` or `"system"` for outbound |
|
||||
| `content` | `event.content.body` |
|
||||
| `contentType` | `"markdown"` if `msgtype = m.text` and body contains markdown; `"text"` otherwise; `"image"` for `m.image`; `"file"` for `m.file` |
|
||||
| `threadId` | `event.content['m.relates_to']['event_id']` when `rel_type = m.thread` |
|
||||
| `replyToId` | Mosaic ID looked up from `event.content['m.relates_to']['m.in_reply_to']['event_id']` |
|
||||
| `attachments` | Populated from `url` in `m.image` / `m.file` events |
|
||||
| `timestamp` | `new Date(event.origin_server_ts)` |
|
||||
| `metadata` | `{ channelMessageId, roomId, eventType, unsigned }` |
|
||||
|
||||
**Outbound:** Adapter sends `m.room.message` with `msgtype = m.text` (or `m.notice` for system messages). Markdown content is sent with `format = org.matrix.custom.html` and a rendered HTML body.
|
||||
|
||||
---
|
||||
|
||||
### Discord
|
||||
|
||||
| ChannelMessage field | Discord equivalent |
|
||||
| -------------------- | ----------------------------------------------------------------------- |
|
||||
| `id` | Generated UUID; `metadata.channelMessageId` = Discord message snowflake |
|
||||
| `channelId` | Discord channel ID (snowflake string) |
|
||||
| `senderId` | Discord user ID (snowflake) |
|
||||
| `senderType` | `"user"` for human members; `"agent"` for bot messages |
|
||||
| `content` | `message.content` |
|
||||
| `contentType` | `"markdown"` (Discord uses a markdown-like syntax natively) |
|
||||
| `threadId` | `message.thread.id` when the message is inside a thread channel |
|
||||
| `replyToId` | Mosaic ID looked up from `message.referenced_message.id` |
|
||||
| `attachments` | `message.attachments` mapped to `ChannelAttachment` |
|
||||
| `timestamp` | `new Date(message.timestamp)` |
|
||||
| `metadata` | `{ channelMessageId, guildId, channelType, mentions, embeds }` |
|
||||
|
||||
**Outbound:** Adapter calls Discord REST `POST /channels/{id}/messages`. Markdown content is sent as-is (Discord renders it). For `contentType = "code"` the adapter wraps in triple-backtick fences with the `metadata.language` tag.
|
||||
|
||||
---
|
||||
|
||||
### Telegram
|
||||
|
||||
| ChannelMessage field | Telegram equivalent |
|
||||
| -------------------- | ------------------------------------------------------------------------------------------------------------- |
|
||||
| `id` | Generated UUID; `metadata.channelMessageId` = Telegram `message_id` (integer) |
|
||||
| `channelId` | Telegram `chat_id` (integer as string) |
|
||||
| `senderId` | Telegram `from.id` (integer as string) |
|
||||
| `senderType` | `"user"` for human senders; `"agent"` for bot-originated messages |
|
||||
| `content` | `message.text` or `message.caption` |
|
||||
| `contentType` | `"text"` for plain; `"markdown"` if `parse_mode = MarkdownV2`; `"image"` for `photo`; `"file"` for `document` |
|
||||
| `threadId` | `message.message_thread_id` (for supergroup topics) |
|
||||
| `replyToId` | Mosaic ID looked up from `message.reply_to_message.message_id` |
|
||||
| `attachments` | `photo`, `document`, `video` fields mapped to `ChannelAttachment` |
|
||||
| `timestamp` | `new Date(message.date * 1000)` |
|
||||
| `metadata` | `{ channelMessageId, chatType, fromUsername, forwardFrom }` |
|
||||
|
||||
**Outbound:** Adapter calls Telegram Bot API `sendMessage` with `parse_mode = MarkdownV2` for markdown content. For `contentType = "image"` or `"file"` it uses `sendPhoto` / `sendDocument`.
|
||||
|
||||
---
|
||||
|
||||
### TUI (Terminal UI)
|
||||
|
||||
The TUI adapter bridges Mosaic's terminal interface (`packages/cli`) to the channel protocol so that TUI sessions can be treated as a first-class channel.
|
||||
|
||||
| ChannelMessage field | TUI equivalent |
|
||||
| -------------------- | ------------------------------------------------------------------ |
|
||||
| `id` | Generated UUID (TUI has no native message IDs) |
|
||||
| `channelId` | `"tui:<conversationId>"` — the active conversation ID |
|
||||
| `senderId` | Authenticated Mosaic `userId` |
|
||||
| `senderType` | `"user"` for human input; `"agent"` for agent replies |
|
||||
| `content` | Raw text from stdin / agent output |
|
||||
| `contentType` | `"text"` for input; `"markdown"` for agent responses |
|
||||
| `threadId` | Not used (TUI sessions are linear) |
|
||||
| `replyToId` | Not used |
|
||||
| `attachments` | File paths dragged/pasted into the TUI; resolved to `file://` URLs |
|
||||
| `timestamp` | `new Date()` at the moment of send |
|
||||
| `metadata` | `{ conversationId, sessionId, ttyWidth, colorSupport }` |
|
||||
|
||||
**Outbound:** The adapter writes rendered content to stdout. Markdown is rendered via a terminal markdown renderer (e.g. `marked-terminal`). Code blocks are syntax-highlighted when `metadata.colorSupport = true`.
|
||||
|
||||
---
|
||||
|
||||
### WebUI
|
||||
|
||||
The WebUI adapter connects the Next.js frontend (`apps/web`) to the channel protocol over the existing Socket.IO gateway (`apps/gateway`).
|
||||
|
||||
| ChannelMessage field | WebUI equivalent |
|
||||
| -------------------- | ------------------------------------------------------------ |
|
||||
| `id` | Generated UUID; echoed back in the WebSocket event |
|
||||
| `channelId` | `"webui:<conversationId>"` |
|
||||
| `senderId` | Authenticated Mosaic `userId` |
|
||||
| `senderType` | `"user"` for browser input; `"agent"` for agent responses |
|
||||
| `content` | Message text from the input field |
|
||||
| `contentType` | `"text"` or `"markdown"` |
|
||||
| `threadId` | Not used (conversation model handles threading) |
|
||||
| `replyToId` | Message ID the user replied to (UI reply affordance) |
|
||||
| `attachments` | Files uploaded via the file picker; stored to object storage |
|
||||
| `timestamp` | `new Date()` at send, or server timestamp from event |
|
||||
| `metadata` | `{ conversationId, sessionId, clientTimezone, userAgent }` |
|
||||
|
||||
**Outbound:** Adapter emits a `chat:message` Socket.IO event. The WebUI React component receives it and appends to the conversation list. Markdown content is rendered client-side via the existing markdown renderer component.
|
||||
|
||||
---
|
||||
|
||||
## Identity Mapping
|
||||
|
||||
`mapIdentity(channelUserId)` resolves a channel-native user identifier to a Mosaic `userId`. This is required to attribute inbound messages to authenticated Mosaic accounts.
|
||||
|
||||
The implementation must query a `channel_identities` table (or equivalent) keyed on `(channel_name, channel_user_id)`. When no mapping exists the method returns `null` and the message is treated as anonymous (no Mosaic session context).
|
||||
|
||||
```
|
||||
channel_identities
|
||||
channel_name TEXT -- e.g. "matrix", "discord"
|
||||
channel_user_id TEXT -- channel-native user identifier
|
||||
mosaic_user_id TEXT -- FK to users.id
|
||||
linked_at TIMESTAMP
|
||||
PRIMARY KEY (channel_name, channel_user_id)
|
||||
```
|
||||
|
||||
Identity linking flows (OAuth dance, deep-link verification token, etc.) are out of scope for this document and will be specified in a separate identity-linking protocol document.
|
||||
|
||||
---
|
||||
|
||||
## Error Handling Conventions
|
||||
|
||||
- `connect()` must throw a structured error (subclass of `ChannelConnectError`) if the initial connection cannot be established within a reasonable timeout (default: 10 s).
|
||||
- `sendMessage()` must throw `ChannelSendError` on terminal failures (auth revoked, channel not found). Transient failures (rate limit, network blip) should be retried internally with exponential backoff before throwing.
|
||||
- `health()` must never throw — it returns `{ status: 'disconnected' }` on error.
|
||||
- Adapters must emit structured logs with `{ channel: adapter.name, event, ... }` metadata for observability.
|
||||
|
||||
---
|
||||
|
||||
## Versioning
|
||||
|
||||
The `ChannelMessage` protocol follows semantic versioning. Non-breaking field additions (new optional fields) are minor version bumps. Breaking changes (type changes, required field additions) require a major version bump and a migration guide.
|
||||
|
||||
Current version: **1.0.0**
|
||||
|
||||
---
|
||||
|
||||
## M7-003: Matrix Integration Design
|
||||
|
||||
### Homeserver Choice
|
||||
|
||||
Mosaic uses **Conduit** as the Matrix homeserver. Conduit is written in Rust, ships as a single binary, and has minimal operational overhead compared to Synapse or Dendrite. It supports the full Matrix Client-Server and Application Service APIs required by Mosaic.
|
||||
|
||||
Recommended deployment: Conduit runs as a Docker container alongside the Mosaic stack. A single Conduit instance is sufficient for most self-hosted deployments. Conduit's embedded RocksDB storage means no separate database is required for the homeserver itself.
|
||||
|
||||
### Appservice Registration
|
||||
|
||||
Mosaic registers with the Conduit homeserver as a Matrix **Application Service (appservice)**. This gives Mosaic the ability to:
|
||||
|
||||
- Create and control ghost users (virtual Matrix users representing Mosaic agents and provisioned accounts).
|
||||
- Receive all events sent to rooms within the appservice's namespace without polling.
|
||||
- Send events on behalf of ghost users without separate authentication.
|
||||
|
||||
Registration is done via a YAML registration file (`mosaic-appservice.yaml`) placed in Conduit's configuration directory:
|
||||
|
||||
```yaml
|
||||
id: mosaic
|
||||
url: http://gateway:3000/_matrix/appservice
|
||||
as_token: <random-secret>
|
||||
hs_token: <random-secret>
|
||||
sender_localpart: mosaic-bot
|
||||
namespaces:
|
||||
users:
|
||||
- exclusive: true
|
||||
regex: '@mosaic_.*:homeserver'
|
||||
rooms:
|
||||
- exclusive: false
|
||||
regex: '.*'
|
||||
aliases:
|
||||
- exclusive: true
|
||||
regex: '#mosaic-.*:homeserver'
|
||||
```
|
||||
|
||||
The gateway exposes `/_matrix/appservice` endpoints to receive push events from Conduit. The `as_token` and `hs_token` are stored in Vault and injected at startup.
|
||||
|
||||
### Room ↔ Conversation Mapping
|
||||
|
||||
Each Mosaic conversation maps to a single Matrix room. The mapping is stored in the database:
|
||||
|
||||
```
|
||||
conversation_matrix_rooms
|
||||
conversation_id TEXT -- FK to conversations.id
|
||||
room_id TEXT -- Matrix room ID (!roomid:homeserver)
|
||||
created_at TIMESTAMP
|
||||
PRIMARY KEY (conversation_id)
|
||||
```
|
||||
|
||||
Room creation is handled by the appservice on the first Matrix access to a conversation. Room names follow the pattern `Mosaic: <conversation title>`. Room topics contain the conversation ID for correlation.
|
||||
|
||||
When a conversation is deleted or archived in Mosaic, the corresponding Matrix room is tombstoned (m.room.tombstone event) and the room is left in a read-only state.
|
||||
|
||||
### Space ↔ Team Mapping
|
||||
|
||||
Each Mosaic team maps to a Matrix **Space**. Spaces are Matrix rooms with a special `m.space` type that can contain child rooms.
|
||||
|
||||
```
|
||||
team_matrix_spaces
|
||||
team_id TEXT -- FK to teams.id
|
||||
space_id TEXT -- Matrix room ID of the Space
|
||||
created_at TIMESTAMP
|
||||
PRIMARY KEY (team_id)
|
||||
```
|
||||
|
||||
When a conversation room is shared with a team, the appservice adds it to the team's Space via `m.space.child` state events. Removing the share removes the child relationship.
|
||||
|
||||
### Agent Ghost Users
|
||||
|
||||
Each Mosaic agent is represented in Matrix as an **appservice ghost user**:
|
||||
|
||||
- Matrix user ID format: `@mosaic_agent_<agentId>:homeserver`
|
||||
- Display name: the agent's human-readable name (e.g. "Mosaic Assistant")
|
||||
- Avatar: optional, configurable per agent
|
||||
|
||||
Ghost users are registered lazily — the appservice creates the ghost on first use. Ghost users are controlled exclusively by the appservice; they cannot log in via Matrix client credentials.
|
||||
|
||||
When an agent sends a message via the gateway, the Matrix adapter sends the event using `user_id` impersonation on the appservice's client endpoint, causing the message to appear as if sent by the ghost user.
|
||||
|
||||
### Power Levels
|
||||
|
||||
Power levels in each Mosaic-managed room are set as follows:
|
||||
|
||||
| Entity | Power Level | Rationale |
|
||||
| ------------------------------------- | -------------- | -------------------------------------- |
|
||||
| Mosaic appservice bot (`@mosaic-bot`) | 100 (Admin) | Room management and moderation |
|
||||
| Human Mosaic users | 50 (Moderator) | Can kick, redact, and invite |
|
||||
| Agent ghost users | 0 (Default) | Message-only; cannot modify room state |
|
||||
|
||||
This arrangement ensures human users retain full control. An agent cannot modify room settings, kick members, or take administrative actions. Humans with moderator power can redact agent messages and intervene in ongoing conversations.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Mosaic Admin] -->|invites| B[Human User]
|
||||
B -->|joins| C[Matrix Room / Conversation]
|
||||
D[Agent Ghost User] -->|sends messages to| C
|
||||
B -->|can redact/kick| D
|
||||
E[Mosaic Bot] -->|manages room state| C
|
||||
style A fill:#4a9eff
|
||||
style B fill:#4a9eff
|
||||
style D fill:#aaaaaa
|
||||
style E fill:#ff9944
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## M7-004: Conversation Multiplexing
|
||||
|
||||
### Architecture Overview
|
||||
|
||||
A single Mosaic conversation can be accessed simultaneously from multiple surfaces: TUI, WebUI, and Matrix. The gateway is the **single source of truth** for all conversation state. Each surface is a thin client that renders gateway-owned data.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────┐
|
||||
│ Gateway (NestJS) │
|
||||
│ │
|
||||
│ ConversationService ←→ MessageBus │
|
||||
│ │ │ │
|
||||
│ [DB: PostgreSQL] [Fanout: Valkey Pub/Sub] │
|
||||
│ │ │
|
||||
│ ┌─────────────────────┼──────────────┐ │
|
||||
│ │ │ │ │
|
||||
│ Socket.IO Socket.IO Matrix │ │
|
||||
│ (TUI adapter) (WebUI adapter) (appservice)│ │
|
||||
└──────────┼─────────────────────┼──────────────┘ │
|
||||
│ │ │
|
||||
CLI/TUI Browser Matrix
|
||||
Client
|
||||
```
|
||||
|
||||
### Real-Time Sync Flow
|
||||
|
||||
1. A message arrives on any surface (TUI keystroke, browser send, Matrix event).
|
||||
2. The surface's adapter normalizes the message to `ChannelMessage` and delivers it to `ConversationService`.
|
||||
3. `ConversationService` persists the message to PostgreSQL, assigns a canonical `id`, and publishes a `message:new` event to the Valkey pub/sub channel keyed by `conversationId`.
|
||||
4. All active surfaces subscribed to that `conversationId` receive the fanout event and push it to their respective clients:
|
||||
- TUI adapter: writes rendered output to the connected terminal session.
|
||||
- WebUI adapter: emits a `chat:message` Socket.IO event to all browser sessions joined to that conversation.
|
||||
- Matrix adapter: sends an `m.room.message` event to the conversation's Matrix room.
|
||||
|
||||
This ensures that a message typed in the TUI appears in the browser and in Matrix within the same round-trip latency as the Valkey fanout (typically <10 ms on co-located infrastructure).
|
||||
|
||||
### Surface-to-Transport Mapping
|
||||
|
||||
| Surface | Transport to Gateway | Fanout Transport from Gateway |
|
||||
| ------- | ------------------------------------------ | ----------------------------- |
|
||||
| TUI | HTTPS REST + SSE or WebSocket | Socket.IO over stdio proxy |
|
||||
| WebUI | Socket.IO (browser) | Socket.IO emit |
|
||||
| Matrix | Matrix Client-Server API (appservice push) | Matrix `m.room.message` send |
|
||||
|
||||
### Conflict Resolution
|
||||
|
||||
- **Messages**: Append-only. Messages are never edited in-place in Mosaic's canonical store. Matrix edit events (`m.replace`) are treated as new messages with `replyToId` pointing to the original, preserving the full audit trail.
|
||||
- **Metadata (title, tags, archived state)**: Last-write-wins. The timestamp of the most recent write wins. Concurrent metadata updates from different surfaces are serialized through `ConversationService`; the final database write reflects the last persisted value.
|
||||
- **Conversation membership**: Set-merge semantics. Adding a user from any surface is additive. Removal requires an explicit delete action and is not overwritten by concurrent adds.
|
||||
|
||||
### Session Isolation
|
||||
|
||||
Multiple TUI sessions or browser tabs connected to the same conversation receive all fanout messages independently. Each session maintains its own scroll position and local ephemeral state (typing indicator, draft text). Gateway does not synchronize ephemeral state across sessions.
|
||||
|
||||
---
|
||||
|
||||
## M7-005: Remote Auth Bridging
|
||||
|
||||
### Overview
|
||||
|
||||
Matrix users authenticate to Mosaic by linking their Matrix identity to an existing Mosaic account. There are two flows: token linking (primary) and OAuth bridge (alternative). Once linked, the Matrix session is persistent — there is no periodic login/logout cycle.
|
||||
|
||||
### Token Linking Flow
|
||||
|
||||
1. A Mosaic admin or the user themselves generates a short-lived link token via the Mosaic web UI or API (`POST /auth/channel-link-token`). The token is a cryptographically random 32-byte hex string with a 15-minute TTL stored in Valkey.
|
||||
2. The user opens a Matrix client and sends a DM to `@mosaic-bot:homeserver`.
|
||||
3. The user sends the command: `!link <token>`
|
||||
4. The appservice receives the `m.room.message` event in the DM room, extracts the token, and calls `AuthService.linkChannelIdentity({ channel: 'matrix', channelUserId: matrixUserId, token })`.
|
||||
5. `AuthService` validates the token, retrieves the associated `mosaicUserId`, and writes a row to `channel_identities`.
|
||||
6. The appservice sends a confirmation reply in the DM room and invites the now-linked user to their personal Matrix Space.
|
||||
|
||||
```
|
||||
User (Matrix) @mosaic-bot Mosaic Gateway
|
||||
│ │ │
|
||||
│ DM: !link <token> │ │
|
||||
│────────────────────▶│ │
|
||||
│ │ POST /auth/link │
|
||||
│ │─────────────────────▶│
|
||||
│ │ 200 OK │
|
||||
│ │◀─────────────────────│
|
||||
│ ✓ Linked! Joining │ │
|
||||
│ your Space now │ │
|
||||
│◀────────────────────│ │
|
||||
```
|
||||
|
||||
### OAuth Bridge Flow
|
||||
|
||||
An alternative flow for users who prefer browser-based authentication:
|
||||
|
||||
1. The Mosaic bot sends the user a Matrix message containing an OAuth URL: `https://mosaic.example.com/auth/matrix-link?state=<nonce>&matrix_user=<encoded_mxid>`
|
||||
2. The user opens the URL in a browser. If not already logged in to Mosaic, they are redirected through the standard BetterAuth login flow.
|
||||
3. On successful authentication, Mosaic records the `channel_identities` row linking `matrix_user` to the authenticated `mosaicUserId`.
|
||||
4. The gateway sends a Matrix event to the pending DM room confirming the link.
|
||||
|
||||
### Invite-Based Provisioning
|
||||
|
||||
When a Mosaic admin adds a new user account, the provisioning flow optionally associates a Matrix user ID with the new account at creation time:
|
||||
|
||||
1. Admin provides `matrixUserId` when creating the account (`POST /admin/users`).
|
||||
2. `UserService` writes the `channel_identities` row immediately.
|
||||
3. The Matrix adapter's provisioning hook fires, and the appservice:
|
||||
- Creates the user's personal Matrix Space (if not already existing).
|
||||
- Sends an invite to the Matrix user for their personal Space.
|
||||
- Sends a welcome DM from `@mosaic-bot` with onboarding instructions.
|
||||
|
||||
The invited user does not need to complete any linking step — the association is pre-established by the admin.
|
||||
|
||||
### Session Lifecycle
|
||||
|
||||
Matrix sessions for linked users are persistent and long-lived. Unlike TUI sessions (which terminate when the terminal process exits), a Matrix user's access to their rooms remains intact as long as:
|
||||
|
||||
- Their Mosaic account is active (not suspended or deleted).
|
||||
- Their `channel_identities` row exists (link not revoked).
|
||||
- They remain members of the relevant Matrix rooms.
|
||||
|
||||
Revoking a Matrix link (`DELETE /auth/channel-link/matrix/<matrixUserId>`) removes the `channel_identities` row and causes `mapIdentity()` to return `null`. The appservice optionally kicks the Matrix user from all Mosaic-managed rooms as part of the revocation flow (configurable, default: off).
|
||||
|
||||
---
|
||||
|
||||
## M7-006: Agent-to-Agent Communication via Matrix
|
||||
|
||||
### Dedicated Agent Rooms
|
||||
|
||||
When two Mosaic agents need to coordinate, a dedicated Matrix room is created for their dialogue. This provides a persistent, auditable channel for structured inter-agent communication that humans can observe.
|
||||
|
||||
Room naming convention:
|
||||
|
||||
```
|
||||
#mosaic-agents-<agentA>-<agentB>:homeserver
|
||||
```
|
||||
|
||||
Where `agentA` and `agentB` are the Mosaic agent IDs sorted lexicographically (to ensure the same room is used regardless of which agent initiates). The room alias is registered by the appservice.
|
||||
|
||||
```
|
||||
agent_rooms
|
||||
room_id TEXT -- Matrix room ID
|
||||
agent_a_id TEXT -- FK to agents.id (lexicographically first)
|
||||
agent_b_id TEXT -- FK to agents.id (lexicographically second)
|
||||
created_at TIMESTAMP
|
||||
PRIMARY KEY (agent_a_id, agent_b_id)
|
||||
```
|
||||
|
||||
### Room Membership and Power Levels
|
||||
|
||||
| Entity | Power Level |
|
||||
| ---------------------------------- | ------------------------------------ |
|
||||
| Mosaic appservice bot | 100 (Admin) |
|
||||
| Human observers (invited) | 50 (Moderator, read-only by default) |
|
||||
| Agent ghost users (agentA, agentB) | 0 (Default — message send only) |
|
||||
|
||||
Humans are invited to agent rooms as passive observers. By convention, human messages in agent rooms are prefixed with `[HUMAN]` and treated as interrupts by the gateway. Agents are instructed (via system prompt) to pause and acknowledge human messages before resuming their dialogue.
|
||||
|
||||
### Message Format
|
||||
|
||||
Agents communicate using **structured JSON** embedded in Matrix event content. The Matrix event type is `m.room.message` with `msgtype: "m.text"` for compatibility. The structured payload is carried in a custom `mosaic.agent_message` field:
|
||||
|
||||
```json
|
||||
{
|
||||
"msgtype": "m.text",
|
||||
"body": "[Agent message — see mosaic.agent_message for structured content]",
|
||||
"mosaic.agent_message": {
|
||||
"schema_version": "1.0",
|
||||
"sender_agent_id": "agent_abc123",
|
||||
"conversation_id": "conv_xyz789",
|
||||
"message_type": "request",
|
||||
"payload": {
|
||||
"action": "summarize",
|
||||
"parameters": { "max_tokens": 500 },
|
||||
"reply_to_event_id": "$previousEventId"
|
||||
},
|
||||
"timestamp_ms": 1711234567890
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `body` field contains a human-readable fallback so the conversation is legible in any Matrix client. The structured payload is parsed exclusively by the gateway's Matrix adapter.
|
||||
|
||||
### Coordination Patterns
|
||||
|
||||
**Request/Response**: Agent A sends a `message_type: "request"` event. Agent B sends a `message_type: "response"` with `reply_to_event_id` referencing Agent A's event. The gateway correlates request/response pairs using the event IDs.
|
||||
|
||||
**Broadcast**: An agent sends a `message_type: "broadcast"` to a multi-agent room (more than two members). All agents in the room receive the event. No response is expected.
|
||||
|
||||
**Delegation**: Agent A sends a `message_type: "delegate"` with a `payload.task` object describing work to be handed off to Agent B. Agent B acknowledges with `message_type: "delegate_ack"` and later sends `message_type: "delegate_complete"` when done.
|
||||
|
||||
```
|
||||
AgentA Gateway AgentB
|
||||
│ delegate(task) │ │
|
||||
│────────────────────▶│ │
|
||||
│ │ Matrix event push │
|
||||
│ │────────────────────▶│
|
||||
│ │ delegate_ack │
|
||||
│ │◀────────────────────│
|
||||
│ │ [AgentB executes] │
|
||||
│ │ delegate_complete │
|
||||
│ │◀────────────────────│
|
||||
│ task result │ │
|
||||
│◀────────────────────│ │
|
||||
```
|
||||
|
||||
### Gateway Mediation
|
||||
|
||||
Agents do not call the Matrix Client-Server API directly. All inter-agent Matrix events are sent and received by the gateway's appservice. This means:
|
||||
|
||||
- The gateway can intercept, log, and rate-limit agent-to-agent messages.
|
||||
- Agents that are offline (no active process) still have their messages delivered; the gateway queues them and delivers on the agent's next activation.
|
||||
- The gateway can inject system messages (e.g. human interrupts, safety stops) into agent rooms without agent cooperation.
|
||||
|
||||
---
|
||||
|
||||
## M7-007: Multi-User Isolation in Matrix
|
||||
|
||||
### Space-per-Team Architecture
|
||||
|
||||
Isolation in Matrix is enforced through the Space hierarchy. Each organizational boundary in Mosaic maps to a distinct Matrix Space:
|
||||
|
||||
| Mosaic entity | Matrix Space | Visibility |
|
||||
| ----------------------------- | -------------- | ----------------- |
|
||||
| Personal workspace (per user) | Personal Space | User only |
|
||||
| Team | Team Space | Team members only |
|
||||
| Public project | (no Space) | Configurable |
|
||||
|
||||
Rooms (conversations) are placed into Spaces based on their sharing configuration. A room can appear in at most one team Space at a time. Moving a room from one team Space to another removes the `m.space.child` link from the old Space and adds it to the new one.
|
||||
|
||||
### Room Visibility Rules
|
||||
|
||||
Matrix room visibility within Conduit is controlled by:
|
||||
|
||||
1. **Join rules**: All Mosaic-managed rooms use `join_rule: invite`. Users cannot discover or join rooms without an explicit invite from the appservice.
|
||||
2. **Space membership**: Rooms appear in a Space's directory only to users who are members of that Space.
|
||||
3. **Room directory**: Mosaic-managed rooms are excluded from the public server room directory. Separately, history visibility is restricted per room type (`m.room.history_visibility: shared` for team rooms, `m.room.history_visibility: invited` for personal rooms).
|
||||
|
||||
### Personal Space Defaults
|
||||
|
||||
When a user account is created (or linked to Matrix), the appservice provisions a personal Space:
|
||||
|
||||
- Space name: `<username>'s Space`
|
||||
- All conversations the user creates personally are added as children of their personal Space.
|
||||
- No other users are members of this Space by default.
|
||||
- Conversation rooms within the personal Space are only visible and accessible to the owner.
|
||||
|
||||
### Team Shared Rooms
|
||||
|
||||
When a project or conversation is shared with a team:
|
||||
|
||||
1. The appservice adds the room as a child of the team's Space (`m.space.child` state event in the Space room, `m.space.parent` state event in the conversation room).
|
||||
2. All current team members are invited to the conversation room.
|
||||
3. Newly added team members are automatically invited to all shared rooms in the team's Space by the appservice's team membership hook.
|
||||
4. If sharing is revoked, the appservice removes the `m.space.child` link and kicks all team members who joined via the team share (users who were directly invited are unaffected).
|
||||
|
||||
### Encryption
|
||||
|
||||
Encryption is optional and configured per room at creation time. Recommended defaults:
|
||||
|
||||
| Space type | Encryption default | Rationale |
|
||||
| -------------- | ------------------ | -------------------------------------- |
|
||||
| Personal Space | Enabled | Privacy-first for individual users |
|
||||
| Team Space | Disabled | Operational visibility; admin auditing |
|
||||
| Agent rooms | Disabled | Gateway must read structured payloads |
|
||||
|
||||
When encryption is enabled, the appservice's ghost users must participate in key exchange (using Matrix's Olm/Megolm protocol). The gateway holds the device keys for all ghost users it controls. This constraint means encrypted rooms require the gateway to be the E2E session holder — messages are end-to-end encrypted between human clients and gateway-held ghost device keys, not between human clients themselves.
|
||||
|
||||
### Admin Visibility
|
||||
|
||||
A Conduit server administrator can see:
|
||||
|
||||
- Room metadata: names, aliases, topic, membership list.
|
||||
- Unencrypted event content in unencrypted rooms.
|
||||
|
||||
A Conduit server administrator **cannot** see:
|
||||
|
||||
- Content of encrypted rooms (without holding a device key for a room member).
|
||||
|
||||
Mosaic does not grant Conduit server admin credentials to application-level admin users. The Conduit admin interface is restricted to infrastructure operators. Application-level admins manage users and rooms through the Mosaic API, which interacts with the appservice layer only.
|
||||
|
||||
### Data Retention
|
||||
|
||||
Matrix events in Mosaic-managed rooms follow Mosaic's configurable retention policy:
|
||||
|
||||
```
|
||||
room_retention_policies
|
||||
room_id TEXT -- Matrix room ID (or wildcard pattern)
|
||||
retention_days INT -- NULL = keep forever
|
||||
applies_to TEXT -- "personal" | "team" | "agent" | "all"
|
||||
created_at TIMESTAMP
|
||||
```
|
||||
|
||||
The retention policy is enforced by a background job in the gateway that calls Conduit's admin API to purge events older than the configured threshold. Purged events are removed from the Conduit store but Mosaic's PostgreSQL message store retains the canonical `ChannelMessage` record unless the Mosaic retention policy also covers it.
|
||||
|
||||
Default retention values:
|
||||
|
||||
| Room type | Default retention |
|
||||
| --------------------------- | ------------------- |
|
||||
| Personal conversation rooms | 365 days |
|
||||
| Team conversation rooms | 730 days |
|
||||
| Agent-to-agent rooms | 90 days |
|
||||
| System/audit rooms | 1825 days (5 years) |
|
||||
|
||||
Retention settings are configurable by Mosaic admins via the admin API and apply to both the Matrix event store and the Mosaic message store in lockstep.
|
||||
140
pnpm-lock.yaml
generated
140
pnpm-lock.yaml
generated
@@ -131,6 +131,9 @@ importers:
|
||||
better-auth:
|
||||
specifier: ^1.5.5
|
||||
version: 1.5.5(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.15.6)(kysely@0.28.11)(postgres@3.4.8))(mongodb@7.1.0(socks@2.8.7))(next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@2.1.9(@types/node@22.19.15)(jsdom@29.0.0(@noble/hashes@2.0.1))(lightningcss@1.31.1))
|
||||
bullmq:
|
||||
specifier: ^5.71.0
|
||||
version: 5.71.0
|
||||
class-transformer:
|
||||
specifier: ^0.5.1
|
||||
version: 0.5.1
|
||||
@@ -1737,6 +1740,9 @@ packages:
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@ioredis/commands@1.5.0':
|
||||
resolution: {integrity: sha512-eUgLqrMf8nJkZxT24JvVRrQya1vZkQh8BBeYNwGDqa5I0VUi8ACx7uFvAaLxintokpTenkK6DASvo/bvNbBGow==}
|
||||
|
||||
'@ioredis/commands@1.5.1':
|
||||
resolution: {integrity: sha512-JH8ZL/ywcJyR9MmJ5BNqZllXNZQqQbnVZOqpPQqE1vHiFgAw4NHbvE0FOduNU8IX9babitBT46571OnPTT0Zcw==}
|
||||
|
||||
@@ -1868,6 +1874,36 @@ packages:
|
||||
'@mongodb-js/saslprep@1.4.6':
|
||||
resolution: {integrity: sha512-y+x3H1xBZd38n10NZF/rEBlvDOOMQ6LKUTHqr8R9VkJ+mmQOYtJFxIlkkK8fZrtOiL6VixbOBWMbZGBdal3Z1g==}
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3':
|
||||
resolution: {integrity: sha512-QZHtlVgbAdy2zAqNA9Gu1UpIuI8Xvsd1v8ic6B2pZmeFnFcMWiPLfWXh7TVw4eGEZ/C9TH281KwhVoeQUKbyjw==}
|
||||
cpu: [arm64]
|
||||
os: [darwin]
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3':
|
||||
resolution: {integrity: sha512-mdzd3AVzYKuUmiWOQ8GNhl64/IoFGol569zNRdkLReh6LRLHOXxU4U8eq0JwaD8iFHdVGqSy4IjFL4reoWCDFw==}
|
||||
cpu: [x64]
|
||||
os: [darwin]
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3':
|
||||
resolution: {integrity: sha512-YxQL+ax0XqBJDZiKimS2XQaf+2wDGVa1enVRGzEvLLVFeqa5kx2bWbtcSXgsxjQB7nRqqIGFIcLteF/sHeVtQg==}
|
||||
cpu: [arm64]
|
||||
os: [linux]
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3':
|
||||
resolution: {integrity: sha512-fg0uy/dG/nZEXfYilKoRe7yALaNmHoYeIoJuJ7KJ+YyU2bvY8vPv27f7UKhGRpY6euFYqEVhxCFZgAUNQBM3nw==}
|
||||
cpu: [arm]
|
||||
os: [linux]
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3':
|
||||
resolution: {integrity: sha512-cvwNfbP07pKUfq1uH+S6KJ7dT9K8WOE4ZiAcsrSes+UY55E/0jLYc+vq+DO7jlmqRb5zAggExKm0H7O/CBaesg==}
|
||||
cpu: [x64]
|
||||
os: [linux]
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3':
|
||||
resolution: {integrity: sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ==}
|
||||
cpu: [x64]
|
||||
os: [win32]
|
||||
|
||||
'@nestjs/common@11.1.16':
|
||||
resolution: {integrity: sha512-JSIeW+USuMJkkcNbiOdcPkVCeI3TSnXstIVEPpp3HiaKnPRuSbUUKm9TY9o/XpIcPHWUOQItAtC5BiAwFdVITQ==}
|
||||
peerDependencies:
|
||||
@@ -3387,6 +3423,9 @@ packages:
|
||||
buffer-from@1.1.2:
|
||||
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
|
||||
|
||||
bullmq@5.71.0:
|
||||
resolution: {integrity: sha512-aeNWh4drsafSKnAJeiNH/nZP/5O8ZdtdMbnOPZmpjXj7NZUP5YC901U3bIH41iZValm7d1i3c34ojv7q31m30w==}
|
||||
|
||||
bytes@3.1.2:
|
||||
resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==}
|
||||
engines: {node: '>= 0.8'}
|
||||
@@ -3553,6 +3592,10 @@ packages:
|
||||
resolution: {integrity: sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==}
|
||||
engines: {node: '>= 0.10'}
|
||||
|
||||
cron-parser@4.9.0:
|
||||
resolution: {integrity: sha512-p0SaNjrHOnQeR8/VnfGbmg9te2kfyYSQ7Sc/j/6DtPL3JQvKxmjO9TSjNFpujqV3vEYYBvNNvXSxzyksBWAx1Q==}
|
||||
engines: {node: '>=12.0.0'}
|
||||
|
||||
cross-spawn@7.0.6:
|
||||
resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
|
||||
engines: {node: '>= 8'}
|
||||
@@ -4274,6 +4317,10 @@ packages:
|
||||
resolution: {integrity: sha512-HVBe9OFuqs+Z6n64q09PQvP1/R4Bm+30PAyyD4wIEqssh3v9L21QjCVk4kRLucMBcDokJTcLjsGeVRlq/nH6DA==}
|
||||
engines: {node: '>=12.22.0'}
|
||||
|
||||
ioredis@5.9.3:
|
||||
resolution: {integrity: sha512-VI5tMCdeoxZWU5vjHWsiE/Su76JGhBvWF1MJnV9ZtGltHk9BmD48oDq8Tj8haZ85aceXZMxLNDQZRVo5QKNgXA==}
|
||||
engines: {node: '>=12.22.0'}
|
||||
|
||||
ip-address@10.1.0:
|
||||
resolution: {integrity: sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==}
|
||||
engines: {node: '>= 12'}
|
||||
@@ -4566,6 +4613,10 @@ packages:
|
||||
resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
luxon@3.7.2:
|
||||
resolution: {integrity: sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==}
|
||||
engines: {node: '>=12'}
|
||||
|
||||
magic-bytes.js@1.13.0:
|
||||
resolution: {integrity: sha512-afO2mnxW7GDTXMm5/AoN1WuOcdoKhtgXjIvHmobqTD1grNplhGdv3PFOyjCVmrnOZBIT/gD/koDKpYG+0mvHcg==}
|
||||
|
||||
@@ -4773,6 +4824,13 @@ packages:
|
||||
ms@2.1.3:
|
||||
resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==}
|
||||
|
||||
msgpackr-extract@3.0.3:
|
||||
resolution: {integrity: sha512-P0efT1C9jIdVRefqjzOQ9Xml57zpOXnIuS+csaB4MdZbTdmGDLo8XhzBG1N7aO11gKDDkJvBLULeFTo46wwreA==}
|
||||
hasBin: true
|
||||
|
||||
msgpackr@1.11.5:
|
||||
resolution: {integrity: sha512-UjkUHN0yqp9RWKy0Lplhh+wlpdt9oQBYgULZOiFhV3VclSF1JnSQWZ5r9gORQlNYaUKQoR8itv7g7z1xDDuACA==}
|
||||
|
||||
mz@2.7.0:
|
||||
resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==}
|
||||
|
||||
@@ -4821,6 +4879,9 @@ packages:
|
||||
sass:
|
||||
optional: true
|
||||
|
||||
node-abort-controller@3.1.1:
|
||||
resolution: {integrity: sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==}
|
||||
|
||||
node-cron@4.2.1:
|
||||
resolution: {integrity: sha512-lgimEHPE/QDgFlywTd8yTR61ptugX3Qer29efeyWw2rv259HtGBNn1vZVmp8lB9uo9wC0t/AT4iGqXxia+CJFg==}
|
||||
engines: {node: '>=6.0.0'}
|
||||
@@ -4843,6 +4904,10 @@ packages:
|
||||
resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==}
|
||||
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
|
||||
|
||||
node-gyp-build-optional-packages@5.2.2:
|
||||
resolution: {integrity: sha512-s+w+rBWnpTMwSFbaE0UXsRlg7hU4FjekKU4eyAih5T8nJuNZT1nNsskXpxmeqSK9UzkBl6UgRlnKc8hz8IEqOw==}
|
||||
hasBin: true
|
||||
|
||||
npm-run-path@5.3.0:
|
||||
resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==}
|
||||
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
|
||||
@@ -7039,6 +7104,8 @@ snapshots:
|
||||
'@img/sharp-win32-x64@0.34.5':
|
||||
optional: true
|
||||
|
||||
'@ioredis/commands@1.5.0': {}
|
||||
|
||||
'@ioredis/commands@1.5.1': {}
|
||||
|
||||
'@isaacs/cliui@8.0.2':
|
||||
@@ -7235,6 +7302,24 @@ snapshots:
|
||||
dependencies:
|
||||
sparse-bitfield: 3.0.3
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-arm64@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-x64@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm64@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-linux-x64@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@msgpackr-extract/msgpackr-extract-win32-x64@3.0.3':
|
||||
optional: true
|
||||
|
||||
'@nestjs/common@11.1.16(class-transformer@0.5.1)(class-validator@0.15.1)(reflect-metadata@0.2.2)(rxjs@7.8.2)':
|
||||
dependencies:
|
||||
file-type: 21.3.0
|
||||
@@ -8977,6 +9062,18 @@ snapshots:
|
||||
|
||||
buffer-from@1.1.2: {}
|
||||
|
||||
bullmq@5.71.0:
|
||||
dependencies:
|
||||
cron-parser: 4.9.0
|
||||
ioredis: 5.9.3
|
||||
msgpackr: 1.11.5
|
||||
node-abort-controller: 3.1.1
|
||||
semver: 7.7.4
|
||||
tslib: 2.8.1
|
||||
uuid: 11.1.0
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
bytes@3.1.2: {}
|
||||
|
||||
cac@6.7.14: {}
|
||||
@@ -9115,6 +9212,10 @@ snapshots:
|
||||
object-assign: 4.1.1
|
||||
vary: 1.1.2
|
||||
|
||||
cron-parser@4.9.0:
|
||||
dependencies:
|
||||
luxon: 3.7.2
|
||||
|
||||
cross-spawn@7.0.6:
|
||||
dependencies:
|
||||
path-key: 3.1.1
|
||||
@@ -9997,6 +10098,20 @@ snapshots:
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
ioredis@5.9.3:
|
||||
dependencies:
|
||||
'@ioredis/commands': 1.5.0
|
||||
cluster-key-slot: 1.1.2
|
||||
debug: 4.4.3
|
||||
denque: 2.1.0
|
||||
lodash.defaults: 4.2.0
|
||||
lodash.isarguments: 3.1.0
|
||||
redis-errors: 1.2.0
|
||||
redis-parser: 3.0.0
|
||||
standard-as-callback: 2.1.0
|
||||
transitivePeerDependencies:
|
||||
- supports-color
|
||||
|
||||
ip-address@10.1.0: {}
|
||||
|
||||
ipaddr.js@1.9.1: {}
|
||||
@@ -10261,6 +10376,8 @@ snapshots:
|
||||
|
||||
lru-cache@7.18.3: {}
|
||||
|
||||
luxon@3.7.2: {}
|
||||
|
||||
magic-bytes.js@1.13.0: {}
|
||||
|
||||
magic-string@0.30.21:
|
||||
@@ -10559,6 +10676,22 @@ snapshots:
|
||||
|
||||
ms@2.1.3: {}
|
||||
|
||||
msgpackr-extract@3.0.3:
|
||||
dependencies:
|
||||
node-gyp-build-optional-packages: 5.2.2
|
||||
optionalDependencies:
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-arm64': 3.0.3
|
||||
'@msgpackr-extract/msgpackr-extract-darwin-x64': 3.0.3
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm': 3.0.3
|
||||
'@msgpackr-extract/msgpackr-extract-linux-arm64': 3.0.3
|
||||
'@msgpackr-extract/msgpackr-extract-linux-x64': 3.0.3
|
||||
'@msgpackr-extract/msgpackr-extract-win32-x64': 3.0.3
|
||||
optional: true
|
||||
|
||||
msgpackr@1.11.5:
|
||||
optionalDependencies:
|
||||
msgpackr-extract: 3.0.3
|
||||
|
||||
mz@2.7.0:
|
||||
dependencies:
|
||||
any-promise: 1.3.0
|
||||
@@ -10603,6 +10736,8 @@ snapshots:
|
||||
- '@babel/core'
|
||||
- babel-plugin-macros
|
||||
|
||||
node-abort-controller@3.1.1: {}
|
||||
|
||||
node-cron@4.2.1: {}
|
||||
|
||||
node-domexception@1.0.0: {}
|
||||
@@ -10617,6 +10752,11 @@ snapshots:
|
||||
fetch-blob: 3.2.0
|
||||
formdata-polyfill: 4.0.10
|
||||
|
||||
node-gyp-build-optional-packages@5.2.2:
|
||||
dependencies:
|
||||
detect-libc: 2.1.2
|
||||
optional: true
|
||||
|
||||
npm-run-path@5.3.0:
|
||||
dependencies:
|
||||
path-key: 4.0.0
|
||||
|
||||
Reference in New Issue
Block a user