merge: resolve conflicts with develop (M10-Telemetry + M12-MatrixBridge)
All checks were successful
ci/woodpecker/push/infra Pipeline was successful
ci/woodpecker/push/coordinator Pipeline was successful
ci/woodpecker/push/orchestrator Pipeline was successful
ci/woodpecker/push/api Pipeline was successful
ci/woodpecker/push/web Pipeline was successful

Merge origin/develop into feature/m13-speech-services to incorporate
M10-Telemetry and M12-MatrixBridge changes. Resolved 4 conflicts:
- .env.example: Added speech config alongside telemetry + matrix config
- Makefile: Added speech targets alongside matrix targets
- app.module.ts: Import both MosaicTelemetryModule and SpeechModule
- docs/tasks.md: Combined all milestone task tracking sections

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-15 12:31:08 -06:00
68 changed files with 12466 additions and 358 deletions

View File

@@ -37,6 +37,7 @@ import { JobStepsModule } from "./job-steps/job-steps.module";
import { CoordinatorIntegrationModule } from "./coordinator-integration/coordinator-integration.module";
import { FederationModule } from "./federation/federation.module";
import { CredentialsModule } from "./credentials/credentials.module";
import { MosaicTelemetryModule } from "./mosaic-telemetry";
import { SpeechModule } from "./speech/speech.module";
import { RlsContextInterceptor } from "./common/interceptors/rls-context.interceptor";
@@ -98,6 +99,7 @@ import { RlsContextInterceptor } from "./common/interceptors/rls-context.interce
CoordinatorIntegrationModule,
FederationModule,
CredentialsModule,
MosaicTelemetryModule,
SpeechModule,
],
controllers: [AppController, CsrfController],

View File

@@ -0,0 +1,15 @@
/**
 * Bridge Module Constants
 *
 * Injection tokens for the bridge module.
 */
/**
 * Injection token for the array of active IChatProvider instances.
 *
 * The array contains only the bridges whose environment variables are set
 * (the BridgeModule factory pushes Discord when DISCORD_BOT_TOKEN is present
 * and Matrix when MATRIX_ACCESS_TOKEN is present), so it may be empty, hold
 * a single provider, or hold both.
 *
 * Use this token to inject all configured chat providers:
 * ```
 * @Inject(CHAT_PROVIDERS) private readonly chatProviders: IChatProvider[]
 * ```
 */
export const CHAT_PROVIDERS = "CHAT_PROVIDERS";

View File

@@ -1,10 +1,13 @@
import { Test, TestingModule } from "@nestjs/testing";
import { BridgeModule } from "./bridge.module";
import { DiscordService } from "./discord/discord.service";
import { MatrixService } from "./matrix/matrix.service";
import { StitcherService } from "../stitcher/stitcher.service";
import { PrismaService } from "../prisma/prisma.service";
import { BullMqService } from "../bullmq/bullmq.service";
import { describe, it, expect, beforeEach, vi } from "vitest";
import { CHAT_PROVIDERS } from "./bridge.constants";
import type { IChatProvider } from "./interfaces";
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
// Mock discord.js
const mockReadyCallbacks: Array<() => void> = [];
@@ -53,20 +56,93 @@ vi.mock("discord.js", () => {
};
});
describe("BridgeModule", () => {
let module: TestingModule;
// Mock matrix-bot-sdk
vi.mock("matrix-bot-sdk", () => {
return {
MatrixClient: class MockMatrixClient {
start = vi.fn().mockResolvedValue(undefined);
stop = vi.fn();
on = vi.fn();
sendMessage = vi.fn().mockResolvedValue("$mock-event-id");
},
SimpleFsStorageProvider: class MockStorage {
constructor(_path: string) {
// no-op
}
},
AutojoinRoomsMixin: {
setupOnClient: vi.fn(),
},
};
});
beforeEach(async () => {
// Set environment variables
process.env.DISCORD_BOT_TOKEN = "test-token";
process.env.DISCORD_GUILD_ID = "test-guild-id";
process.env.DISCORD_CONTROL_CHANNEL_ID = "test-channel-id";
/**
 * Snapshot of the bridge-related environment variables, captured in
 * beforeEach so afterEach can restore the ambient environment exactly.
 * Every field is optional because any of the variables may be unset in
 * the surrounding shell/CI environment.
 */
interface SavedEnvVars {
  DISCORD_BOT_TOKEN?: string;
  DISCORD_GUILD_ID?: string;
  DISCORD_CONTROL_CHANNEL_ID?: string;
  MATRIX_ACCESS_TOKEN?: string;
  MATRIX_HOMESERVER_URL?: string;
  MATRIX_BOT_USER_ID?: string;
  MATRIX_CONTROL_ROOM_ID?: string;
  MATRIX_WORKSPACE_ID?: string;
  // Needed by StitcherService (see beforeEach), not by the bridges themselves.
  ENCRYPTION_KEY?: string;
}
describe("BridgeModule", () => {
let savedEnv: SavedEnvVars;
beforeEach(() => {
// Save current env vars
savedEnv = {
DISCORD_BOT_TOKEN: process.env.DISCORD_BOT_TOKEN,
DISCORD_GUILD_ID: process.env.DISCORD_GUILD_ID,
DISCORD_CONTROL_CHANNEL_ID: process.env.DISCORD_CONTROL_CHANNEL_ID,
MATRIX_ACCESS_TOKEN: process.env.MATRIX_ACCESS_TOKEN,
MATRIX_HOMESERVER_URL: process.env.MATRIX_HOMESERVER_URL,
MATRIX_BOT_USER_ID: process.env.MATRIX_BOT_USER_ID,
MATRIX_CONTROL_ROOM_ID: process.env.MATRIX_CONTROL_ROOM_ID,
MATRIX_WORKSPACE_ID: process.env.MATRIX_WORKSPACE_ID,
ENCRYPTION_KEY: process.env.ENCRYPTION_KEY,
};
// Clear all bridge env vars
delete process.env.DISCORD_BOT_TOKEN;
delete process.env.DISCORD_GUILD_ID;
delete process.env.DISCORD_CONTROL_CHANNEL_ID;
delete process.env.MATRIX_ACCESS_TOKEN;
delete process.env.MATRIX_HOMESERVER_URL;
delete process.env.MATRIX_BOT_USER_ID;
delete process.env.MATRIX_CONTROL_ROOM_ID;
delete process.env.MATRIX_WORKSPACE_ID;
// Set encryption key (needed by StitcherService)
process.env.ENCRYPTION_KEY = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef";
// Clear ready callbacks
mockReadyCallbacks.length = 0;
module = await Test.createTestingModule({
vi.clearAllMocks();
});
afterEach(() => {
// Restore env vars
for (const [key, value] of Object.entries(savedEnv)) {
if (value === undefined) {
delete process.env[key];
} else {
process.env[key] = value;
}
}
});
/**
* Helper to compile a test module with BridgeModule
*/
async function compileModule(): Promise<TestingModule> {
return Test.createTestingModule({
imports: [BridgeModule],
})
.overrideProvider(PrismaService)
@@ -74,24 +150,144 @@ describe("BridgeModule", () => {
.overrideProvider(BullMqService)
.useValue({})
.compile();
}
// Clear all mocks
vi.clearAllMocks();
/**
* Helper to set Discord env vars
*/
function setDiscordEnv(): void {
process.env.DISCORD_BOT_TOKEN = "test-discord-token";
process.env.DISCORD_GUILD_ID = "test-guild-id";
process.env.DISCORD_CONTROL_CHANNEL_ID = "test-channel-id";
}
/**
* Helper to set Matrix env vars
*/
function setMatrixEnv(): void {
process.env.MATRIX_ACCESS_TOKEN = "test-matrix-token";
process.env.MATRIX_HOMESERVER_URL = "https://matrix.example.com";
process.env.MATRIX_BOT_USER_ID = "@bot:example.com";
process.env.MATRIX_CONTROL_ROOM_ID = "!room:example.com";
process.env.MATRIX_WORKSPACE_ID = "test-workspace-id";
}
describe("with both Discord and Matrix configured", () => {
let module: TestingModule;
beforeEach(async () => {
setDiscordEnv();
setMatrixEnv();
module = await compileModule();
});
it("should compile the module", () => {
expect(module).toBeDefined();
});
it("should provide DiscordService", () => {
const discordService = module.get<DiscordService>(DiscordService);
expect(discordService).toBeDefined();
expect(discordService).toBeInstanceOf(DiscordService);
});
it("should provide MatrixService", () => {
const matrixService = module.get<MatrixService>(MatrixService);
expect(matrixService).toBeDefined();
expect(matrixService).toBeInstanceOf(MatrixService);
});
it("should provide CHAT_PROVIDERS with both providers", () => {
const chatProviders = module.get<IChatProvider[]>(CHAT_PROVIDERS);
expect(chatProviders).toBeDefined();
expect(chatProviders).toHaveLength(2);
expect(chatProviders[0]).toBeInstanceOf(DiscordService);
expect(chatProviders[1]).toBeInstanceOf(MatrixService);
});
it("should provide StitcherService via StitcherModule", () => {
const stitcherService = module.get<StitcherService>(StitcherService);
expect(stitcherService).toBeDefined();
expect(stitcherService).toBeInstanceOf(StitcherService);
});
});
it("should be defined", () => {
expect(module).toBeDefined();
describe("with only Discord configured", () => {
let module: TestingModule;
beforeEach(async () => {
setDiscordEnv();
module = await compileModule();
});
it("should compile the module", () => {
expect(module).toBeDefined();
});
it("should provide DiscordService", () => {
const discordService = module.get<DiscordService>(DiscordService);
expect(discordService).toBeDefined();
expect(discordService).toBeInstanceOf(DiscordService);
});
it("should provide CHAT_PROVIDERS with only Discord", () => {
const chatProviders = module.get<IChatProvider[]>(CHAT_PROVIDERS);
expect(chatProviders).toBeDefined();
expect(chatProviders).toHaveLength(1);
expect(chatProviders[0]).toBeInstanceOf(DiscordService);
});
});
it("should provide DiscordService", () => {
const discordService = module.get<DiscordService>(DiscordService);
expect(discordService).toBeDefined();
expect(discordService).toBeInstanceOf(DiscordService);
describe("with only Matrix configured", () => {
let module: TestingModule;
beforeEach(async () => {
setMatrixEnv();
module = await compileModule();
});
it("should compile the module", () => {
expect(module).toBeDefined();
});
it("should provide MatrixService", () => {
const matrixService = module.get<MatrixService>(MatrixService);
expect(matrixService).toBeDefined();
expect(matrixService).toBeInstanceOf(MatrixService);
});
it("should provide CHAT_PROVIDERS with only Matrix", () => {
const chatProviders = module.get<IChatProvider[]>(CHAT_PROVIDERS);
expect(chatProviders).toBeDefined();
expect(chatProviders).toHaveLength(1);
expect(chatProviders[0]).toBeInstanceOf(MatrixService);
});
});
it("should provide StitcherService", () => {
const stitcherService = module.get<StitcherService>(StitcherService);
expect(stitcherService).toBeDefined();
expect(stitcherService).toBeInstanceOf(StitcherService);
describe("with neither bridge configured", () => {
let module: TestingModule;
beforeEach(async () => {
// No env vars set for either bridge
module = await compileModule();
});
it("should compile the module without errors", () => {
expect(module).toBeDefined();
});
it("should provide CHAT_PROVIDERS as an empty array", () => {
const chatProviders = module.get<IChatProvider[]>(CHAT_PROVIDERS);
expect(chatProviders).toBeDefined();
expect(chatProviders).toHaveLength(0);
expect(Array.isArray(chatProviders)).toBe(true);
});
});
describe("CHAT_PROVIDERS token", () => {
it("should be a string constant", () => {
expect(CHAT_PROVIDERS).toBe("CHAT_PROVIDERS");
expect(typeof CHAT_PROVIDERS).toBe("string");
});
});
});

View File

@@ -1,16 +1,81 @@
import { Module } from "@nestjs/common";
import { Logger, Module } from "@nestjs/common";
import { DiscordService } from "./discord/discord.service";
import { MatrixService } from "./matrix/matrix.service";
import { MatrixRoomService } from "./matrix/matrix-room.service";
import { MatrixStreamingService } from "./matrix/matrix-streaming.service";
import { CommandParserService } from "./parser/command-parser.service";
import { StitcherModule } from "../stitcher/stitcher.module";
import { CHAT_PROVIDERS } from "./bridge.constants";
import type { IChatProvider } from "./interfaces";
const logger = new Logger("BridgeModule");
/**
* Bridge Module - Chat platform integrations
*
* Provides integration with chat platforms (Discord, Slack, Matrix, etc.)
* Provides integration with chat platforms (Discord, Matrix, etc.)
* for controlling Mosaic Stack via chat commands.
*
* Both services are always registered as providers, but the CHAT_PROVIDERS
* injection token only includes bridges whose environment variables are set:
* - Discord: included when DISCORD_BOT_TOKEN is set
* - Matrix: included when MATRIX_ACCESS_TOKEN is set
*
* Both bridges can run simultaneously, and no error occurs if neither is configured.
* Consumers should inject CHAT_PROVIDERS for bridge-agnostic access to all active providers.
*
* CommandParserService provides shared, platform-agnostic command parsing.
* MatrixRoomService handles workspace-to-Matrix-room mapping.
*/
@Module({
imports: [StitcherModule],
providers: [DiscordService],
exports: [DiscordService],
providers: [
CommandParserService,
MatrixRoomService,
MatrixStreamingService,
DiscordService,
MatrixService,
{
provide: CHAT_PROVIDERS,
useFactory: (discord: DiscordService, matrix: MatrixService): IChatProvider[] => {
const providers: IChatProvider[] = [];
if (process.env.DISCORD_BOT_TOKEN) {
providers.push(discord);
logger.log("Discord bridge enabled (DISCORD_BOT_TOKEN detected)");
}
if (process.env.MATRIX_ACCESS_TOKEN) {
const missingVars = [
"MATRIX_HOMESERVER_URL",
"MATRIX_BOT_USER_ID",
"MATRIX_WORKSPACE_ID",
].filter((v) => !process.env[v]);
if (missingVars.length > 0) {
logger.warn(
`Matrix bridge enabled but missing: ${missingVars.join(", ")}. connect() will fail.`
);
}
providers.push(matrix);
logger.log("Matrix bridge enabled (MATRIX_ACCESS_TOKEN detected)");
}
if (providers.length === 0) {
logger.warn("No chat bridges configured. Set DISCORD_BOT_TOKEN or MATRIX_ACCESS_TOKEN.");
}
return providers;
},
inject: [DiscordService, MatrixService],
},
],
exports: [
DiscordService,
MatrixService,
MatrixRoomService,
MatrixStreamingService,
CommandParserService,
CHAT_PROVIDERS,
],
})
export class BridgeModule {}

View File

@@ -187,6 +187,7 @@ describe("DiscordService", () => {
await service.connect();
await service.sendThreadMessage({
threadId: "thread-123",
channelId: "test-channel-id",
content: "Step completed",
});

View File

@@ -305,6 +305,7 @@ export class DiscordService implements IChatProvider {
// Send confirmation to thread
await this.sendThreadMessage({
threadId,
channelId: message.channelId,
content: `Job created: ${result.jobId}\nStatus: ${result.status}\nQueue: ${result.queueName}`,
});
}

View File

@@ -28,6 +28,7 @@ export interface ThreadCreateOptions {
/** Options for posting a message into an existing thread. */
export interface ThreadMessageOptions {
  /** Provider-specific thread identifier. */
  threadId: string;
  /** Channel/room containing the thread — required because some providers address messages by channel, not thread alone. */
  channelId: string;
  /** Plain-text message body to post. */
  content: string;
}
@@ -76,4 +77,17 @@ export interface IChatProvider {
* Parse a command from a message
*/
parseCommand(message: ChatMessage): ChatCommand | null;
/**
* Edit an existing message in a channel.
*
* Optional method for providers that support message editing
* (e.g., Matrix via m.replace, Discord via message.edit).
* Used for streaming AI responses with incremental updates.
*
* @param channelId - The channel/room ID
* @param messageId - The original message/event ID to edit
* @param content - The updated message content
*/
editMessage?(channelId: string, messageId: string, content: string): Promise<void>;
}

View File

@@ -0,0 +1,4 @@
/**
 * Barrel file for the Matrix bridge: re-exports the Matrix services and the
 * streaming options type so consumers can import from "./matrix".
 */
export { MatrixService } from "./matrix.service";
export { MatrixRoomService } from "./matrix-room.service";
export { MatrixStreamingService } from "./matrix-streaming.service";
export type { StreamResponseOptions } from "./matrix-streaming.service";

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,212 @@
import { Test, TestingModule } from "@nestjs/testing";
import { MatrixRoomService } from "./matrix-room.service";
import { MatrixService } from "./matrix.service";
import { PrismaService } from "../../prisma/prisma.service";
import { vi, describe, it, expect, beforeEach, afterEach } from "vitest";
// Mock matrix-bot-sdk to avoid native module import errors
// (the real SDK pulls in native crypto bindings that fail under vitest).
vi.mock("matrix-bot-sdk", () => {
  return {
    // Bare stand-in class; tests inject their own client mock via MatrixService.
    MatrixClient: class MockMatrixClient {},
    SimpleFsStorageProvider: class MockStorageProvider {
      constructor(_filename: string) {
        // No-op for testing
      }
    },
    AutojoinRoomsMixin: {
      setupOnClient: vi.fn(),
    },
  };
});
describe("MatrixRoomService", () => {
let service: MatrixRoomService;
const mockCreateRoom = vi.fn().mockResolvedValue("!new-room:example.com");
const mockMatrixClient = {
createRoom: mockCreateRoom,
};
const mockMatrixService = {
isConnected: vi.fn().mockReturnValue(true),
getClient: vi.fn().mockReturnValue(mockMatrixClient),
};
const mockPrismaService = {
workspace: {
findUnique: vi.fn(),
findFirst: vi.fn(),
update: vi.fn(),
},
};
beforeEach(async () => {
process.env.MATRIX_SERVER_NAME = "example.com";
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixRoomService,
{
provide: PrismaService,
useValue: mockPrismaService,
},
{
provide: MatrixService,
useValue: mockMatrixService,
},
],
}).compile();
service = module.get<MatrixRoomService>(MatrixRoomService);
vi.clearAllMocks();
// Restore defaults after clearing
mockMatrixService.isConnected.mockReturnValue(true);
mockCreateRoom.mockResolvedValue("!new-room:example.com");
mockPrismaService.workspace.update.mockResolvedValue({});
});
describe("provisionRoom", () => {
it("should create a Matrix room and store the mapping", async () => {
const roomId = await service.provisionRoom(
"workspace-uuid-1",
"My Workspace",
"my-workspace"
);
expect(roomId).toBe("!new-room:example.com");
expect(mockCreateRoom).toHaveBeenCalledWith({
name: "Mosaic: My Workspace",
room_alias_name: "mosaic-my-workspace",
topic: "Mosaic workspace: My Workspace",
preset: "private_chat",
visibility: "private",
});
expect(mockPrismaService.workspace.update).toHaveBeenCalledWith({
where: { id: "workspace-uuid-1" },
data: { matrixRoomId: "!new-room:example.com" },
});
});
it("should return null when Matrix is not configured (no MatrixService)", async () => {
// Create a service without MatrixService
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixRoomService,
{
provide: PrismaService,
useValue: mockPrismaService,
},
],
}).compile();
const serviceWithoutMatrix = module.get<MatrixRoomService>(MatrixRoomService);
const roomId = await serviceWithoutMatrix.provisionRoom(
"workspace-uuid-1",
"My Workspace",
"my-workspace"
);
expect(roomId).toBeNull();
expect(mockCreateRoom).not.toHaveBeenCalled();
expect(mockPrismaService.workspace.update).not.toHaveBeenCalled();
});
it("should return null when Matrix is not connected", async () => {
mockMatrixService.isConnected.mockReturnValue(false);
const roomId = await service.provisionRoom(
"workspace-uuid-1",
"My Workspace",
"my-workspace"
);
expect(roomId).toBeNull();
expect(mockCreateRoom).not.toHaveBeenCalled();
});
});
describe("getRoomForWorkspace", () => {
it("should return the room ID for a mapped workspace", async () => {
mockPrismaService.workspace.findUnique.mockResolvedValue({
matrixRoomId: "!mapped-room:example.com",
});
const roomId = await service.getRoomForWorkspace("workspace-uuid-1");
expect(roomId).toBe("!mapped-room:example.com");
expect(mockPrismaService.workspace.findUnique).toHaveBeenCalledWith({
where: { id: "workspace-uuid-1" },
select: { matrixRoomId: true },
});
});
it("should return null for an unmapped workspace", async () => {
mockPrismaService.workspace.findUnique.mockResolvedValue({
matrixRoomId: null,
});
const roomId = await service.getRoomForWorkspace("workspace-uuid-2");
expect(roomId).toBeNull();
});
it("should return null for a non-existent workspace", async () => {
mockPrismaService.workspace.findUnique.mockResolvedValue(null);
const roomId = await service.getRoomForWorkspace("non-existent-uuid");
expect(roomId).toBeNull();
});
});
describe("getWorkspaceForRoom", () => {
it("should return the workspace ID for a mapped room", async () => {
mockPrismaService.workspace.findFirst.mockResolvedValue({
id: "workspace-uuid-1",
});
const workspaceId = await service.getWorkspaceForRoom("!mapped-room:example.com");
expect(workspaceId).toBe("workspace-uuid-1");
expect(mockPrismaService.workspace.findFirst).toHaveBeenCalledWith({
where: { matrixRoomId: "!mapped-room:example.com" },
select: { id: true },
});
});
it("should return null for an unmapped room", async () => {
mockPrismaService.workspace.findFirst.mockResolvedValue(null);
const workspaceId = await service.getWorkspaceForRoom("!unknown-room:example.com");
expect(workspaceId).toBeNull();
});
});
describe("linkWorkspaceToRoom", () => {
it("should store the room mapping in the workspace", async () => {
await service.linkWorkspaceToRoom("workspace-uuid-1", "!existing-room:example.com");
expect(mockPrismaService.workspace.update).toHaveBeenCalledWith({
where: { id: "workspace-uuid-1" },
data: { matrixRoomId: "!existing-room:example.com" },
});
});
});
describe("unlinkWorkspace", () => {
it("should remove the room mapping from the workspace", async () => {
await service.unlinkWorkspace("workspace-uuid-1");
expect(mockPrismaService.workspace.update).toHaveBeenCalledWith({
where: { id: "workspace-uuid-1" },
data: { matrixRoomId: null },
});
});
});
});

View File

@@ -0,0 +1,151 @@
import { Injectable, Logger, Optional, Inject } from "@nestjs/common";
import { PrismaService } from "../../prisma/prisma.service";
import { MatrixService } from "./matrix.service";
import type { MatrixClient, RoomCreateOptions } from "matrix-bot-sdk";
/**
 * MatrixRoomService - maps Mosaic workspaces to Matrix rooms.
 *
 * Responsibilities:
 * - Provision a private Matrix room for a workspace on demand
 * - Look up the room mapped to a workspace, and the reverse
 * - Link or unlink an existing room to/from a workspace
 *
 * A provisioned room is created with:
 * - Name: "Mosaic: {workspace_name}"
 * - Alias: #mosaic-{workspace_slug}:{server_name}
 * - The resulting room ID persisted in workspace.matrixRoomId
 *
 * MatrixService is injected as @Optional so this service still constructs
 * when the Matrix bridge is not configured; every operation that needs the
 * bridge degrades to a null result instead of throwing.
 */
@Injectable()
export class MatrixRoomService {
  private readonly logger = new Logger(MatrixRoomService.name);
  constructor(
    private readonly prisma: PrismaService,
    @Optional() @Inject(MatrixService) private readonly matrixService: MatrixService | null
  ) {}
  /**
   * Create a private Matrix room for a workspace and persist the mapping.
   *
   * @param workspaceId - The workspace UUID
   * @param workspaceName - Human-readable workspace name
   * @param workspaceSlug - URL-safe workspace identifier for the room alias
   * @returns The new Matrix room ID, or null when Matrix is unavailable
   */
  async provisionRoom(
    workspaceId: string,
    workspaceName: string,
    workspaceSlug: string
  ): Promise<string | null> {
    // Degrade gracefully when the bridge is absent or offline.
    if (!this.matrixService?.isConnected()) {
      this.logger.warn("Matrix is not configured or not connected; skipping room provisioning");
      return null;
    }
    const matrixClient = this.getMatrixClient();
    if (matrixClient === null) {
      this.logger.warn("Matrix client is not available; skipping room provisioning");
      return null;
    }
    const createOptions: RoomCreateOptions = {
      name: `Mosaic: ${workspaceName}`,
      room_alias_name: `mosaic-${workspaceSlug}`,
      topic: `Mosaic workspace: ${workspaceName}`,
      preset: "private_chat",
      visibility: "private",
    };
    this.logger.log(
      `Provisioning Matrix room for workspace "${workspaceName}" (${workspaceId})...`
    );
    const createdRoomId = await matrixClient.createRoom(createOptions);
    try {
      // Persist the room-to-workspace mapping.
      await this.prisma.workspace.update({
        where: { id: workspaceId },
        data: { matrixRoomId: createdRoomId },
      });
    } catch (persistError: unknown) {
      // The room exists on the homeserver but is not recorded; surface that loudly.
      const reason = persistError instanceof Error ? persistError.message : "unknown";
      this.logger.error(
        `Failed to store room mapping for workspace ${workspaceId}, room ${createdRoomId} may be orphaned: ${reason}`
      );
      throw persistError;
    }
    this.logger.log(
      `Matrix room ${createdRoomId} provisioned and linked to workspace ${workspaceId}`
    );
    return createdRoomId;
  }
  /**
   * Look up the Matrix room ID mapped to a workspace.
   *
   * @param workspaceId - The workspace UUID
   * @returns The Matrix room ID, or null if no room is mapped
   */
  async getRoomForWorkspace(workspaceId: string): Promise<string | null> {
    const record = await this.prisma.workspace.findUnique({
      where: { id: workspaceId },
      select: { matrixRoomId: true },
    });
    if (record == null) {
      return null;
    }
    return record.matrixRoomId ?? null;
  }
  /**
   * Reverse lookup: find the workspace that owns a given Matrix room.
   *
   * @param roomId - The Matrix room ID (e.g. "!abc:example.com")
   * @returns The workspace ID, or null if the room is not mapped to any workspace
   */
  async getWorkspaceForRoom(roomId: string): Promise<string | null> {
    const owner = await this.prisma.workspace.findFirst({
      where: { matrixRoomId: roomId },
      select: { id: true },
    });
    if (owner == null) {
      return null;
    }
    return owner.id ?? null;
  }
  /**
   * Manually link an existing Matrix room to a workspace.
   *
   * @param workspaceId - The workspace UUID
   * @param roomId - The Matrix room ID to link
   */
  async linkWorkspaceToRoom(workspaceId: string, roomId: string): Promise<void> {
    await this.prisma.workspace.update({
      where: { id: workspaceId },
      data: { matrixRoomId: roomId },
    });
    this.logger.log(`Linked workspace ${workspaceId} to Matrix room ${roomId}`);
  }
  /**
   * Remove the Matrix room mapping from a workspace.
   *
   * @param workspaceId - The workspace UUID
   */
  async unlinkWorkspace(workspaceId: string): Promise<void> {
    await this.prisma.workspace.update({
      where: { id: workspaceId },
      data: { matrixRoomId: null },
    });
    this.logger.log(`Unlinked Matrix room from workspace ${workspaceId}`);
  }
  /**
   * Fetch the underlying MatrixClient via MatrixService's public accessor,
   * or null when no MatrixService was injected.
   */
  private getMatrixClient(): MatrixClient | null {
    return this.matrixService ? this.matrixService.getClient() : null;
  }
}

View File

@@ -0,0 +1,408 @@
import { Test, TestingModule } from "@nestjs/testing";
import { MatrixStreamingService } from "./matrix-streaming.service";
import { MatrixService } from "./matrix.service";
import { vi, describe, it, expect, beforeEach, afterEach } from "vitest";
import type { StreamResponseOptions } from "./matrix-streaming.service";
// Mock matrix-bot-sdk to prevent native module loading
// (tests below drive the service through mockMatrixService instead).
vi.mock("matrix-bot-sdk", () => {
  return {
    // Bare stand-in; never instantiated directly by these tests.
    MatrixClient: class MockMatrixClient {},
    SimpleFsStorageProvider: class MockStorageProvider {
      constructor(_filename: string) {
        // No-op for testing
      }
    },
    AutojoinRoomsMixin: {
      setupOnClient: vi.fn(),
    },
  };
});
// Mock MatrixClient: canned event IDs let tests assert on send/edit results.
const mockClient = {
  sendMessage: vi.fn().mockResolvedValue("$initial-event-id"),
  sendEvent: vi.fn().mockResolvedValue("$edit-event-id"),
  setTyping: vi.fn().mockResolvedValue(undefined),
};
// Mock MatrixService: connected by default and hands out mockClient.
const mockMatrixService = {
  isConnected: vi.fn().mockReturnValue(true),
  getClient: vi.fn().mockReturnValue(mockClient),
};
/**
 * Helper: turn an array of strings into an async token stream, optionally
 * pausing before each token to simulate LLM latency.
 *
 * @param tokens - The tokens to emit, in order
 * @param delayMs - Milliseconds to wait before each token (0 = no wait)
 */
async function* createTokenStream(
  tokens: string[],
  delayMs = 0
): AsyncGenerator<string, void, undefined> {
  const pause = (ms: number): Promise<void> =>
    new Promise((resolve) => setTimeout(resolve, ms));
  for (const piece of tokens) {
    if (delayMs > 0) {
      await pause(delayMs);
    }
    yield piece;
  }
}
/**
 * Helper: token stream that fails mid-flight — emits the first `errorAfter`
 * tokens, then throws to simulate a dropped LLM provider connection.
 *
 * @param tokens - Tokens available before the simulated failure
 * @param errorAfter - How many tokens to emit before throwing
 */
async function* createErrorStream(
  tokens: string[],
  errorAfter: number
): AsyncGenerator<string, void, undefined> {
  let emitted = 0;
  for (const piece of tokens) {
    if (emitted >= errorAfter) {
      throw new Error("LLM provider connection lost");
    }
    yield piece;
    emitted += 1;
  }
}
describe("MatrixStreamingService", () => {
let service: MatrixStreamingService;
beforeEach(async () => {
vi.useFakeTimers({ shouldAdvanceTime: true });
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixStreamingService,
{
provide: MatrixService,
useValue: mockMatrixService,
},
],
}).compile();
service = module.get<MatrixStreamingService>(MatrixStreamingService);
// Clear all mocks
vi.clearAllMocks();
// Re-apply default mock returns after clearing
mockMatrixService.isConnected.mockReturnValue(true);
mockMatrixService.getClient.mockReturnValue(mockClient);
mockClient.sendMessage.mockResolvedValue("$initial-event-id");
mockClient.sendEvent.mockResolvedValue("$edit-event-id");
mockClient.setTyping.mockResolvedValue(undefined);
});
afterEach(() => {
vi.useRealTimers();
});
describe("editMessage", () => {
it("should send a m.replace event to edit an existing message", async () => {
await service.editMessage("!room:example.com", "$original-event-id", "Updated content");
expect(mockClient.sendEvent).toHaveBeenCalledWith("!room:example.com", "m.room.message", {
"m.new_content": {
msgtype: "m.text",
body: "Updated content",
},
"m.relates_to": {
rel_type: "m.replace",
event_id: "$original-event-id",
},
// Fallback for clients that don't support edits
msgtype: "m.text",
body: "* Updated content",
});
});
it("should throw error when client is not connected", async () => {
mockMatrixService.isConnected.mockReturnValue(false);
await expect(
service.editMessage("!room:example.com", "$event-id", "content")
).rejects.toThrow("Matrix client is not connected");
});
it("should throw error when client is null", async () => {
mockMatrixService.getClient.mockReturnValue(null);
await expect(
service.editMessage("!room:example.com", "$event-id", "content")
).rejects.toThrow("Matrix client is not connected");
});
});
describe("setTypingIndicator", () => {
it("should call client.setTyping with true and timeout", async () => {
await service.setTypingIndicator("!room:example.com", true);
expect(mockClient.setTyping).toHaveBeenCalledWith("!room:example.com", true, 30000);
});
it("should call client.setTyping with false to clear indicator", async () => {
await service.setTypingIndicator("!room:example.com", false);
expect(mockClient.setTyping).toHaveBeenCalledWith("!room:example.com", false, undefined);
});
it("should throw error when client is not connected", async () => {
mockMatrixService.isConnected.mockReturnValue(false);
await expect(service.setTypingIndicator("!room:example.com", true)).rejects.toThrow(
"Matrix client is not connected"
);
});
});
describe("sendStreamingMessage", () => {
it("should send an initial message and return the event ID", async () => {
const eventId = await service.sendStreamingMessage("!room:example.com", "Thinking...");
expect(eventId).toBe("$initial-event-id");
expect(mockClient.sendMessage).toHaveBeenCalledWith("!room:example.com", {
msgtype: "m.text",
body: "Thinking...",
});
});
it("should send a thread message when threadId is provided", async () => {
const eventId = await service.sendStreamingMessage(
"!room:example.com",
"Thinking...",
"$thread-root-id"
);
expect(eventId).toBe("$initial-event-id");
expect(mockClient.sendMessage).toHaveBeenCalledWith("!room:example.com", {
msgtype: "m.text",
body: "Thinking...",
"m.relates_to": {
rel_type: "m.thread",
event_id: "$thread-root-id",
is_falling_back: true,
"m.in_reply_to": {
event_id: "$thread-root-id",
},
},
});
});
it("should throw error when client is not connected", async () => {
mockMatrixService.isConnected.mockReturnValue(false);
await expect(service.sendStreamingMessage("!room:example.com", "Test")).rejects.toThrow(
"Matrix client is not connected"
);
});
});
describe("streamResponse", () => {
it("should send initial 'Thinking...' message and start typing indicator", async () => {
vi.useRealTimers();
const tokens = ["Hello", " world"];
const stream = createTokenStream(tokens);
await service.streamResponse("!room:example.com", stream);
// Should have sent initial message
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!room:example.com",
expect.objectContaining({
msgtype: "m.text",
body: "Thinking...",
})
);
// Should have started typing indicator
expect(mockClient.setTyping).toHaveBeenCalledWith("!room:example.com", true, 30000);
});
// NOTE(review): these specs share the module-level mockClient and rely on real
// timers (vi.useRealTimers) plus the ordering of sendMessage/sendEvent/setTyping
// calls; keep assertions order-sensitive.
it("should use custom initial message when provided", async () => {
vi.useRealTimers();
const tokens = ["Hi"];
const stream = createTokenStream(tokens);
const options: StreamResponseOptions = { initialMessage: "Processing..." };
await service.streamResponse("!room:example.com", stream, options);
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!room:example.com",
expect.objectContaining({
body: "Processing...",
})
);
});
it("should edit message with accumulated tokens on completion", async () => {
vi.useRealTimers();
const tokens = ["Hello", " ", "world", "!"];
const stream = createTokenStream(tokens);
await service.streamResponse("!room:example.com", stream);
// The final edit should contain the full accumulated text
const sendEventCalls = mockClient.sendEvent.mock.calls;
const lastEditCall = sendEventCalls[sendEventCalls.length - 1];
expect(lastEditCall).toBeDefined();
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
expect(lastEditCall[2]["m.new_content"].body).toBe("Hello world!");
});
it("should clear typing indicator on completion", async () => {
vi.useRealTimers();
const tokens = ["Done"];
const stream = createTokenStream(tokens);
await service.streamResponse("!room:example.com", stream);
// Last setTyping call should be false
// (service passes undefined as the timeout when clearing, hence 3 args)
const typingCalls = mockClient.setTyping.mock.calls;
const lastTypingCall = typingCalls[typingCalls.length - 1];
expect(lastTypingCall).toEqual(["!room:example.com", false, undefined]);
});
it("should rate-limit edits to at most one every 500ms", async () => {
vi.useRealTimers();
// Send tokens with small delays - all within one 500ms window
const tokens = ["a", "b", "c", "d", "e"];
const stream = createTokenStream(tokens, 50); // 50ms between tokens = 250ms total
await service.streamResponse("!room:example.com", stream);
// With 250ms total streaming time (5 tokens * 50ms), all tokens arrive
// within one 500ms window. We expect at most 1 intermediate edit + 1 final edit,
// or just the final edit. The key point is that there should NOT be 5 separate edits.
const editCalls = mockClient.sendEvent.mock.calls.filter(
(call) => call[1] === "m.room.message"
);
// Should have fewer edits than tokens (rate limiting in effect)
expect(editCalls.length).toBeLessThanOrEqual(2);
// Should have at least the final edit
expect(editCalls.length).toBeGreaterThanOrEqual(1);
});
it("should handle errors gracefully and edit message with error notice", async () => {
vi.useRealTimers();
// Stream throws after yielding the first 2 tokens
const stream = createErrorStream(["Hello", " ", "world"], 2);
await service.streamResponse("!room:example.com", stream);
// Should edit message with error content
const sendEventCalls = mockClient.sendEvent.mock.calls;
const lastEditCall = sendEventCalls[sendEventCalls.length - 1];
expect(lastEditCall).toBeDefined();
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
const finalBody = lastEditCall[2]["m.new_content"].body as string;
expect(finalBody).toContain("error");
// Should clear typing on error
const typingCalls = mockClient.setTyping.mock.calls;
const lastTypingCall = typingCalls[typingCalls.length - 1];
expect(lastTypingCall).toEqual(["!room:example.com", false, undefined]);
});
it("should include token usage in final message when provided", async () => {
vi.useRealTimers();
const tokens = ["Hello"];
const stream = createTokenStream(tokens);
const options: StreamResponseOptions = {
showTokenUsage: true,
tokenUsage: { prompt: 10, completion: 5, total: 15 },
};
await service.streamResponse("!room:example.com", stream, options);
const sendEventCalls = mockClient.sendEvent.mock.calls;
const lastEditCall = sendEventCalls[sendEventCalls.length - 1];
expect(lastEditCall).toBeDefined();
// eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
const finalBody = lastEditCall[2]["m.new_content"].body as string;
expect(finalBody).toContain("15");
});
it("should throw error when client is not connected", async () => {
mockMatrixService.isConnected.mockReturnValue(false);
const stream = createTokenStream(["test"]);
await expect(service.streamResponse("!room:example.com", stream)).rejects.toThrow(
"Matrix client is not connected"
);
});
it("should handle empty token stream", async () => {
vi.useRealTimers();
const stream = createTokenStream([]);
await service.streamResponse("!room:example.com", stream);
// Should still send initial message
expect(mockClient.sendMessage).toHaveBeenCalled();
// Should edit with empty/no-content message
const sendEventCalls = mockClient.sendEvent.mock.calls;
expect(sendEventCalls.length).toBeGreaterThanOrEqual(1);
// Should clear typing
const typingCalls = mockClient.setTyping.mock.calls;
const lastTypingCall = typingCalls[typingCalls.length - 1];
expect(lastTypingCall).toEqual(["!room:example.com", false, undefined]);
});
it("should support thread context in streamResponse", async () => {
vi.useRealTimers();
const tokens = ["Reply"];
const stream = createTokenStream(tokens);
const options: StreamResponseOptions = { threadId: "$thread-root" };
await service.streamResponse("!room:example.com", stream, options);
// Initial message should include thread relation
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!room:example.com",
expect.objectContaining({
"m.relates_to": expect.objectContaining({
rel_type: "m.thread",
event_id: "$thread-root",
}),
})
);
});
it("should perform multiple edits for long-running streams", async () => {
vi.useRealTimers();
// Create tokens with 200ms delays - total ~2000ms, should get multiple edit windows
const tokens = Array.from({ length: 10 }, (_, i) => `token${String(i)} `);
const stream = createTokenStream(tokens, 200);
await service.streamResponse("!room:example.com", stream);
// With 10 tokens at 200ms each = 2000ms total, at 500ms intervals
// we expect roughly 3-4 intermediate edits + 1 final = 4-5 total
const editCalls = mockClient.sendEvent.mock.calls.filter(
(call) => call[1] === "m.room.message"
);
// Should have multiple edits (at least 2) but far fewer than 10
// (loose bounds keep the test robust against scheduler jitter)
expect(editCalls.length).toBeGreaterThanOrEqual(2);
expect(editCalls.length).toBeLessThanOrEqual(8);
});
});
});

View File

@@ -0,0 +1,248 @@
import { Injectable, Logger } from "@nestjs/common";
import type { MatrixClient } from "matrix-bot-sdk";
import { MatrixService } from "./matrix.service";
/**
 * Options accepted by MatrixStreamingService.streamResponse.
 *
 * All fields are optional; defaults are applied inside streamResponse.
 */
export interface StreamResponseOptions {
/** Custom initial message (defaults to "Thinking...") */
initialMessage?: string;
/** Thread root event ID for threaded responses */
threadId?: string;
/** Whether to show token usage in the final message */
showTokenUsage?: boolean;
/** Token usage stats to display in the final message (only used when showTokenUsage is true) */
tokenUsage?: { prompt: number; completion: number; total: number };
}
/**
 * Matrix message content for m.room.message events.
 *
 * Models only the fields this service actually sends: plain text bodies,
 * edit payloads (m.replace via "m.new_content") and thread/reply relations.
 */
interface MatrixMessageContent {
// Message type, e.g. "m.text"
msgtype: string;
// Plain-text body; for edits this is the fallback rendering ("* <text>")
body: string;
// Replacement content carried by m.replace edit events
"m.new_content"?: {
msgtype: string;
body: string;
};
// Relation metadata: m.replace for edits, m.thread for threaded replies
"m.relates_to"?: {
rel_type: string;
event_id: string;
is_falling_back?: boolean;
"m.in_reply_to"?: {
event_id: string;
};
};
}
/** Minimum interval between message edits (milliseconds) — throttles streaming edits to the homeserver. */
const EDIT_INTERVAL_MS = 500;
/** Typing indicator timeout (milliseconds) passed to setTyping while a response is being generated. */
const TYPING_TIMEOUT_MS = 30000;
/**
 * Matrix Streaming Service
 *
 * Streams AI responses into Matrix rooms by sending a single placeholder
 * message and then repeatedly editing it (m.replace relation) as tokens
 * arrive from the model. Edits are throttled to EDIT_INTERVAL_MS so long
 * generations feel smooth without flooding the homeserver.
 *
 * Key features:
 * - Rate-limited edits (max every 500ms)
 * - Typing indicator management during generation
 * - Graceful error handling with user-visible error notices
 * - Thread support for contextual responses
 * - LLM-agnostic design via AsyncIterable<string> token stream
 */
@Injectable()
export class MatrixStreamingService {
  private readonly logger = new Logger(MatrixStreamingService.name);

  constructor(private readonly matrixService: MatrixService) {}

  /**
   * Replace the content of an existing message via the m.replace relation.
   *
   * The top-level msgtype/body carry a "* <text>" fallback rendering for
   * clients that do not understand edits.
   *
   * @param roomId - The Matrix room ID
   * @param eventId - The event ID of the message being replaced
   * @param newContent - The updated message text
   */
  async editMessage(roomId: string, eventId: string, newContent: string): Promise<void> {
    const client = this.getClientOrThrow();
    const payload: MatrixMessageContent = {
      // Fallback body for clients without edit support
      msgtype: "m.text",
      body: `* ${newContent}`,
      "m.new_content": {
        msgtype: "m.text",
        body: newContent,
      },
      "m.relates_to": {
        rel_type: "m.replace",
        event_id: eventId,
      },
    };
    await client.sendEvent(roomId, "m.room.message", payload);
  }

  /**
   * Toggle the bot's typing indicator in a room.
   *
   * @param roomId - The Matrix room ID
   * @param typing - Whether the bot should appear to be typing
   */
  async setTypingIndicator(roomId: string, typing: boolean): Promise<void> {
    const client = this.getClientOrThrow();
    // A timeout is only meaningful while typing; pass undefined when clearing.
    await client.setTyping(roomId, typing, typing ? TYPING_TIMEOUT_MS : undefined);
  }

  /**
   * Send the first message of a streaming response, optionally in a thread.
   *
   * @param roomId - The Matrix room ID
   * @param content - The initial message body
   * @param threadId - Optional thread root event ID
   * @returns The event ID of the sent message, used for subsequent edits
   */
  async sendStreamingMessage(roomId: string, content: string, threadId?: string): Promise<string> {
    const client = this.getClientOrThrow();
    const message: MatrixMessageContent = {
      msgtype: "m.text",
      body: content,
    };
    if (threadId) {
      // Thread relation with reply fallback for non-threading clients
      message["m.relates_to"] = {
        rel_type: "m.thread",
        event_id: threadId,
        is_falling_back: true,
        "m.in_reply_to": {
          event_id: threadId,
        },
      };
    }
    return await client.sendMessage(roomId, message);
  }

  /**
   * Stream an AI response into a room via incremental message edits.
   *
   * Flow:
   * 1. Validate the connection, then send an initial placeholder message
   * 2. Start the typing indicator
   * 3. Accumulate tokens, flushing an edit at most once per EDIT_INTERVAL_MS
   * 4. On error: edit the message with an error notice
   * 5. Always clear the typing indicator
   * 6. On success: send a final clean edit (optionally with token usage)
   *
   * @param roomId - The Matrix room ID
   * @param tokenStream - AsyncIterable that yields string tokens
   * @param options - Optional streaming configuration
   */
  async streamResponse(
    roomId: string,
    tokenStream: AsyncIterable<string>,
    options?: StreamResponseOptions
  ): Promise<void> {
    // Fail fast before sending anything if the client is unavailable.
    this.getClientOrThrow();
    const placeholder = options?.initialMessage ?? "Thinking...";
    const eventId = await this.sendStreamingMessage(roomId, placeholder, options?.threadId);
    await this.setTypingIndicator(roomId, true);

    let buffer = "";
    let lastFlushAt = 0;
    let failed = false;
    try {
      for await (const token of tokenStream) {
        buffer += token;
        const now = Date.now();
        // Throttle: only edit when the interval elapsed and there is text.
        if (now - lastFlushAt >= EDIT_INTERVAL_MS && buffer.length > 0) {
          await this.editMessage(roomId, eventId, buffer);
          lastFlushAt = now;
        }
      }
    } catch (error: unknown) {
      failed = true;
      const reason = error instanceof Error ? error.message : "Unknown error occurred";
      this.logger.error(`Stream error in room ${roomId}: ${reason}`);
      // Best effort: surface the failure in the message itself.
      try {
        const notice = buffer
          ? `${buffer}\n\n[Streaming error: ${reason}]`
          : `[Streaming error: ${reason}]`;
        await this.editMessage(roomId, eventId, notice);
      } catch (editError: unknown) {
        this.logger.warn(
          `Failed to edit error message in ${roomId}: ${editError instanceof Error ? editError.message : "unknown"}`
        );
      }
    } finally {
      // Always stop the typing indicator, even when streaming failed.
      try {
        await this.setTypingIndicator(roomId, false);
      } catch (typingError: unknown) {
        this.logger.warn(
          `Failed to clear typing indicator in ${roomId}: ${typingError instanceof Error ? typingError.message : "unknown"}`
        );
      }
    }

    if (!failed) {
      // Final clean edit with the full response, plus optional usage footer.
      let finalContent = buffer || "(No response generated)";
      if (options?.showTokenUsage && options.tokenUsage) {
        const { prompt, completion, total } = options.tokenUsage;
        finalContent += `\n\n---\nTokens: ${String(total)} (prompt: ${String(prompt)}, completion: ${String(completion)})`;
      }
      await this.editMessage(roomId, eventId, finalContent);
    }
  }

  /**
   * Resolve the underlying MatrixClient, or throw when not connected.
   *
   * @throws Error when the parent MatrixService reports no connection
   */
  private getClientOrThrow(): MatrixClient {
    if (!this.matrixService.isConnected()) {
      throw new Error("Matrix client is not connected");
    }
    const client = this.matrixService.getClient();
    if (!client) {
      throw new Error("Matrix client is not connected");
    }
    return client;
  }
}

View File

@@ -0,0 +1,979 @@
import { Test, TestingModule } from "@nestjs/testing";
import { MatrixService } from "./matrix.service";
import { MatrixRoomService } from "./matrix-room.service";
import { StitcherService } from "../../stitcher/stitcher.service";
import { CommandParserService } from "../parser/command-parser.service";
import { vi, describe, it, expect, beforeEach } from "vitest";
import type { ChatMessage } from "../interfaces";
// Mock matrix-bot-sdk.
// Handlers registered via client.on() are captured here so tests can fire
// synthetic Matrix events at the service.
const mockMessageCallbacks: Array<(roomId: string, event: Record<string, unknown>) => void> = [];
const mockEventCallbacks: Array<(roomId: string, event: Record<string, unknown>) => void> = [];
const mockClient = {
start: vi.fn().mockResolvedValue(undefined),
stop: vi.fn(),
on: vi
.fn()
.mockImplementation(
(event: string, callback: (roomId: string, evt: Record<string, unknown>) => void) => {
if (event === "room.message") {
mockMessageCallbacks.push(callback);
}
if (event === "room.event") {
mockEventCallbacks.push(callback);
}
}
),
sendMessage: vi.fn().mockResolvedValue("$event-id-123"),
sendEvent: vi.fn().mockResolvedValue("$event-id-456"),
};
// NOTE: vitest hoists vi.mock above imports; the factory is evaluated lazily,
// which is why it may reference mockClient declared above.
vi.mock("matrix-bot-sdk", () => {
return {
MatrixClient: class MockMatrixClient {
start = mockClient.start;
stop = mockClient.stop;
on = mockClient.on;
sendMessage = mockClient.sendMessage;
sendEvent = mockClient.sendEvent;
},
SimpleFsStorageProvider: class MockStorageProvider {
constructor(_filename: string) {
// No-op for testing
}
},
AutojoinRoomsMixin: {
setupOnClient: vi.fn(),
},
};
});
describe("MatrixService", () => {
let service: MatrixService;
let stitcherService: StitcherService;
let commandParser: CommandParserService;
let matrixRoomService: MatrixRoomService;
// Stand-in for StitcherService: records dispatched jobs without queueing.
const mockStitcherService = {
dispatchJob: vi.fn().mockResolvedValue({
jobId: "test-job-id",
queueName: "main",
status: "PENDING",
}),
trackJobEvent: vi.fn().mockResolvedValue(undefined),
};
// Stand-in for MatrixRoomService: no room/workspace mappings by default;
// individual tests override getWorkspaceForRoom as needed.
const mockMatrixRoomService = {
getWorkspaceForRoom: vi.fn().mockResolvedValue(null),
getRoomForWorkspace: vi.fn().mockResolvedValue(null),
provisionRoom: vi.fn().mockResolvedValue(null),
linkWorkspaceToRoom: vi.fn().mockResolvedValue(undefined),
unlinkWorkspace: vi.fn().mockResolvedValue(undefined),
};
beforeEach(async () => {
// Set environment variables for testing
process.env.MATRIX_HOMESERVER_URL = "https://matrix.example.com";
process.env.MATRIX_ACCESS_TOKEN = "test-access-token";
process.env.MATRIX_BOT_USER_ID = "@mosaic-bot:example.com";
process.env.MATRIX_CONTROL_ROOM_ID = "!test-room:example.com";
process.env.MATRIX_WORKSPACE_ID = "test-workspace-id";
// Clear callbacks
mockMessageCallbacks.length = 0;
mockEventCallbacks.length = 0;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
service = module.get<MatrixService>(MatrixService);
stitcherService = module.get<StitcherService>(StitcherService);
commandParser = module.get<CommandParserService>(CommandParserService);
matrixRoomService = module.get(MatrixRoomService) as MatrixRoomService;
// Clear all mocks
vi.clearAllMocks();
});
// Lifecycle of the underlying Matrix client (start/stop/isConnected).
describe("Connection Management", () => {
it("should connect to Matrix", async () => {
await service.connect();
expect(mockClient.start).toHaveBeenCalled();
});
it("should disconnect from Matrix", async () => {
await service.connect();
await service.disconnect();
expect(mockClient.stop).toHaveBeenCalled();
});
it("should check connection status", async () => {
expect(service.isConnected()).toBe(false);
await service.connect();
expect(service.isConnected()).toBe(true);
await service.disconnect();
expect(service.isConnected()).toBe(false);
});
});
// Plain m.text sends and the not-connected guard.
describe("Message Handling", () => {
it("should send a message to a room", async () => {
await service.connect();
await service.sendMessage("!test-room:example.com", "Hello, Matrix!");
expect(mockClient.sendMessage).toHaveBeenCalledWith("!test-room:example.com", {
msgtype: "m.text",
body: "Hello, Matrix!",
});
});
it("should throw error if client is not connected", async () => {
await expect(service.sendMessage("!room:example.com", "Test")).rejects.toThrow(
"Matrix client is not connected"
);
});
});
// Thread creation and m.thread-related replies.
describe("Thread Management", () => {
it("should create a thread by sending an initial message", async () => {
await service.connect();
const threadId = await service.createThread({
channelId: "!test-room:example.com",
name: "Job #42",
message: "Starting job...",
});
expect(threadId).toBe("$event-id-123");
expect(mockClient.sendMessage).toHaveBeenCalledWith("!test-room:example.com", {
msgtype: "m.text",
body: "[Job #42] Starting job...",
});
});
it("should send a message to a thread with m.thread relation", async () => {
await service.connect();
await service.sendThreadMessage({
threadId: "$root-event-id",
channelId: "!test-room:example.com",
content: "Step completed",
});
expect(mockClient.sendMessage).toHaveBeenCalledWith("!test-room:example.com", {
msgtype: "m.text",
body: "Step completed",
"m.relates_to": {
rel_type: "m.thread",
event_id: "$root-event-id",
is_falling_back: true,
"m.in_reply_to": {
event_id: "$root-event-id",
},
},
});
});
it("should fall back to controlRoomId when channelId is empty", async () => {
await service.connect();
await service.sendThreadMessage({
threadId: "$root-event-id",
channelId: "",
content: "Fallback message",
});
expect(mockClient.sendMessage).toHaveBeenCalledWith("!test-room:example.com", {
msgtype: "m.text",
body: "Fallback message",
"m.relates_to": {
rel_type: "m.thread",
event_id: "$root-event-id",
is_falling_back: true,
"m.in_reply_to": {
event_id: "$root-event-id",
},
},
});
});
it("should throw error when creating thread without connection", async () => {
await expect(
service.createThread({
channelId: "!room:example.com",
name: "Test",
message: "Test",
})
).rejects.toThrow("Matrix client is not connected");
});
it("should throw error when sending thread message without connection", async () => {
await expect(
service.sendThreadMessage({
threadId: "$event-id",
channelId: "!room:example.com",
content: "Test",
})
).rejects.toThrow("Matrix client is not connected");
});
});
// parseCommand delegates to the shared CommandParserService; !mosaic is
// normalized to @mosaic before parsing.
describe("Command Parsing with shared CommandParserService", () => {
it("should parse @mosaic fix #42 via shared parser", () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix #42",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).not.toBeNull();
expect(command?.command).toBe("fix");
expect(command?.args).toContain("#42");
});
it("should parse !mosaic fix #42 by normalizing to @mosaic for the shared parser", () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "!mosaic fix #42",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).not.toBeNull();
expect(command?.command).toBe("fix");
expect(command?.args).toContain("#42");
});
it("should parse @mosaic status command via shared parser", () => {
const message: ChatMessage = {
id: "msg-2",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic status job-123",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).not.toBeNull();
expect(command?.command).toBe("status");
expect(command?.args).toContain("job-123");
});
it("should parse @mosaic cancel command via shared parser", () => {
const message: ChatMessage = {
id: "msg-3",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic cancel job-456",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).not.toBeNull();
expect(command?.command).toBe("cancel");
});
it("should parse @mosaic help command via shared parser", () => {
const message: ChatMessage = {
id: "msg-6",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic help",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).not.toBeNull();
expect(command?.command).toBe("help");
});
it("should return null for non-command messages", () => {
const message: ChatMessage = {
id: "msg-7",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "Just a regular message",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).toBeNull();
});
it("should return null for messages without @mosaic or !mosaic mention", () => {
const message: ChatMessage = {
id: "msg-8",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "fix 42",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).toBeNull();
});
it("should return null for @mosaic mention without a command", () => {
const message: ChatMessage = {
id: "msg-11",
channelId: "!room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic",
timestamp: new Date(),
};
const command = service.parseCommand(message);
expect(command).toBeNull();
});
});
// Drives the room.message handler captured in mockMessageCallbacks to
// simulate incoming Matrix events; handlers run asynchronously, hence the
// short setTimeout waits before asserting.
describe("Event-driven message reception", () => {
it("should ignore messages from the bot itself", async () => {
await service.connect();
const parseCommandSpy = vi.spyOn(commandParser, "parseCommand");
// Simulate a message from the bot
expect(mockMessageCallbacks.length).toBeGreaterThan(0);
const callback = mockMessageCallbacks[0];
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@mosaic-bot:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic fix #42",
},
});
// Should not attempt to parse
expect(parseCommandSpy).not.toHaveBeenCalled();
});
it("should ignore messages in unmapped rooms", async () => {
// MatrixRoomService returns null for unknown rooms
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue(null);
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!unknown-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic fix #42",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should not dispatch to stitcher
expect(stitcherService.dispatchJob).not.toHaveBeenCalled();
});
it("should process commands in the control room (fallback workspace)", async () => {
// MatrixRoomService returns null, but room matches controlRoomId
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue(null);
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic help",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should send help message
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Available commands:"),
})
);
});
it("should process commands in rooms mapped via MatrixRoomService", async () => {
// MatrixRoomService resolves the workspace
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("mapped-workspace-id");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!mapped-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic fix #42",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should dispatch with the mapped workspace ID
expect(stitcherService.dispatchJob).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: "mapped-workspace-id",
})
);
});
it("should handle !mosaic prefix in incoming messages", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("test-workspace-id");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "!mosaic help",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should send help message (normalized !mosaic -> @mosaic for parser)
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Available commands:"),
})
);
});
it("should send help text when user tries an unknown command", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("test-workspace-id");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic invalidcommand",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should send error/help message (CommandParserService returns help text for unknown actions)
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Available commands"),
})
);
});
it("should ignore non-text messages", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("test-workspace-id");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.image",
body: "photo.jpg",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should not attempt any message sending
expect(mockClient.sendMessage).not.toHaveBeenCalled();
});
});
// handleCommand: dispatching parsed commands to the stitcher and replying
// with help/usage/error messages in the originating room.
describe("Command Execution", () => {
it("should forward fix command to stitcher and create a thread", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix 42",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "fix",
args: ["42"],
message,
});
// threadId matches mockClient.sendMessage's resolved event ID
expect(stitcherService.dispatchJob).toHaveBeenCalledWith({
workspaceId: "test-workspace-id",
type: "code-task",
priority: 10,
metadata: {
issueNumber: 42,
command: "fix",
channelId: "!test-room:example.com",
threadId: "$event-id-123",
authorId: "@user:example.com",
authorName: "@user:example.com",
},
});
});
it("should handle fix with #-prefixed issue number", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix #42",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "fix",
args: ["#42"],
message,
});
expect(stitcherService.dispatchJob).toHaveBeenCalledWith(
expect.objectContaining({
metadata: expect.objectContaining({
issueNumber: 42,
}),
})
);
});
it("should respond with help message", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic help",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "help",
args: [],
message,
});
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Available commands:"),
})
);
});
it("should include retry command in help output", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic help",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "help",
args: [],
message,
});
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("retry"),
})
);
});
it("should send error for fix command without issue number", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "fix",
args: [],
message,
});
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Usage:"),
})
);
});
it("should send error for fix command with non-numeric issue", async () => {
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix abc",
timestamp: new Date(),
};
await service.connect();
await service.handleCommand({
command: "fix",
args: ["abc"],
message,
});
expect(mockClient.sendMessage).toHaveBeenCalledWith(
"!test-room:example.com",
expect.objectContaining({
body: expect.stringContaining("Invalid issue number"),
})
);
});
it("should dispatch fix command with workspace from MatrixRoomService", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("dynamic-workspace-id");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!mapped-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic fix #99",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
expect(stitcherService.dispatchJob).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: "dynamic-workspace-id",
metadata: expect.objectContaining({
issueNumber: 99,
}),
})
);
});
});
// Required environment variables are validated at connect() time; each test
// deletes one variable, builds a fresh module, and restores the value after.
describe("Configuration", () => {
it("should throw error if MATRIX_HOMESERVER_URL is not set", async () => {
delete process.env.MATRIX_HOMESERVER_URL;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
const newService = module.get<MatrixService>(MatrixService);
await expect(newService.connect()).rejects.toThrow("MATRIX_HOMESERVER_URL is required");
// Restore for other tests
process.env.MATRIX_HOMESERVER_URL = "https://matrix.example.com";
});
it("should throw error if MATRIX_ACCESS_TOKEN is not set", async () => {
delete process.env.MATRIX_ACCESS_TOKEN;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
const newService = module.get<MatrixService>(MatrixService);
await expect(newService.connect()).rejects.toThrow("MATRIX_ACCESS_TOKEN is required");
// Restore for other tests
process.env.MATRIX_ACCESS_TOKEN = "test-access-token";
});
it("should throw error if MATRIX_BOT_USER_ID is not set", async () => {
delete process.env.MATRIX_BOT_USER_ID;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
const newService = module.get<MatrixService>(MatrixService);
await expect(newService.connect()).rejects.toThrow("MATRIX_BOT_USER_ID is required");
// Restore for other tests
process.env.MATRIX_BOT_USER_ID = "@mosaic-bot:example.com";
});
it("should throw error if MATRIX_WORKSPACE_ID is not set", async () => {
delete process.env.MATRIX_WORKSPACE_ID;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
const newService = module.get<MatrixService>(MatrixService);
await expect(newService.connect()).rejects.toThrow("MATRIX_WORKSPACE_ID is required");
// Restore for other tests
process.env.MATRIX_WORKSPACE_ID = "test-workspace-id";
});
it("should use configured workspace ID from environment", async () => {
const testWorkspaceId = "configured-workspace-456";
process.env.MATRIX_WORKSPACE_ID = testWorkspaceId;
const module: TestingModule = await Test.createTestingModule({
providers: [
MatrixService,
CommandParserService,
{
provide: StitcherService,
useValue: mockStitcherService,
},
{
provide: MatrixRoomService,
useValue: mockMatrixRoomService,
},
],
}).compile();
const newService = module.get<MatrixService>(MatrixService);
const message: ChatMessage = {
id: "msg-1",
channelId: "!test-room:example.com",
authorId: "@user:example.com",
authorName: "@user:example.com",
content: "@mosaic fix 42",
timestamp: new Date(),
};
await newService.connect();
await newService.handleCommand({
command: "fix",
args: ["42"],
message,
});
expect(mockStitcherService.dispatchJob).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: testWorkspaceId,
})
);
// Restore for other tests
process.env.MATRIX_WORKSPACE_ID = "test-workspace-id";
});
});
describe("Error Logging Security", () => {
it("should sanitize sensitive data in error logs", async () => {
const loggerErrorSpy = vi.spyOn(
(service as Record<string, unknown>)["logger"] as { error: (...args: unknown[]) => void },
"error"
);
await service.connect();
// Trigger room.event handler with null event to exercise error path
expect(mockEventCallbacks.length).toBeGreaterThan(0);
mockEventCallbacks[0]?.("!room:example.com", null as unknown as Record<string, unknown>);
// Verify error was logged
expect(loggerErrorSpy).toHaveBeenCalled();
// Get the logged error
const loggedArgs = loggerErrorSpy.mock.calls[0];
const loggedError = loggedArgs?.[1] as Record<string, unknown>;
// Verify non-sensitive error info is preserved
expect(loggedError).toBeDefined();
expect((loggedError as { message: string }).message).toBe("Received null event from Matrix");
});
it("should not include access token in error output", () => {
// Verify the access token is stored privately and not exposed
const serviceAsRecord = service as unknown as Record<string, unknown>;
// The accessToken should exist but should not appear in any public-facing method output
expect(serviceAsRecord["accessToken"]).toBe("test-access-token");
// Verify isConnected does not leak token
const connected = service.isConnected();
expect(String(connected)).not.toContain("test-access-token");
});
});
describe("MatrixRoomService reverse lookup", () => {
it("should call getWorkspaceForRoom when processing messages", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue("resolved-workspace");
await service.connect();
const callback = mockMessageCallbacks[0];
callback?.("!some-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic help",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
expect(matrixRoomService.getWorkspaceForRoom).toHaveBeenCalledWith("!some-room:example.com");
});
it("should fall back to control room workspace when MatrixRoomService returns null", async () => {
mockMatrixRoomService.getWorkspaceForRoom.mockResolvedValue(null);
await service.connect();
const callback = mockMessageCallbacks[0];
// Send to the control room (fallback path)
callback?.("!test-room:example.com", {
event_id: "$msg-1",
sender: "@user:example.com",
origin_server_ts: Date.now(),
content: {
msgtype: "m.text",
body: "@mosaic fix #10",
},
});
// Wait for async processing
await new Promise((resolve) => setTimeout(resolve, 50));
// Should dispatch with the env-configured workspace
expect(stitcherService.dispatchJob).toHaveBeenCalledWith(
expect.objectContaining({
workspaceId: "test-workspace-id",
})
);
});
});
});

View File

@@ -0,0 +1,649 @@
import { Injectable, Logger, Optional, Inject } from "@nestjs/common";
import { MatrixClient, SimpleFsStorageProvider, AutojoinRoomsMixin } from "matrix-bot-sdk";
import { StitcherService } from "../../stitcher/stitcher.service";
import { CommandParserService } from "../parser/command-parser.service";
import { CommandAction } from "../parser/command.interface";
import type { ParsedCommand } from "../parser/command.interface";
import { MatrixRoomService } from "./matrix-room.service";
import { sanitizeForLogging } from "../../common/utils";
import type {
IChatProvider,
ChatMessage,
ChatCommand,
ThreadCreateOptions,
ThreadMessageOptions,
} from "../interfaces";
/**
 * Matrix room message event content
 */
interface MatrixMessageContent {
  // Matrix message type, e.g. "m.text" (the only type this service handles)
  msgtype: string;
  // Plain-text body of the message
  body: string;
  // Optional thread relation (MSC3440); present on threaded messages
  "m.relates_to"?: MatrixRelatesTo;
}
/**
 * Matrix relationship metadata for threads (MSC3440)
 */
interface MatrixRelatesTo {
  // Relation type; "m.thread" for thread replies
  rel_type: string;
  // Event ID of the thread root this message relates to
  event_id: string;
  // True when the reply fields below are only a fallback for
  // clients without thread support
  is_falling_back?: boolean;
  // Reply fallback so non-threaded clients render the message as a reply
  "m.in_reply_to"?: {
    event_id: string;
  };
}
/**
 * Matrix room event structure
 */
interface MatrixRoomEvent {
  // Unique event ID assigned by the homeserver (e.g. "$abc…")
  event_id: string;
  // Fully-qualified Matrix user ID of the sender (e.g. "@user:example.com")
  sender: string;
  // Server-side timestamp, milliseconds since the Unix epoch
  origin_server_ts: number;
  // Message payload; only "m.text" content is processed by this service
  content: MatrixMessageContent;
}
/**
 * Matrix Service - Matrix chat platform integration
 *
 * Responsibilities:
 * - Connect to Matrix via access token
 * - Listen for commands in mapped rooms (via MatrixRoomService)
 * - Parse commands using shared CommandParserService
 * - Forward commands to stitcher
 * - Receive status updates from herald
 * - Post updates to threads (MSC3440)
 */
@Injectable()
export class MatrixService implements IChatProvider {
  private readonly logger = new Logger(MatrixService.name);
  private client: MatrixClient | null = null;
  private connected = false;
  // Connection configuration, read once from the environment at construction
  private readonly homeserverUrl: string;
  private readonly accessToken: string;
  private readonly botUserId: string;
  private readonly controlRoomId: string;
  private readonly workspaceId: string;
  constructor(
    private readonly stitcherService: StitcherService,
    @Optional()
    @Inject(CommandParserService)
    private readonly commandParser: CommandParserService | null,
    @Optional()
    @Inject(MatrixRoomService)
    private readonly matrixRoomService: MatrixRoomService | null
  ) {
    this.homeserverUrl = process.env.MATRIX_HOMESERVER_URL ?? "";
    this.accessToken = process.env.MATRIX_ACCESS_TOKEN ?? "";
    this.botUserId = process.env.MATRIX_BOT_USER_ID ?? "";
    this.controlRoomId = process.env.MATRIX_CONTROL_ROOM_ID ?? "";
    this.workspaceId = process.env.MATRIX_WORKSPACE_ID ?? "";
  }
  /**
   * Connect to Matrix homeserver.
   *
   * @throws Error if any required MATRIX_* environment variable is missing
   */
  async connect(): Promise<void> {
    if (!this.homeserverUrl) {
      throw new Error("MATRIX_HOMESERVER_URL is required");
    }
    if (!this.accessToken) {
      throw new Error("MATRIX_ACCESS_TOKEN is required");
    }
    if (!this.workspaceId) {
      throw new Error("MATRIX_WORKSPACE_ID is required");
    }
    if (!this.botUserId) {
      throw new Error("MATRIX_BOT_USER_ID is required");
    }
    this.logger.log("Connecting to Matrix...");
    const storage = new SimpleFsStorageProvider("matrix-bot-storage.json");
    this.client = new MatrixClient(this.homeserverUrl, this.accessToken, storage);
    // Auto-join rooms when invited
    AutojoinRoomsMixin.setupOnClient(this.client);
    // Setup event handlers
    this.setupEventHandlers();
    // Start syncing
    await this.client.start();
    this.connected = true;
    this.logger.log(`Matrix bot connected as ${this.botUserId}`);
  }
  /**
   * Setup event handlers for Matrix client
   */
  private setupEventHandlers(): void {
    if (!this.client) return;
    this.client.on("room.message", (roomId: string, event: MatrixRoomEvent) => {
      // Ignore messages from the bot itself
      if (event.sender === this.botUserId) return;
      // Only handle text messages
      if (event.content.msgtype !== "m.text") return;
      this.handleRoomMessage(roomId, event).catch((error: unknown) => {
        this.logger.error(
          `Error handling room message in ${roomId}:`,
          error instanceof Error ? error.message : error
        );
      });
    });
    this.client.on("room.event", (_roomId: string, event: MatrixRoomEvent | null) => {
      // Handle errors emitted as events
      if (!event) {
        const error = new Error("Received null event from Matrix");
        const sanitizedError = sanitizeForLogging(error);
        this.logger.error("Matrix client error:", sanitizedError);
      }
    });
  }
  /**
   * Handle an incoming room message.
   *
   * Resolves the workspace for the room (via MatrixRoomService or fallback
   * to the control room), then delegates to the shared CommandParserService
   * for platform-agnostic command parsing and dispatches the result.
   */
  private async handleRoomMessage(roomId: string, event: MatrixRoomEvent): Promise<void> {
    // Resolve workspace: try MatrixRoomService first, fall back to control room
    let resolvedWorkspaceId: string | null = null;
    if (this.matrixRoomService) {
      resolvedWorkspaceId = await this.matrixRoomService.getWorkspaceForRoom(roomId);
    }
    // Fallback: if the room is the configured control room, use the env workspace
    if (!resolvedWorkspaceId && roomId === this.controlRoomId) {
      resolvedWorkspaceId = this.workspaceId;
    }
    // If room is not mapped to any workspace, ignore the message
    if (!resolvedWorkspaceId) {
      return;
    }
    const messageContent = event.content.body;
    // Build ChatMessage for interface compatibility
    const chatMessage: ChatMessage = {
      id: event.event_id,
      channelId: roomId,
      authorId: event.sender,
      authorName: event.sender,
      content: messageContent,
      timestamp: new Date(event.origin_server_ts),
      ...(event.content["m.relates_to"]?.rel_type === "m.thread" && {
        threadId: event.content["m.relates_to"].event_id,
      }),
    };
    // Use shared CommandParserService if available
    if (this.commandParser) {
      // Normalize !mosaic to @mosaic for the shared parser
      const normalizedContent = messageContent.replace(/^!mosaic/i, "@mosaic");
      const result = this.commandParser.parseCommand(normalizedContent);
      if (result.success) {
        await this.handleParsedCommand(result.command, chatMessage, resolvedWorkspaceId);
      } else if (normalizedContent.toLowerCase().startsWith("@mosaic")) {
        // The user tried to use a command but it failed to parse -- send help
        await this.sendMessage(roomId, result.error.help ?? result.error.message);
      }
      return;
    }
    // Fallback: use the built-in parseCommand if CommandParserService not injected
    const command = this.parseCommand(chatMessage);
    if (command) {
      await this.handleCommand(command);
    }
  }
  /**
   * Handle a command parsed by the shared CommandParserService.
   *
   * Routes the ParsedCommand to the appropriate handler, passing
   * along workspace context for job dispatch.
   */
  private async handleParsedCommand(
    parsed: ParsedCommand,
    message: ChatMessage,
    workspaceId: string
  ): Promise<void> {
    this.logger.log(
      `Handling command: ${parsed.action} from ${message.authorName} in workspace ${workspaceId}`
    );
    switch (parsed.action) {
      case CommandAction.FIX:
        await this.handleFixCommand(parsed.rawArgs, message, workspaceId);
        break;
      case CommandAction.STATUS:
        await this.handleStatusCommand(parsed.rawArgs, message);
        break;
      case CommandAction.CANCEL:
        await this.handleCancelCommand(parsed.rawArgs, message);
        break;
      case CommandAction.VERBOSE:
        await this.handleVerboseCommand(parsed.rawArgs, message);
        break;
      case CommandAction.QUIET:
        await this.handleQuietCommand(parsed.rawArgs, message);
        break;
      case CommandAction.HELP:
        await this.handleHelpCommand(parsed.rawArgs, message);
        break;
      case CommandAction.RETRY:
        await this.handleRetryCommand(parsed.rawArgs, message);
        break;
      default:
        await this.sendMessage(
          message.channelId,
          `Unknown command. Type \`@mosaic help\` or \`!mosaic help\` for available commands.`
        );
    }
  }
  /**
   * Disconnect from Matrix
   */
  disconnect(): Promise<void> {
    this.logger.log("Disconnecting from Matrix...");
    this.connected = false;
    if (this.client) {
      this.client.stop();
    }
    return Promise.resolve();
  }
  /**
   * Check if the provider is connected
   */
  isConnected(): boolean {
    return this.connected;
  }
  /**
   * Get the underlying MatrixClient instance.
   *
   * Used by MatrixStreamingService for low-level operations
   * (message edits, typing indicators) that require direct client access.
   *
   * @returns The MatrixClient instance, or null if not connected
   */
  getClient(): MatrixClient | null {
    return this.client;
  }
  /**
   * Send a message to a room
   *
   * @throws Error if the client is not connected
   */
  async sendMessage(roomId: string, content: string): Promise<void> {
    if (!this.client) {
      throw new Error("Matrix client is not connected");
    }
    const messageContent: MatrixMessageContent = {
      msgtype: "m.text",
      body: content,
    };
    await this.client.sendMessage(roomId, messageContent);
  }
  /**
   * Create a thread for job updates (MSC3440)
   *
   * Matrix threads are created by sending an initial message
   * and then replying with m.thread relation. The initial
   * message event ID becomes the thread root.
   *
   * @returns The event ID of the thread-root message
   */
  async createThread(options: ThreadCreateOptions): Promise<string> {
    if (!this.client) {
      throw new Error("Matrix client is not connected");
    }
    const { channelId, name, message } = options;
    // Send the initial message that becomes the thread root
    const initialContent: MatrixMessageContent = {
      msgtype: "m.text",
      body: `[${name}] ${message}`,
    };
    const eventId = await this.client.sendMessage(channelId, initialContent);
    return eventId;
  }
  /**
   * Send a message to a thread (MSC3440)
   *
   * Uses m.thread relation to associate the message with the thread root event.
   */
  async sendThreadMessage(options: ThreadMessageOptions): Promise<void> {
    if (!this.client) {
      throw new Error("Matrix client is not connected");
    }
    const { threadId, channelId, content } = options;
    // Use the channelId from options (threads are room-scoped), fall back to control room
    const roomId = channelId || this.controlRoomId;
    const threadContent: MatrixMessageContent = {
      msgtype: "m.text",
      body: content,
      "m.relates_to": {
        rel_type: "m.thread",
        event_id: threadId,
        is_falling_back: true,
        "m.in_reply_to": {
          event_id: threadId,
        },
      },
    };
    await this.client.sendMessage(roomId, threadContent);
  }
  /**
   * Parse a command from a message (IChatProvider interface).
   *
   * Delegates to the shared CommandParserService when available,
   * falling back to built-in parsing for backwards compatibility.
   */
  parseCommand(message: ChatMessage): ChatCommand | null {
    const { content } = message;
    // Try shared parser first
    if (this.commandParser) {
      const normalizedContent = content.replace(/^!mosaic/i, "@mosaic");
      const result = this.commandParser.parseCommand(normalizedContent);
      if (result.success) {
        return {
          command: result.command.action,
          args: result.command.rawArgs,
          message,
        };
      }
      return null;
    }
    // Fallback: built-in parsing for when CommandParserService is not injected
    const lowerContent = content.toLowerCase();
    if (!lowerContent.includes("@mosaic") && !lowerContent.includes("!mosaic")) {
      return null;
    }
    const parts = content.trim().split(/\s+/);
    const mosaicIndex = parts.findIndex(
      (part) => part.toLowerCase().includes("@mosaic") || part.toLowerCase().includes("!mosaic")
    );
    if (mosaicIndex === -1 || mosaicIndex === parts.length - 1) {
      return null;
    }
    const commandPart = parts[mosaicIndex + 1];
    if (!commandPart) {
      return null;
    }
    const command = commandPart.toLowerCase();
    const args = parts.slice(mosaicIndex + 2);
    // Keep in sync with handleCommand and the shared CommandParserService.
    // Fix: "retry" was missing here even though handleRetryCommand exists
    // and the help text advertises it.
    const validCommands = ["fix", "status", "cancel", "verbose", "quiet", "help", "retry"];
    if (!validCommands.includes(command)) {
      return null;
    }
    return {
      command,
      args,
      message,
    };
  }
  /**
   * Handle a parsed command (ChatCommand format, used by fallback path)
   */
  async handleCommand(command: ChatCommand): Promise<void> {
    const { command: cmd, args, message } = command;
    this.logger.log(
      `Handling command: ${cmd} with args: ${args.join(", ")} from ${message.authorName}`
    );
    switch (cmd) {
      case "fix":
        await this.handleFixCommand(args, message, this.workspaceId);
        break;
      case "status":
        await this.handleStatusCommand(args, message);
        break;
      case "cancel":
        await this.handleCancelCommand(args, message);
        break;
      case "verbose":
        await this.handleVerboseCommand(args, message);
        break;
      case "quiet":
        await this.handleQuietCommand(args, message);
        break;
      case "help":
        await this.handleHelpCommand(args, message);
        break;
      // Fix: route "retry" like the shared-parser path (handleParsedCommand)
      // does; previously it fell through to the unknown-command message.
      case "retry":
        await this.handleRetryCommand(args, message);
        break;
      default:
        await this.sendMessage(
          message.channelId,
          `Unknown command: ${cmd}. Type \`@mosaic help\` or \`!mosaic help\` for available commands.`
        );
    }
  }
  /**
   * Handle fix command - Start a job for an issue
   */
  private async handleFixCommand(
    args: string[],
    message: ChatMessage,
    workspaceId?: string
  ): Promise<void> {
    if (args.length === 0 || !args[0]) {
      await this.sendMessage(
        message.channelId,
        "Usage: `@mosaic fix <issue-number>` or `!mosaic fix <issue-number>`"
      );
      return;
    }
    // Parse issue number: handle both "#42" and "42" formats
    const issueArg = args[0].replace(/^#/, "");
    const issueNumber = parseInt(issueArg, 10);
    if (isNaN(issueNumber)) {
      await this.sendMessage(
        message.channelId,
        "Invalid issue number. Please provide a numeric issue number."
      );
      return;
    }
    const targetWorkspaceId = workspaceId ?? this.workspaceId;
    // Create thread for job updates
    const threadId = await this.createThread({
      channelId: message.channelId,
      name: `Job #${String(issueNumber)}`,
      message: `Starting job for issue #${String(issueNumber)}...`,
    });
    // Dispatch job to stitcher
    try {
      const result = await this.stitcherService.dispatchJob({
        workspaceId: targetWorkspaceId,
        type: "code-task",
        priority: 10,
        metadata: {
          issueNumber,
          command: "fix",
          channelId: message.channelId,
          threadId: threadId,
          authorId: message.authorId,
          authorName: message.authorName,
        },
      });
      // Send confirmation to thread
      await this.sendThreadMessage({
        threadId,
        channelId: message.channelId,
        content: `Job created: ${result.jobId}\nStatus: ${result.status}\nQueue: ${result.queueName}`,
      });
    } catch (error: unknown) {
      const errorMessage = error instanceof Error ? error.message : "Unknown error";
      this.logger.error(
        `Failed to dispatch job for issue #${String(issueNumber)}: ${errorMessage}`
      );
      await this.sendThreadMessage({
        threadId,
        channelId: message.channelId,
        content: `Failed to start job: ${errorMessage}`,
      });
    }
  }
  /**
   * Handle status command - Get job status
   */
  private async handleStatusCommand(args: string[], message: ChatMessage): Promise<void> {
    if (args.length === 0 || !args[0]) {
      await this.sendMessage(
        message.channelId,
        "Usage: `@mosaic status <job-id>` or `!mosaic status <job-id>`"
      );
      return;
    }
    const jobId = args[0];
    // TODO: Implement job status retrieval from stitcher
    await this.sendMessage(
      message.channelId,
      `Status command not yet implemented for job: ${jobId}`
    );
  }
  /**
   * Handle cancel command - Cancel a running job
   */
  private async handleCancelCommand(args: string[], message: ChatMessage): Promise<void> {
    if (args.length === 0 || !args[0]) {
      await this.sendMessage(
        message.channelId,
        "Usage: `@mosaic cancel <job-id>` or `!mosaic cancel <job-id>`"
      );
      return;
    }
    const jobId = args[0];
    // TODO: Implement job cancellation in stitcher
    await this.sendMessage(
      message.channelId,
      `Cancel command not yet implemented for job: ${jobId}`
    );
  }
  /**
   * Handle retry command - Retry a failed job
   */
  private async handleRetryCommand(args: string[], message: ChatMessage): Promise<void> {
    if (args.length === 0 || !args[0]) {
      await this.sendMessage(
        message.channelId,
        "Usage: `@mosaic retry <job-id>` or `!mosaic retry <job-id>`"
      );
      return;
    }
    const jobId = args[0];
    // TODO: Implement job retry in stitcher
    await this.sendMessage(
      message.channelId,
      `Retry command not yet implemented for job: ${jobId}`
    );
  }
  /**
   * Handle verbose command - Stream full logs to thread
   */
  private async handleVerboseCommand(args: string[], message: ChatMessage): Promise<void> {
    if (args.length === 0 || !args[0]) {
      await this.sendMessage(
        message.channelId,
        "Usage: `@mosaic verbose <job-id>` or `!mosaic verbose <job-id>`"
      );
      return;
    }
    const jobId = args[0];
    // TODO: Implement verbose logging
    await this.sendMessage(message.channelId, `Verbose mode not yet implemented for job: ${jobId}`);
  }
  /**
   * Handle quiet command - Reduce notifications
   */
  private async handleQuietCommand(_args: string[], message: ChatMessage): Promise<void> {
    // TODO: Implement quiet mode
    await this.sendMessage(
      message.channelId,
      "Quiet mode not yet implemented. Currently showing milestone updates only."
    );
  }
  /**
   * Handle help command - Show available commands
   */
  private async handleHelpCommand(_args: string[], message: ChatMessage): Promise<void> {
    const helpMessage = `
**Available commands:**
\`@mosaic fix <issue>\` or \`!mosaic fix <issue>\` - Start job for issue
\`@mosaic status <job>\` or \`!mosaic status <job>\` - Get job status
\`@mosaic cancel <job>\` or \`!mosaic cancel <job>\` - Cancel running job
\`@mosaic retry <job>\` or \`!mosaic retry <job>\` - Retry failed job
\`@mosaic verbose <job>\` or \`!mosaic verbose <job>\` - Stream full logs to thread
\`@mosaic quiet\` or \`!mosaic quiet\` - Reduce notifications
\`@mosaic help\` or \`!mosaic help\` - Show this help message
**Noise Management:**
- Main room: Low verbosity (milestones only)
- Job threads: Medium verbosity (step completions)
- DMs: Configurable per user
`.trim();
    await this.sendMessage(message.channelId, helpMessage);
  }
}

View File

@@ -10,7 +10,7 @@ import { BridgeModule } from "../bridge/bridge.module";
* - Subscribe to job events
* - Format status messages with PDA-friendly language
* - Route to appropriate channels based on workspace config
* - Support Discord (via bridge) and PR comments
* - Broadcast to ALL active chat providers via CHAT_PROVIDERS token
*/
@Module({
imports: [PrismaModule, BridgeModule],

View File

@@ -2,7 +2,8 @@ import { Test, TestingModule } from "@nestjs/testing";
import { vi, describe, it, expect, beforeEach } from "vitest";
import { HeraldService } from "./herald.service";
import { PrismaService } from "../prisma/prisma.service";
import { DiscordService } from "../bridge/discord/discord.service";
import { CHAT_PROVIDERS } from "../bridge/bridge.constants";
import type { IChatProvider } from "../bridge/interfaces/chat-provider.interface";
import {
JOB_CREATED,
JOB_STARTED,
@@ -14,10 +15,31 @@ import {
GATE_FAILED,
} from "../job-events/event-types";
/**
 * Build a fully-mocked IChatProvider for Herald broadcast tests.
 * All async methods resolve; `isConnected` reports the given flag.
 */
function createMockProvider(
  name: string,
  connected = true
): IChatProvider & {
  sendMessage: ReturnType<typeof vi.fn>;
  sendThreadMessage: ReturnType<typeof vi.fn>;
  createThread: ReturnType<typeof vi.fn>;
  isConnected: ReturnType<typeof vi.fn>;
  connect: ReturnType<typeof vi.fn>;
  disconnect: ReturnType<typeof vi.fn>;
  parseCommand: ReturnType<typeof vi.fn>;
} {
  // NOTE(review): `name` is currently unused; kept for call-site readability.
  const resolvedMock = (): ReturnType<typeof vi.fn> => vi.fn().mockResolvedValue(undefined);
  return {
    connect: resolvedMock(),
    disconnect: resolvedMock(),
    isConnected: vi.fn().mockReturnValue(connected),
    sendMessage: resolvedMock(),
    createThread: vi.fn().mockResolvedValue("thread-id"),
    sendThreadMessage: resolvedMock(),
    parseCommand: vi.fn().mockReturnValue(null),
  };
}
describe("HeraldService", () => {
let service: HeraldService;
let prisma: PrismaService;
let discord: DiscordService;
const mockPrisma = {
workspace: {
@@ -31,14 +53,15 @@ describe("HeraldService", () => {
},
};
const mockDiscord = {
isConnected: vi.fn(),
sendMessage: vi.fn(),
sendThreadMessage: vi.fn(),
createThread: vi.fn(),
};
let mockProviderA: ReturnType<typeof createMockProvider>;
let mockProviderB: ReturnType<typeof createMockProvider>;
let chatProviders: IChatProvider[];
beforeEach(async () => {
mockProviderA = createMockProvider("providerA", true);
mockProviderB = createMockProvider("providerB", true);
chatProviders = [mockProviderA, mockProviderB];
const module: TestingModule = await Test.createTestingModule({
providers: [
HeraldService,
@@ -47,25 +70,47 @@ describe("HeraldService", () => {
useValue: mockPrisma,
},
{
provide: DiscordService,
useValue: mockDiscord,
provide: CHAT_PROVIDERS,
useValue: chatProviders,
},
],
}).compile();
service = module.get<HeraldService>(HeraldService);
prisma = module.get<PrismaService>(PrismaService);
discord = module.get<DiscordService>(DiscordService);
// Reset mocks
vi.clearAllMocks();
// Restore default connected state after clearAllMocks
mockProviderA.isConnected.mockReturnValue(true);
mockProviderB.isConnected.mockReturnValue(true);
});
describe("broadcastJobEvent", () => {
it("should broadcast job.created event to configured channel", async () => {
// Arrange
const baseSetup = (): {
jobId: string;
workspaceId: string;
} => {
const workspaceId = "workspace-1";
const jobId = "job-1";
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { issueNumber: 42, threadId: "thread-123", channelId: "channel-abc" },
},
});
return { jobId, workspaceId };
};
it("should broadcast to all connected providers", async () => {
// Arrange
const { jobId } = baseSetup();
const event = {
id: "event-1",
jobId,
@@ -75,46 +120,25 @@ describe("HeraldService", () => {
payload: { issueNumber: 42 },
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: {
herald: {
channelMappings: {
"code-task": "channel-123",
},
},
},
});
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { issueNumber: 42, threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
mockDiscord.sendThreadMessage.mockResolvedValue(undefined);
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).toHaveBeenCalledWith({
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("Job created"),
});
expect(mockProviderB.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("Job created"),
});
});
it("should broadcast job.started event", async () => {
it("should broadcast job.started event to all providers", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const { jobId } = baseSetup();
const event = {
id: "event-1",
jobId,
@@ -124,40 +148,25 @@ describe("HeraldService", () => {
payload: {},
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: { herald: { channelMappings: {} } },
});
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
mockDiscord.sendThreadMessage.mockResolvedValue(undefined);
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).toHaveBeenCalledWith({
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("Job started"),
});
expect(mockProviderB.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("Job started"),
});
});
it("should broadcast job.completed event with success message", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const { jobId } = baseSetup();
const event = {
id: "event-1",
jobId,
@@ -167,40 +176,20 @@ describe("HeraldService", () => {
payload: { duration: 120 },
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: { herald: { channelMappings: {} } },
});
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
mockDiscord.sendThreadMessage.mockResolvedValue(undefined);
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).toHaveBeenCalledWith({
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("completed"),
});
});
it("should broadcast job.failed event with PDA-friendly language", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const { jobId } = baseSetup();
const event = {
id: "event-1",
jobId,
@@ -210,43 +199,30 @@ describe("HeraldService", () => {
payload: { error: "Build failed" },
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: { herald: { channelMappings: {} } },
});
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
mockDiscord.sendThreadMessage.mockResolvedValue(undefined);
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).toHaveBeenCalledWith({
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledWith({
threadId: "thread-123",
channelId: "channel-abc",
content: expect.stringContaining("encountered an issue"),
});
// Verify the actual message doesn't contain demanding language
const actualCall = mockDiscord.sendThreadMessage.mock.calls[0][0];
const actualCall = mockProviderA.sendThreadMessage.mock.calls[0][0] as {
threadId: string;
channelId: string;
content: string;
};
expect(actualCall.content).not.toMatch(/FAILED|ERROR|CRITICAL|URGENT/);
});
it("should skip broadcasting if Discord is not connected", async () => {
it("should skip disconnected providers", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const { jobId } = baseSetup();
mockProviderA.isConnected.mockReturnValue(true);
mockProviderB.isConnected.mockReturnValue(false);
const event = {
id: "event-1",
jobId,
@@ -256,14 +232,36 @@ describe("HeraldService", () => {
payload: {},
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: { herald: { channelMappings: {} } },
});
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledTimes(1);
expect(mockProviderB.sendThreadMessage).not.toHaveBeenCalled();
});
it("should handle empty providers array without crashing", async () => {
// Arrange — rebuild module with empty providers
const module: TestingModule = await Test.createTestingModule({
providers: [
HeraldService,
{
provide: PrismaService,
useValue: mockPrisma,
},
{
provide: CHAT_PROVIDERS,
useValue: [],
},
],
}).compile();
const emptyService = module.get<HeraldService>(HeraldService);
const jobId = "job-1";
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
workspaceId: "workspace-1",
type: "code-task",
});
@@ -273,36 +271,68 @@ describe("HeraldService", () => {
},
});
mockDiscord.isConnected.mockReturnValue(false);
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
// Act
// Act & Assert — should not throw
await expect(emptyService.broadcastJobEvent(jobId, event)).resolves.not.toThrow();
});
it("should continue broadcasting when one provider errors", async () => {
// Arrange
const { jobId } = baseSetup();
mockProviderA.sendThreadMessage.mockRejectedValue(new Error("Provider A rate limit"));
mockProviderB.sendThreadMessage.mockResolvedValue(undefined);
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
// Act — should not throw despite provider A failing
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).not.toHaveBeenCalled();
// Assert — provider B should still have been called
expect(mockProviderA.sendThreadMessage).toHaveBeenCalledTimes(1);
expect(mockProviderB.sendThreadMessage).toHaveBeenCalledTimes(1);
});
it("should not throw when all providers error", async () => {
// Arrange
const { jobId } = baseSetup();
mockProviderA.sendThreadMessage.mockRejectedValue(new Error("Provider A down"));
mockProviderB.sendThreadMessage.mockRejectedValue(new Error("Provider B down"));
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
// Act & Assert — should not throw; provider errors are logged, not propagated
await expect(service.broadcastJobEvent(jobId, event)).resolves.not.toThrow();
});
it("should skip broadcasting if job has no threadId", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
mockPrisma.workspace.findUnique.mockResolvedValue({
id: workspaceId,
settings: { herald: { channelMappings: {} } },
});
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
workspaceId: "workspace-1",
type: "code-task",
});
@@ -312,16 +342,45 @@ describe("HeraldService", () => {
},
});
mockDiscord.isConnected.mockReturnValue(true);
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockDiscord.sendThreadMessage).not.toHaveBeenCalled();
expect(mockProviderA.sendThreadMessage).not.toHaveBeenCalled();
expect(mockProviderB.sendThreadMessage).not.toHaveBeenCalled();
});
// ERROR HANDLING TESTS - Issue #185
it("should skip broadcasting if job not found", async () => {
// Arrange
const jobId = "nonexistent-job";
mockPrisma.runnerJob.findUnique.mockResolvedValue(null);
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
// Act
await service.broadcastJobEvent(jobId, event);
// Assert
expect(mockProviderA.sendThreadMessage).not.toHaveBeenCalled();
});
// ERROR HANDLING TESTS - database errors should still propagate
it("should propagate database errors when job lookup fails", async () => {
// Arrange
@@ -344,43 +403,8 @@ describe("HeraldService", () => {
);
});
it("should propagate Discord send failures with context", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const event = {
id: "event-1",
jobId,
type: JOB_CREATED,
timestamp: new Date(),
actor: "system",
payload: {},
};
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
const discordError = new Error("Rate limit exceeded");
mockDiscord.sendThreadMessage.mockRejectedValue(discordError);
// Act & Assert
await expect(service.broadcastJobEvent(jobId, event)).rejects.toThrow("Rate limit exceeded");
});
it("should propagate errors when fetching job events fails", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "job-1";
const event = {
id: "event-1",
@@ -393,61 +417,16 @@ describe("HeraldService", () => {
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
workspaceId: "workspace-1",
type: "code-task",
});
const dbError = new Error("Query timeout");
mockPrisma.jobEvent.findFirst.mockRejectedValue(dbError);
mockDiscord.isConnected.mockReturnValue(true);
// Act & Assert
await expect(service.broadcastJobEvent(jobId, event)).rejects.toThrow("Query timeout");
});
it("should include job context in error messages", async () => {
// Arrange
const workspaceId = "workspace-1";
const jobId = "test-job-123";
const event = {
id: "event-1",
jobId,
type: JOB_COMPLETED,
timestamp: new Date(),
actor: "system",
payload: {},
};
mockPrisma.runnerJob.findUnique.mockResolvedValue({
id: jobId,
workspaceId,
type: "code-task",
});
mockPrisma.jobEvent.findFirst.mockResolvedValue({
payload: {
metadata: { threadId: "thread-123" },
},
});
mockDiscord.isConnected.mockReturnValue(true);
const discordError = new Error("Network failure");
mockDiscord.sendThreadMessage.mockRejectedValue(discordError);
// Act & Assert
try {
await service.broadcastJobEvent(jobId, event);
// Should not reach here
expect(true).toBe(false);
} catch (error) {
// Verify error was thrown
expect(error).toBeDefined();
// Verify original error is preserved
expect((error as Error).message).toContain("Network failure");
}
});
});
describe("formatJobEventMessage", () => {
@@ -473,7 +452,6 @@ describe("HeraldService", () => {
const message = service.formatJobEventMessage(event, job, metadata);
// Assert
expect(message).toContain("🟢");
expect(message).toContain("Job created");
expect(message).toContain("#42");
expect(message.length).toBeLessThan(200); // Keep it scannable
@@ -526,7 +504,6 @@ describe("HeraldService", () => {
const message = service.formatJobEventMessage(event, job, metadata);
// Assert
expect(message).toMatch(/✅|🟢/);
expect(message).toContain("completed");
expect(message).not.toMatch(/COMPLETED|SUCCESS/);
});

View File

@@ -1,6 +1,7 @@
import { Injectable, Logger } from "@nestjs/common";
import { Inject, Injectable, Logger } from "@nestjs/common";
import { PrismaService } from "../prisma/prisma.service";
import { DiscordService } from "../bridge/discord/discord.service";
import { CHAT_PROVIDERS } from "../bridge/bridge.constants";
import type { IChatProvider } from "../bridge/interfaces/chat-provider.interface";
import {
JOB_CREATED,
JOB_STARTED,
@@ -21,7 +22,7 @@ import {
* - Subscribe to job events
* - Format status messages with PDA-friendly language
* - Route to appropriate channels based on workspace config
* - Support Discord (via bridge) and PR comments
* - Broadcast to ALL active chat providers (Discord, Matrix, etc.)
*/
@Injectable()
export class HeraldService {
@@ -29,11 +30,11 @@ export class HeraldService {
constructor(
private readonly prisma: PrismaService,
private readonly discord: DiscordService
@Inject(CHAT_PROVIDERS) private readonly chatProviders: IChatProvider[]
) {}
/**
* Broadcast a job event to the appropriate channel
* Broadcast a job event to all connected chat providers
*/
async broadcastJobEvent(
jobId: string,
@@ -47,66 +48,68 @@ export class HeraldService {
payload: unknown;
}
): Promise<void> {
try {
// Get job details
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: {
id: true,
workspaceId: true,
type: true,
},
});
// Get job details
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: {
id: true,
workspaceId: true,
type: true,
},
});
if (!job) {
this.logger.warn(`Job ${jobId} not found, skipping broadcast`);
return;
}
// Check if Discord is connected
if (!this.discord.isConnected()) {
this.logger.debug("Discord not connected, skipping broadcast");
return;
}
// Get threadId from first event payload (job.created event has metadata)
const firstEvent = await this.prisma.jobEvent.findFirst({
where: {
jobId,
type: JOB_CREATED,
},
select: {
payload: true,
},
});
const firstEventPayload = firstEvent?.payload as Record<string, unknown> | undefined;
const metadata = firstEventPayload?.metadata as Record<string, unknown> | undefined;
const threadId = metadata?.threadId as string | undefined;
if (!threadId) {
this.logger.debug(`Job ${jobId} has no threadId, skipping broadcast`);
return;
}
// Format message
const message = this.formatJobEventMessage(event, job, metadata);
// Send to thread
await this.discord.sendThreadMessage({
threadId,
content: message,
});
this.logger.debug(`Broadcasted event ${event.type} for job ${jobId} to thread ${threadId}`);
} catch (error) {
// Log the error with full context for debugging
this.logger.error(`Failed to broadcast event ${event.type} for job ${jobId}:`, error);
// Re-throw the error so callers can handle it appropriately
// This enables proper error tracking, retry logic, and alerting
throw error;
if (!job) {
this.logger.warn(`Job ${jobId} not found, skipping broadcast`);
return;
}
// Get threadId from first event payload (job.created event has metadata)
const firstEvent = await this.prisma.jobEvent.findFirst({
where: {
jobId,
type: JOB_CREATED,
},
select: {
payload: true,
},
});
const firstEventPayload = firstEvent?.payload as Record<string, unknown> | undefined;
const metadata = firstEventPayload?.metadata as Record<string, unknown> | undefined;
const threadId = metadata?.threadId as string | undefined;
const channelId = metadata?.channelId as string | undefined;
if (!threadId) {
this.logger.debug(`Job ${jobId} has no threadId, skipping broadcast`);
return;
}
// Format message
const message = this.formatJobEventMessage(event, job, metadata);
// Broadcast to all connected providers
for (const provider of this.chatProviders) {
if (!provider.isConnected()) {
continue;
}
try {
await provider.sendThreadMessage({
threadId,
channelId: channelId ?? "",
content: message,
});
} catch (error: unknown) {
// Log and continue — one provider failure must not block others
const providerName = provider.constructor.name;
this.logger.error(
`Failed to broadcast event ${event.type} for job ${jobId} via ${providerName}:`,
error instanceof Error ? error.message : error
);
}
}
this.logger.debug(`Broadcasted event ${event.type} for job ${jobId} to thread ${threadId}`);
}
/**

View File

@@ -0,0 +1,109 @@
/**
* LLM Cost Table
*
* Maps model names to per-token costs in microdollars (USD * 1,000,000).
* For example, $0.003 per 1K tokens = 3,000 microdollars per 1K tokens = 3 microdollars per token.
*
* Costs are split into input (prompt) and output (completion) pricing.
* Ollama models run locally and are free (0 cost).
*/
/**
 * Per-token cost in microdollars for a single model.
 */
export interface ModelCost {
  /** Cost per input token in microdollars */
  inputPerToken: number;
  /** Cost per output token in microdollars */
  outputPerToken: number;
}

/**
 * Pricing table keyed by model-name prefix.
 *
 * Matching is prefix-based, so "claude-sonnet-4-5" also covers dated
 * releases such as "claude-sonnet-4-5-20250929". Longer prefixes take
 * precedence (see SORTED_PREFIXES below), which lets "gpt-4o-mini" win
 * over "gpt-4o" and "gpt-4-turbo" over "gpt-4".
 *
 * All figures are microdollars per token, i.e. (USD per million tokens)
 * written directly: $3/M input == 3 microdollars per token. Prices sourced
 * from provider pricing pages as of 2026-02.
 */
const MODEL_COSTS: Record<string, ModelCost> = {
  // Anthropic Claude
  "claude-sonnet-4-5": { inputPerToken: 3, outputPerToken: 15 }, // $3/M in, $15/M out
  "claude-opus-4": { inputPerToken: 15, outputPerToken: 75 }, // $15/M in, $75/M out
  "claude-haiku-4-5": { inputPerToken: 0.8, outputPerToken: 4 }, // $0.80/M in, $4/M out
  "claude-3-5-haiku": { inputPerToken: 0.8, outputPerToken: 4 },
  "claude-3-5-sonnet": { inputPerToken: 3, outputPerToken: 15 },
  "claude-3-opus": { inputPerToken: 15, outputPerToken: 75 },
  "claude-3-sonnet": { inputPerToken: 3, outputPerToken: 15 },
  "claude-3-haiku": { inputPerToken: 0.25, outputPerToken: 1.25 }, // $0.25/M in, $1.25/M out
  // OpenAI
  "gpt-4o-mini": { inputPerToken: 0.15, outputPerToken: 0.6 }, // $0.15/M in, $0.60/M out
  "gpt-4o": { inputPerToken: 2.5, outputPerToken: 10 }, // $2.50/M in, $10/M out
  "gpt-4-turbo": { inputPerToken: 10, outputPerToken: 30 }, // $10/M in, $30/M out
  "gpt-4": { inputPerToken: 30, outputPerToken: 60 }, // $30/M in, $60/M out
  "gpt-3.5-turbo": { inputPerToken: 0.5, outputPerToken: 1.5 }, // $0.50/M in, $1.50/M out
  // Ollama / local models have no entry: anything unmatched falls through
  // to the zero-cost default in getModelCost.
};

/**
 * Known prefixes ordered longest-first so greedy matching picks the most
 * specific entry (e.g. "gpt-4o-mini" before "gpt-4o").
 */
const SORTED_PREFIXES = Object.keys(MODEL_COSTS).sort((a, b) => b.length - a.length);

/**
 * Look up per-token pricing for a model.
 *
 * The model name is lower-cased and compared against known prefixes from
 * longest to shortest; the first (i.e. longest) match wins. Models with no
 * matching prefix are treated as local/free.
 *
 * @param modelName - Full model name (e.g. "claude-sonnet-4-5-20250929", "gpt-4o")
 * @returns Per-token cost in microdollars
 */
export function getModelCost(modelName: string): ModelCost {
  const name = modelName.toLowerCase();
  const matched = SORTED_PREFIXES.find((prefix) => name.startsWith(prefix));
  const cost = matched === undefined ? undefined : MODEL_COSTS[matched];
  // Unknown or locally-hosted model — assume free.
  return cost ?? { inputPerToken: 0, outputPerToken: 0 };
}

/**
 * Compute the total cost of a single LLM call in microdollars.
 *
 * @param modelName - Full model name
 * @param inputTokens - Number of input (prompt) tokens
 * @param outputTokens - Number of output (completion) tokens
 * @returns Total cost in microdollars (USD * 1,000,000), rounded to an integer
 */
export function calculateCostMicrodollars(
  modelName: string,
  inputTokens: number,
  outputTokens: number
): number {
  const { inputPerToken, outputPerToken } = getModelCost(modelName);
  return Math.round(inputPerToken * inputTokens + outputPerToken * outputTokens);
}

View File

@@ -0,0 +1,487 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { TaskType, Complexity, Harness, Provider, Outcome } from "@mosaicstack/telemetry-client";
import type { TaskCompletionEvent, EventBuilderParams } from "@mosaicstack/telemetry-client";
import { MosaicTelemetryService } from "../mosaic-telemetry/mosaic-telemetry.service";
import {
LlmTelemetryTrackerService,
estimateTokens,
mapProviderType,
mapHarness,
inferTaskType,
} from "./llm-telemetry-tracker.service";
import type { LlmCompletionParams } from "./llm-telemetry-tracker.service";
import { getModelCost, calculateCostMicrodollars } from "./llm-cost-table";
// ---------- Cost Table Tests ----------
// Pure-function tests for the pricing table; expected values must mirror
// the MODEL_COSTS entries in llm-cost-table.ts.
describe("llm-cost-table", () => {
  describe("getModelCost", () => {
    it("should return cost for claude-sonnet-4-5 models", () => {
      const cost = getModelCost("claude-sonnet-4-5-20250929");
      expect(cost.inputPerToken).toBe(3);
      expect(cost.outputPerToken).toBe(15);
    });
    it("should return cost for claude-opus-4 models", () => {
      const cost = getModelCost("claude-opus-4-6");
      expect(cost.inputPerToken).toBe(15);
      expect(cost.outputPerToken).toBe(75);
    });
    it("should return cost for claude-haiku-4-5 models", () => {
      const cost = getModelCost("claude-haiku-4-5-20251001");
      expect(cost.inputPerToken).toBe(0.8);
      expect(cost.outputPerToken).toBe(4);
    });
    it("should return cost for gpt-4o", () => {
      const cost = getModelCost("gpt-4o");
      expect(cost.inputPerToken).toBe(2.5);
      expect(cost.outputPerToken).toBe(10);
    });
    // Guards the longest-prefix-wins rule: "gpt-4o-mini" must not fall
    // into the "gpt-4o" bucket.
    it("should return cost for gpt-4o-mini (longer prefix matches first)", () => {
      const cost = getModelCost("gpt-4o-mini");
      expect(cost.inputPerToken).toBe(0.15);
      expect(cost.outputPerToken).toBe(0.6);
    });
    it("should return zero cost for unknown/local models", () => {
      const cost = getModelCost("llama3.2");
      expect(cost.inputPerToken).toBe(0);
      expect(cost.outputPerToken).toBe(0);
    });
    it("should return zero cost for ollama models", () => {
      const cost = getModelCost("mistral:7b");
      expect(cost.inputPerToken).toBe(0);
      expect(cost.outputPerToken).toBe(0);
    });
    it("should be case-insensitive", () => {
      const cost = getModelCost("Claude-Sonnet-4-5-20250929");
      expect(cost.inputPerToken).toBe(3);
    });
  });
  // Total-cost arithmetic: input*inputPerToken + output*outputPerToken,
  // rounded to an integer number of microdollars.
  describe("calculateCostMicrodollars", () => {
    it("should calculate cost for claude-sonnet-4-5 with token counts", () => {
      // 1000 input tokens * 3 + 500 output tokens * 15 = 3000 + 7500 = 10500
      const cost = calculateCostMicrodollars("claude-sonnet-4-5-20250929", 1000, 500);
      expect(cost).toBe(10500);
    });
    it("should return 0 for local models", () => {
      const cost = calculateCostMicrodollars("llama3.2", 1000, 500);
      expect(cost).toBe(0);
    });
    it("should return 0 when token counts are 0", () => {
      const cost = calculateCostMicrodollars("claude-opus-4-6", 0, 0);
      expect(cost).toBe(0);
    });
    it("should round the result to integer microdollars", () => {
      // gpt-4o-mini: 0.15 * 3 + 0.6 * 7 = 0.45 + 4.2 = 4.65 -> rounds to 5
      const cost = calculateCostMicrodollars("gpt-4o-mini", 3, 7);
      expect(cost).toBe(5);
    });
  });
});
// ---------- Helper Function Tests ----------
// Tests for the exported pure helpers: token estimation, provider/harness
// enum mapping, and task-type inference from the calling context string.
describe("helper functions", () => {
  describe("estimateTokens", () => {
    it("should estimate ~1 token per 4 characters", () => {
      expect(estimateTokens("abcd")).toBe(1);
      expect(estimateTokens("abcdefgh")).toBe(2);
    });
    it("should round up for partial tokens", () => {
      expect(estimateTokens("abc")).toBe(1);
      expect(estimateTokens("abcde")).toBe(2);
    });
    it("should return 0 for empty string", () => {
      expect(estimateTokens("")).toBe(0);
    });
  });
  describe("mapProviderType", () => {
    it("should map claude to ANTHROPIC", () => {
      expect(mapProviderType("claude")).toBe(Provider.ANTHROPIC);
    });
    it("should map openai to OPENAI", () => {
      expect(mapProviderType("openai")).toBe(Provider.OPENAI);
    });
    it("should map ollama to OLLAMA", () => {
      expect(mapProviderType("ollama")).toBe(Provider.OLLAMA);
    });
  });
  describe("mapHarness", () => {
    // Only ollama is a local harness; every API provider maps to API_DIRECT.
    it("should map ollama to OLLAMA_LOCAL", () => {
      expect(mapHarness("ollama")).toBe(Harness.OLLAMA_LOCAL);
    });
    it("should map claude to API_DIRECT", () => {
      expect(mapHarness("claude")).toBe(Harness.API_DIRECT);
    });
    it("should map openai to API_DIRECT", () => {
      expect(mapHarness("openai")).toBe(Harness.API_DIRECT);
    });
  });
  // Keyword rules are checked in declaration order; these cases pin both
  // the mapping and its case-insensitivity.
  describe("inferTaskType", () => {
    it("should return IMPLEMENTATION for embed operation", () => {
      expect(inferTaskType("embed")).toBe(TaskType.IMPLEMENTATION);
    });
    it("should return UNKNOWN when no context provided for chat", () => {
      expect(inferTaskType("chat")).toBe(TaskType.UNKNOWN);
    });
    it("should return PLANNING for brain context", () => {
      expect(inferTaskType("chat", "brain")).toBe(TaskType.PLANNING);
    });
    it("should return PLANNING for planning context", () => {
      expect(inferTaskType("chat", "planning")).toBe(TaskType.PLANNING);
    });
    it("should return CODE_REVIEW for review context", () => {
      expect(inferTaskType("chat", "code-review")).toBe(TaskType.CODE_REVIEW);
    });
    it("should return TESTING for test context", () => {
      expect(inferTaskType("chat", "test-generation")).toBe(TaskType.TESTING);
    });
    it("should return DEBUGGING for debug context", () => {
      expect(inferTaskType("chatStream", "debug-session")).toBe(TaskType.DEBUGGING);
    });
    it("should return REFACTORING for refactor context", () => {
      expect(inferTaskType("chat", "refactor")).toBe(TaskType.REFACTORING);
    });
    it("should return DOCUMENTATION for doc context", () => {
      expect(inferTaskType("chat", "documentation")).toBe(TaskType.DOCUMENTATION);
    });
    it("should return CONFIGURATION for config context", () => {
      expect(inferTaskType("chat", "config-update")).toBe(TaskType.CONFIGURATION);
    });
    it("should return SECURITY_AUDIT for security context", () => {
      expect(inferTaskType("chat", "security-check")).toBe(TaskType.SECURITY_AUDIT);
    });
    it("should return IMPLEMENTATION for chat context", () => {
      expect(inferTaskType("chat", "chat")).toBe(TaskType.IMPLEMENTATION);
    });
    it("should be case-insensitive", () => {
      expect(inferTaskType("chat", "BRAIN")).toBe(TaskType.PLANNING);
    });
    it("should return UNKNOWN for unrecognized context", () => {
      expect(inferTaskType("chat", "something-else")).toBe(TaskType.UNKNOWN);
    });
  });
});
// ---------- LlmTelemetryTrackerService Tests ----------
// Service-level tests. MosaicTelemetryService is replaced with a hand-rolled
// mock so we can inspect the EventBuilderParams handed to eventBuilder.build()
// and verify the service's fire-and-forget error handling.
describe("LlmTelemetryTrackerService", () => {
  let service: LlmTelemetryTrackerService;
  // Shape-compatible stand-in for MosaicTelemetryService; eventBuilder is
  // nullable so tests can simulate the "telemetry disabled" state.
  let mockTelemetryService: {
    eventBuilder: { build: ReturnType<typeof vi.fn> } | null;
    trackTaskCompletion: ReturnType<typeof vi.fn>;
    isEnabled: boolean;
  };
  // Canned event returned by the mocked builder. Its exact contents are
  // irrelevant to most tests — assertions target the builder's *inputs*.
  const mockEvent: TaskCompletionEvent = {
    instance_id: "test-instance",
    event_id: "test-event",
    schema_version: "1.0.0",
    timestamp: new Date().toISOString(),
    task_duration_ms: 1000,
    task_type: TaskType.IMPLEMENTATION,
    complexity: Complexity.LOW,
    harness: Harness.API_DIRECT,
    model: "claude-sonnet-4-5-20250929",
    provider: Provider.ANTHROPIC,
    estimated_input_tokens: 100,
    estimated_output_tokens: 200,
    actual_input_tokens: 100,
    actual_output_tokens: 200,
    estimated_cost_usd_micros: 3300,
    actual_cost_usd_micros: 3300,
    quality_gate_passed: true,
    quality_gates_run: [],
    quality_gates_failed: [],
    context_compactions: 0,
    context_rotations: 0,
    context_utilization_final: 0,
    outcome: Outcome.SUCCESS,
    retry_count: 0,
  };
  // Fresh mock + Nest testing module per test. PredictionService is NOT
  // provided, so estimated_* fields are expected to fall back to 0.
  beforeEach(async () => {
    mockTelemetryService = {
      eventBuilder: {
        build: vi.fn().mockReturnValue(mockEvent),
      },
      trackTaskCompletion: vi.fn(),
      isEnabled: true,
    };
    const module: TestingModule = await Test.createTestingModule({
      providers: [
        LlmTelemetryTrackerService,
        {
          provide: MosaicTelemetryService,
          useValue: mockTelemetryService,
        },
      ],
    }).compile();
    service = module.get<LlmTelemetryTrackerService>(LlmTelemetryTrackerService);
  });
  it("should be defined", () => {
    expect(service).toBeDefined();
  });
  describe("trackLlmCompletion", () => {
    // Representative successful Anthropic chat call; individual tests
    // override single fields via spread.
    const baseParams: LlmCompletionParams = {
      model: "claude-sonnet-4-5-20250929",
      providerType: "claude",
      operation: "chat",
      durationMs: 1200,
      inputTokens: 150,
      outputTokens: 300,
      callingContext: "chat",
      success: true,
    };
    it("should build and track a telemetry event for Anthropic provider", () => {
      service.trackLlmCompletion(baseParams);
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          task_duration_ms: 1200,
          task_type: TaskType.IMPLEMENTATION,
          complexity: Complexity.LOW,
          harness: Harness.API_DIRECT,
          model: "claude-sonnet-4-5-20250929",
          provider: Provider.ANTHROPIC,
          actual_input_tokens: 150,
          actual_output_tokens: 300,
          outcome: Outcome.SUCCESS,
        })
      );
      expect(mockTelemetryService.trackTaskCompletion).toHaveBeenCalledWith(mockEvent);
    });
    it("should build and track a telemetry event for OpenAI provider", () => {
      service.trackLlmCompletion({
        ...baseParams,
        model: "gpt-4o",
        providerType: "openai",
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          model: "gpt-4o",
          provider: Provider.OPENAI,
          harness: Harness.API_DIRECT,
        })
      );
    });
    it("should build and track a telemetry event for Ollama provider", () => {
      service.trackLlmCompletion({
        ...baseParams,
        model: "llama3.2",
        providerType: "ollama",
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          model: "llama3.2",
          provider: Provider.OLLAMA,
          harness: Harness.OLLAMA_LOCAL,
        })
      );
    });
    it("should calculate cost in microdollars correctly", () => {
      service.trackLlmCompletion(baseParams);
      // claude-sonnet-4-5: 150 * 3 + 300 * 15 = 450 + 4500 = 4950
      const expectedActualCost = 4950;
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          // Estimated values are 0 when no PredictionService is injected
          estimated_cost_usd_micros: 0,
          actual_cost_usd_micros: expectedActualCost,
        })
      );
    });
    it("should calculate zero cost for ollama models", () => {
      service.trackLlmCompletion({
        ...baseParams,
        model: "llama3.2",
        providerType: "ollama",
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          estimated_cost_usd_micros: 0,
          actual_cost_usd_micros: 0,
        })
      );
    });
    it("should track FAILURE outcome when success is false", () => {
      service.trackLlmCompletion({
        ...baseParams,
        success: false,
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          outcome: Outcome.FAILURE,
        })
      );
    });
    it("should infer task type from calling context", () => {
      service.trackLlmCompletion({
        ...baseParams,
        callingContext: "brain",
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          task_type: TaskType.PLANNING,
        })
      );
    });
    it("should set empty quality gates arrays for direct LLM calls", () => {
      service.trackLlmCompletion(baseParams);
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          quality_gate_passed: true,
          quality_gates_run: [],
          quality_gates_failed: [],
        })
      );
    });
    it("should silently skip when telemetry is disabled (eventBuilder is null)", () => {
      mockTelemetryService.eventBuilder = null;
      // Should not throw
      service.trackLlmCompletion(baseParams);
      expect(mockTelemetryService.trackTaskCompletion).not.toHaveBeenCalled();
    });
    // Fire-and-forget contract: telemetry failures must never reach callers.
    it("should not throw when eventBuilder.build throws an error", () => {
      mockTelemetryService.eventBuilder = {
        build: vi.fn().mockImplementation(() => {
          throw new Error("Build failed");
        }),
      };
      // Should not throw
      expect(() => service.trackLlmCompletion(baseParams)).not.toThrow();
    });
    it("should not throw when trackTaskCompletion throws an error", () => {
      mockTelemetryService.trackTaskCompletion.mockImplementation(() => {
        throw new Error("Track failed");
      });
      // Should not throw
      expect(() => service.trackLlmCompletion(baseParams)).not.toThrow();
    });
    it("should handle streaming operation with estimated tokens", () => {
      service.trackLlmCompletion({
        ...baseParams,
        operation: "chatStream",
        inputTokens: 50,
        outputTokens: 100,
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          actual_input_tokens: 50,
          actual_output_tokens: 100,
          // Estimated values are 0 when no PredictionService is injected
          estimated_input_tokens: 0,
          estimated_output_tokens: 0,
        })
      );
    });
    it("should handle embed operation", () => {
      service.trackLlmCompletion({
        ...baseParams,
        operation: "embed",
        outputTokens: 0,
        callingContext: undefined,
      });
      expect(mockTelemetryService.eventBuilder?.build).toHaveBeenCalledWith(
        expect.objectContaining({
          task_type: TaskType.IMPLEMENTATION,
          actual_output_tokens: 0,
        })
      );
    });
    it("should pass all required EventBuilderParams fields", () => {
      service.trackLlmCompletion(baseParams);
      const buildCall = (mockTelemetryService.eventBuilder?.build as ReturnType<typeof vi.fn>).mock
        .calls[0][0] as EventBuilderParams;
      // Verify all required fields are present
      expect(buildCall).toHaveProperty("task_duration_ms");
      expect(buildCall).toHaveProperty("task_type");
      expect(buildCall).toHaveProperty("complexity");
      expect(buildCall).toHaveProperty("harness");
      expect(buildCall).toHaveProperty("model");
      expect(buildCall).toHaveProperty("provider");
      expect(buildCall).toHaveProperty("estimated_input_tokens");
      expect(buildCall).toHaveProperty("estimated_output_tokens");
      expect(buildCall).toHaveProperty("actual_input_tokens");
      expect(buildCall).toHaveProperty("actual_output_tokens");
      expect(buildCall).toHaveProperty("estimated_cost_usd_micros");
      expect(buildCall).toHaveProperty("actual_cost_usd_micros");
      expect(buildCall).toHaveProperty("quality_gate_passed");
      expect(buildCall).toHaveProperty("quality_gates_run");
      expect(buildCall).toHaveProperty("quality_gates_failed");
      expect(buildCall).toHaveProperty("context_compactions");
      expect(buildCall).toHaveProperty("context_rotations");
      expect(buildCall).toHaveProperty("context_utilization_final");
      expect(buildCall).toHaveProperty("outcome");
      expect(buildCall).toHaveProperty("retry_count");
    });
  });
});

View File

@@ -0,0 +1,224 @@
import { Injectable, Logger, Optional } from "@nestjs/common";
import { MosaicTelemetryService } from "../mosaic-telemetry/mosaic-telemetry.service";
import { PredictionService } from "../mosaic-telemetry/prediction.service";
import { TaskType, Complexity, Harness, Provider, Outcome } from "@mosaicstack/telemetry-client";
import type { LlmProviderType } from "./providers/llm-provider.interface";
import { calculateCostMicrodollars } from "./llm-cost-table";
/**
 * Parameters for tracking an LLM completion event.
 *
 * One instance describes a single chat / stream / embedding call made
 * through the LLM provider layer.
 */
export interface LlmCompletionParams {
  /** Full model name (e.g. "claude-sonnet-4-5-20250929") */
  model: string;
  /** Provider type discriminator */
  providerType: LlmProviderType;
  /** Operation type that was performed */
  operation: "chat" | "chatStream" | "embed";
  /** Duration of the LLM call in milliseconds */
  durationMs: number;
  /**
   * Number of input (prompt) tokens consumed.
   * NOTE(review): for chatStream calls the caller presumably passes
   * estimateTokens() approximations rather than exact counts — confirm
   * at the call sites.
   */
  inputTokens: number;
  /** Number of output (completion) tokens generated */
  outputTokens: number;
  /**
   * Optional calling context hint for task type inference.
   * Examples: "brain", "chat", "embed", "planning", "code-review"
   */
  callingContext?: string | undefined;
  /** Whether the call succeeded or failed */
  success: boolean;
}
/**
 * Rough token-count estimate for a piece of text.
 *
 * Uses the common heuristic of ~4 characters per token (GPT/Claude
 * average), rounding up so any non-empty text counts as at least one token.
 */
export function estimateTokens(text: string): number {
  const APPROX_CHARS_PER_TOKEN = 4;
  return Math.ceil(text.length / APPROX_CHARS_PER_TOKEN);
}
/** Lookup from provider discriminator to the telemetry Provider enum. */
const PROVIDER_BY_TYPE: Partial<Record<LlmProviderType, Provider>> = {
  claude: Provider.ANTHROPIC,
  openai: Provider.OPENAI,
  ollama: Provider.OLLAMA,
};

/** Map LLM provider type to telemetry Provider enum (UNKNOWN when unmapped) */
export function mapProviderType(providerType: LlmProviderType): Provider {
  return PROVIDER_BY_TYPE[providerType] ?? Provider.UNKNOWN;
}
/**
 * Map LLM provider type to telemetry Harness enum.
 * Only Ollama runs locally; every other provider is reached via direct API.
 */
export function mapHarness(providerType: LlmProviderType): Harness {
  return providerType === "ollama" ? Harness.OLLAMA_LOCAL : Harness.API_DIRECT;
}
/**
 * Keyword → TaskType rules, evaluated in declaration order; the first rule
 * whose keyword appears in the (lower-cased) context wins. Order matters:
 * e.g. "brain"/"plan" must outrank the broad "chat"/"implement" fallback.
 */
const TASK_TYPE_RULES: ReadonlyArray<readonly [readonly string[], TaskType]> = [
  [["brain", "planning", "plan"], TaskType.PLANNING],
  [["review", "code-review"], TaskType.CODE_REVIEW],
  [["test"], TaskType.TESTING],
  [["debug"], TaskType.DEBUGGING],
  [["refactor"], TaskType.REFACTORING],
  [["doc"], TaskType.DOCUMENTATION],
  [["config"], TaskType.CONFIGURATION],
  [["security", "audit"], TaskType.SECURITY_AUDIT],
  [["chat", "implement"], TaskType.IMPLEMENTATION],
];

/**
 * Infer the task type from calling context and operation.
 *
 * @param operation - The LLM operation (chat, chatStream, embed)
 * @param callingContext - Optional hint about the caller's purpose
 * @returns Inferred TaskType
 */
export function inferTaskType(
  operation: "chat" | "chatStream" | "embed",
  callingContext?: string
): TaskType {
  // Embedding operations are typically for indexing/search.
  if (operation === "embed") {
    return TaskType.IMPLEMENTATION;
  }
  if (!callingContext) {
    return TaskType.UNKNOWN;
  }
  const ctx = callingContext.toLowerCase();
  for (const [keywords, taskType] of TASK_TYPE_RULES) {
    if (keywords.some((keyword) => ctx.includes(keyword))) {
      return taskType;
    }
  }
  return TaskType.UNKNOWN;
}
/**
* LLM Telemetry Tracker Service
*
* Builds and submits telemetry events for LLM completions.
* All tracking is non-blocking and fire-and-forget; telemetry errors
* never propagate to the caller.
*
* @example
* ```typescript
* // After a successful chat completion
* this.telemetryTracker.trackLlmCompletion({
* model: "claude-sonnet-4-5-20250929",
* providerType: "claude",
* operation: "chat",
* durationMs: 1200,
* inputTokens: 150,
* outputTokens: 300,
* callingContext: "chat",
* success: true,
* });
* ```
*/
@Injectable()
export class LlmTelemetryTrackerService {
private readonly logger = new Logger(LlmTelemetryTrackerService.name);
constructor(
private readonly telemetry: MosaicTelemetryService,
@Optional() private readonly predictionService?: PredictionService
) {}
/**
* Track an LLM completion event via Mosaic Telemetry.
*
* This method is intentionally fire-and-forget. It catches all errors
* internally and logs them without propagating to the caller.
*
* @param params - LLM completion parameters
*/
trackLlmCompletion(params: LlmCompletionParams): void {
try {
const builder = this.telemetry.eventBuilder;
if (!builder) {
// Telemetry is disabled — silently skip
return;
}
const taskType = inferTaskType(params.operation, params.callingContext);
const provider = mapProviderType(params.providerType);
const costMicrodollars = calculateCostMicrodollars(
params.model,
params.inputTokens,
params.outputTokens
);
// Query predictions for estimated fields (graceful degradation)
let estimatedInputTokens = 0;
let estimatedOutputTokens = 0;
let estimatedCostMicros = 0;
if (this.predictionService) {
const prediction = this.predictionService.getEstimate(
taskType,
params.model,
provider,
Complexity.LOW
);
if (prediction?.prediction && prediction.metadata.confidence !== "none") {
estimatedInputTokens = prediction.prediction.input_tokens.median;
estimatedOutputTokens = prediction.prediction.output_tokens.median;
estimatedCostMicros = prediction.prediction.cost_usd_micros.median ?? 0;
}
}
const event = builder.build({
task_duration_ms: params.durationMs,
task_type: taskType,
complexity: Complexity.LOW,
harness: mapHarness(params.providerType),
model: params.model,
provider,
estimated_input_tokens: estimatedInputTokens,
estimated_output_tokens: estimatedOutputTokens,
actual_input_tokens: params.inputTokens,
actual_output_tokens: params.outputTokens,
estimated_cost_usd_micros: estimatedCostMicros,
actual_cost_usd_micros: costMicrodollars,
quality_gate_passed: true,
quality_gates_run: [],
quality_gates_failed: [],
context_compactions: 0,
context_rotations: 0,
context_utilization_final: 0,
outcome: params.success ? Outcome.SUCCESS : Outcome.FAILURE,
retry_count: 0,
});
this.telemetry.trackTaskCompletion(event);
} catch (error: unknown) {
// Never let telemetry errors propagate
const msg = error instanceof Error ? error.message : String(error);
this.logger.warn(`Failed to track LLM telemetry event: ${msg}`);
}
}
}

View File

@@ -3,13 +3,14 @@ import { LlmController } from "./llm.controller";
import { LlmProviderAdminController } from "./llm-provider-admin.controller";
import { LlmService } from "./llm.service";
import { LlmManagerService } from "./llm-manager.service";
import { LlmTelemetryTrackerService } from "./llm-telemetry-tracker.service";
import { PrismaModule } from "../prisma/prisma.module";
import { LlmUsageModule } from "../llm-usage/llm-usage.module";
@Module({
imports: [PrismaModule, LlmUsageModule],
controllers: [LlmController, LlmProviderAdminController],
providers: [LlmService, LlmManagerService],
providers: [LlmService, LlmManagerService, LlmTelemetryTrackerService],
exports: [LlmService, LlmManagerService],
})
export class LlmModule {}

View File

@@ -3,6 +3,7 @@ import { Test, TestingModule } from "@nestjs/testing";
import { ServiceUnavailableException } from "@nestjs/common";
import { LlmService } from "./llm.service";
import { LlmManagerService } from "./llm-manager.service";
import { LlmTelemetryTrackerService } from "./llm-telemetry-tracker.service";
import type { ChatRequestDto, EmbedRequestDto, ChatResponseDto, EmbedResponseDto } from "./dto";
import type {
LlmProviderInterface,
@@ -14,6 +15,9 @@ describe("LlmService", () => {
let mockManagerService: {
getDefaultProvider: ReturnType<typeof vi.fn>;
};
let mockTelemetryTracker: {
trackLlmCompletion: ReturnType<typeof vi.fn>;
};
let mockProvider: {
chat: ReturnType<typeof vi.fn>;
chatStream: ReturnType<typeof vi.fn>;
@@ -41,6 +45,11 @@ describe("LlmService", () => {
getDefaultProvider: vi.fn().mockResolvedValue(mockProvider),
};
// Create mock telemetry tracker
mockTelemetryTracker = {
trackLlmCompletion: vi.fn(),
};
const module: TestingModule = await Test.createTestingModule({
providers: [
LlmService,
@@ -48,6 +57,10 @@ describe("LlmService", () => {
provide: LlmManagerService,
useValue: mockManagerService,
},
{
provide: LlmTelemetryTrackerService,
useValue: mockTelemetryTracker,
},
],
}).compile();
@@ -135,6 +148,45 @@ describe("LlmService", () => {
expect(result).toEqual(response);
});
it("should track telemetry on successful chat", async () => {
const response: ChatResponseDto = {
model: "llama3.2",
message: { role: "assistant", content: "Hello" },
done: true,
promptEvalCount: 10,
evalCount: 20,
};
mockProvider.chat.mockResolvedValue(response);
await service.chat(request, "chat");
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
model: "llama3.2",
providerType: "ollama",
operation: "chat",
inputTokens: 10,
outputTokens: 20,
callingContext: "chat",
success: true,
})
);
});
it("should track telemetry on failed chat", async () => {
mockProvider.chat.mockRejectedValue(new Error("Chat failed"));
await expect(service.chat(request)).rejects.toThrow(ServiceUnavailableException);
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
model: "llama3.2",
operation: "chat",
success: false,
})
);
});
it("should throw ServiceUnavailableException on error", async () => {
mockProvider.chat.mockRejectedValue(new Error("Chat failed"));
@@ -177,6 +229,94 @@ describe("LlmService", () => {
expect(chunks[1].message.content).toBe(" world");
});
it("should track telemetry after stream completes", async () => {
async function* mockGenerator(): AsyncGenerator<ChatResponseDto> {
yield {
model: "llama3.2",
message: { role: "assistant", content: "Hello" },
done: false,
};
yield {
model: "llama3.2",
message: { role: "assistant", content: " world" },
done: true,
promptEvalCount: 5,
evalCount: 10,
};
}
mockProvider.chatStream.mockReturnValue(mockGenerator());
const chunks: ChatResponseDto[] = [];
for await (const chunk of service.chatStream(request, "brain")) {
chunks.push(chunk);
}
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
model: "llama3.2",
providerType: "ollama",
operation: "chatStream",
inputTokens: 5,
outputTokens: 10,
callingContext: "brain",
success: true,
})
);
});
it("should estimate tokens when provider does not return counts in stream", async () => {
async function* mockGenerator(): AsyncGenerator<ChatResponseDto> {
yield {
model: "llama3.2",
message: { role: "assistant", content: "Hello world" },
done: false,
};
yield {
model: "llama3.2",
message: { role: "assistant", content: "" },
done: true,
};
}
mockProvider.chatStream.mockReturnValue(mockGenerator());
const chunks: ChatResponseDto[] = [];
for await (const chunk of service.chatStream(request)) {
chunks.push(chunk);
}
// Should use estimated tokens since no actual counts provided
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
operation: "chatStream",
success: true,
// Input estimated from "Hi" -> ceil(2/4) = 1
inputTokens: 1,
// Output estimated from "Hello world" -> ceil(11/4) = 3
outputTokens: 3,
})
);
});
it("should track telemetry on stream failure", async () => {
async function* errorGenerator(): AsyncGenerator<ChatResponseDto> {
throw new Error("Stream failed");
}
mockProvider.chatStream.mockReturnValue(errorGenerator());
const generator = service.chatStream(request);
await expect(generator.next()).rejects.toThrow(ServiceUnavailableException);
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
operation: "chatStream",
success: false,
})
);
});
it("should throw ServiceUnavailableException on error", async () => {
async function* errorGenerator(): AsyncGenerator<ChatResponseDto> {
throw new Error("Stream failed");
@@ -210,6 +350,41 @@ describe("LlmService", () => {
expect(result).toEqual(response);
});
it("should track telemetry on successful embed", async () => {
const response: EmbedResponseDto = {
model: "llama3.2",
embeddings: [[0.1, 0.2, 0.3]],
totalDuration: 500,
};
mockProvider.embed.mockResolvedValue(response);
await service.embed(request, "embed");
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
model: "llama3.2",
providerType: "ollama",
operation: "embed",
outputTokens: 0,
callingContext: "embed",
success: true,
})
);
});
it("should track telemetry on failed embed", async () => {
mockProvider.embed.mockRejectedValue(new Error("Embedding failed"));
await expect(service.embed(request)).rejects.toThrow(ServiceUnavailableException);
expect(mockTelemetryTracker.trackLlmCompletion).toHaveBeenCalledWith(
expect.objectContaining({
operation: "embed",
success: false,
})
);
});
it("should throw ServiceUnavailableException on error", async () => {
mockProvider.embed.mockRejectedValue(new Error("Embedding failed"));

View File

@@ -1,13 +1,15 @@
import { Injectable, OnModuleInit, Logger, ServiceUnavailableException } from "@nestjs/common";
import { LlmManagerService } from "./llm-manager.service";
import { LlmTelemetryTrackerService, estimateTokens } from "./llm-telemetry-tracker.service";
import type { ChatRequestDto, ChatResponseDto, EmbedRequestDto, EmbedResponseDto } from "./dto";
import type { LlmProviderHealthStatus } from "./providers/llm-provider.interface";
import type { LlmProviderHealthStatus, LlmProviderType } from "./providers/llm-provider.interface";
/**
* LLM Service
*
* High-level service for LLM operations. Delegates to providers via LlmManagerService.
* Maintains backward compatibility with the original API while supporting multiple providers.
* Automatically tracks completions via Mosaic Telemetry (non-blocking).
*
* @example
* ```typescript
@@ -33,7 +35,10 @@ import type { LlmProviderHealthStatus } from "./providers/llm-provider.interface
export class LlmService implements OnModuleInit {
private readonly logger = new Logger(LlmService.name);
constructor(private readonly llmManager: LlmManagerService) {
constructor(
private readonly llmManager: LlmManagerService,
private readonly telemetryTracker: LlmTelemetryTrackerService
) {
this.logger.log("LLM service initialized");
}
@@ -91,14 +96,45 @@ export class LlmService implements OnModuleInit {
* Perform a synchronous chat completion.
*
* @param request - Chat request with messages and configuration
* @param callingContext - Optional context hint for telemetry task type inference
* @returns Complete chat response
* @throws {ServiceUnavailableException} If provider is unavailable or request fails
*/
async chat(request: ChatRequestDto): Promise<ChatResponseDto> {
async chat(request: ChatRequestDto, callingContext?: string): Promise<ChatResponseDto> {
const startTime = Date.now();
let providerType: LlmProviderType = "ollama";
try {
const provider = await this.llmManager.getDefaultProvider();
return await provider.chat(request);
providerType = provider.type;
const response = await provider.chat(request);
// Fire-and-forget telemetry tracking
this.telemetryTracker.trackLlmCompletion({
model: response.model,
providerType,
operation: "chat",
durationMs: Date.now() - startTime,
inputTokens: response.promptEvalCount ?? 0,
outputTokens: response.evalCount ?? 0,
callingContext,
success: true,
});
return response;
} catch (error: unknown) {
// Track failure (fire-and-forget)
this.telemetryTracker.trackLlmCompletion({
model: request.model,
providerType,
operation: "chat",
durationMs: Date.now() - startTime,
inputTokens: 0,
outputTokens: 0,
callingContext,
success: false,
});
const errorMessage = error instanceof Error ? error.message : String(error);
this.logger.error(`Chat failed: ${errorMessage}`);
throw new ServiceUnavailableException(`Chat completion failed: ${errorMessage}`);
@@ -107,20 +143,75 @@ export class LlmService implements OnModuleInit {
/**
* Perform a streaming chat completion.
* Yields response chunks as they arrive from the provider.
* Aggregates token usage and tracks telemetry after the stream ends.
*
* @param request - Chat request with messages and configuration
* @param callingContext - Optional context hint for telemetry task type inference
* @yields Chat response chunks
* @throws {ServiceUnavailableException} If provider is unavailable or request fails
*/
async *chatStream(request: ChatRequestDto): AsyncGenerator<ChatResponseDto, void, unknown> {
async *chatStream(
request: ChatRequestDto,
callingContext?: string
): AsyncGenerator<ChatResponseDto, void, unknown> {
const startTime = Date.now();
let providerType: LlmProviderType = "ollama";
let aggregatedContent = "";
let lastChunkInputTokens = 0;
let lastChunkOutputTokens = 0;
try {
const provider = await this.llmManager.getDefaultProvider();
providerType = provider.type;
const stream = provider.chatStream(request);
for await (const chunk of stream) {
// Accumulate content for token estimation
aggregatedContent += chunk.message.content;
// Some providers include token counts on the final chunk
if (chunk.promptEvalCount !== undefined) {
lastChunkInputTokens = chunk.promptEvalCount;
}
if (chunk.evalCount !== undefined) {
lastChunkOutputTokens = chunk.evalCount;
}
yield chunk;
}
// After stream completes, track telemetry
// Use actual token counts if available, otherwise estimate from content length
const inputTokens =
lastChunkInputTokens > 0
? lastChunkInputTokens
: estimateTokens(request.messages.map((m) => m.content).join(" "));
const outputTokens =
lastChunkOutputTokens > 0 ? lastChunkOutputTokens : estimateTokens(aggregatedContent);
this.telemetryTracker.trackLlmCompletion({
model: request.model,
providerType,
operation: "chatStream",
durationMs: Date.now() - startTime,
inputTokens,
outputTokens,
callingContext,
success: true,
});
} catch (error: unknown) {
// Track failure (fire-and-forget)
this.telemetryTracker.trackLlmCompletion({
model: request.model,
providerType,
operation: "chatStream",
durationMs: Date.now() - startTime,
inputTokens: 0,
outputTokens: 0,
callingContext,
success: false,
});
const errorMessage = error instanceof Error ? error.message : String(error);
this.logger.error(`Stream failed: ${errorMessage}`);
throw new ServiceUnavailableException(`Streaming failed: ${errorMessage}`);
@@ -130,14 +221,48 @@ export class LlmService implements OnModuleInit {
* Generate embeddings for the given input texts.
*
* @param request - Embedding request with model and input texts
* @param callingContext - Optional context hint for telemetry task type inference
* @returns Embeddings response with vector arrays
* @throws {ServiceUnavailableException} If provider is unavailable or request fails
*/
async embed(request: EmbedRequestDto): Promise<EmbedResponseDto> {
async embed(request: EmbedRequestDto, callingContext?: string): Promise<EmbedResponseDto> {
const startTime = Date.now();
let providerType: LlmProviderType = "ollama";
try {
const provider = await this.llmManager.getDefaultProvider();
return await provider.embed(request);
providerType = provider.type;
const response = await provider.embed(request);
// Estimate input tokens from the input text
const inputTokens = estimateTokens(request.input.join(" "));
// Fire-and-forget telemetry tracking
this.telemetryTracker.trackLlmCompletion({
model: response.model,
providerType,
operation: "embed",
durationMs: Date.now() - startTime,
inputTokens,
outputTokens: 0, // Embeddings don't produce output tokens
callingContext,
success: true,
});
return response;
} catch (error: unknown) {
// Track failure (fire-and-forget)
this.telemetryTracker.trackLlmCompletion({
model: request.model,
providerType,
operation: "embed",
durationMs: Date.now() - startTime,
inputTokens: 0,
outputTokens: 0,
callingContext,
success: false,
});
const errorMessage = error instanceof Error ? error.message : String(error);
this.logger.error(`Embed failed: ${errorMessage}`);
throw new ServiceUnavailableException(`Embedding failed: ${errorMessage}`);

View File

@@ -0,0 +1,17 @@
/**
 * Mosaic Telemetry module — task completion tracking and crowd-sourced predictions.
 *
 * **Not to be confused with the OpenTelemetry (OTEL) TelemetryModule** at
 * `src/telemetry/`, which handles distributed request tracing.
 *
 * Barrel file: re-exports the NestJS module, its service, and the
 * environment-based configuration helpers so consumers can import from the
 * directory root instead of individual files.
 *
 * @module mosaic-telemetry
 */
export { MosaicTelemetryModule } from "./mosaic-telemetry.module";
export { MosaicTelemetryService } from "./mosaic-telemetry.service";
export {
  loadMosaicTelemetryConfig,
  toSdkConfig,
  MOSAIC_TELEMETRY_ENV,
  type MosaicTelemetryModuleConfig,
} from "./mosaic-telemetry.config";

View File

@@ -0,0 +1,78 @@
import type { ConfigService } from "@nestjs/config";
import type { TelemetryConfig } from "@mosaicstack/telemetry-client";
/**
 * Configuration interface for the Mosaic Telemetry module.
 * Maps MOSAIC_TELEMETRY_* environment variables to SDK configuration;
 * populated by loadMosaicTelemetryConfig() and converted via toSdkConfig().
 */
export interface MosaicTelemetryModuleConfig {
  /** Whether telemetry collection is enabled. Default: true */
  enabled: boolean;
  /** Base URL of the telemetry server (empty string when unset) */
  serverUrl: string;
  /** API key for authentication (64-char hex string) */
  apiKey: string;
  /** Instance UUID for this client */
  instanceId: string;
  /** If true, log events instead of sending them. Default: false */
  dryRun: boolean;
}
/**
 * Environment variable names used by the Mosaic Telemetry module.
 */
export const MOSAIC_TELEMETRY_ENV = {
  ENABLED: "MOSAIC_TELEMETRY_ENABLED",
  SERVER_URL: "MOSAIC_TELEMETRY_SERVER_URL",
  API_KEY: "MOSAIC_TELEMETRY_API_KEY",
  INSTANCE_ID: "MOSAIC_TELEMETRY_INSTANCE_ID",
  DRY_RUN: "MOSAIC_TELEMETRY_DRY_RUN",
} as const;

/**
 * Read Mosaic Telemetry configuration from environment variables via NestJS ConfigService.
 *
 * Boolean flags are parsed case-insensitively: any casing of "true" enables
 * them; every other value disables them. String values fall back to "".
 *
 * @param configService - NestJS ConfigService instance
 * @returns Parsed module configuration
 */
export function loadMosaicTelemetryConfig(
  configService: ConfigService
): MosaicTelemetryModuleConfig {
  // Local readers keep the flag-parsing rule ("true", case-insensitive) in one place.
  const readFlag = (key: string, fallback: string): boolean =>
    configService.get<string>(key, fallback).toLowerCase() === "true";
  const readString = (key: string): string => configService.get<string>(key, "");

  return {
    enabled: readFlag(MOSAIC_TELEMETRY_ENV.ENABLED, "true"),
    serverUrl: readString(MOSAIC_TELEMETRY_ENV.SERVER_URL),
    apiKey: readString(MOSAIC_TELEMETRY_ENV.API_KEY),
    instanceId: readString(MOSAIC_TELEMETRY_ENV.INSTANCE_ID),
    dryRun: readFlag(MOSAIC_TELEMETRY_ENV.DRY_RUN, "false"),
  };
}
/**
 * Convert module config to SDK TelemetryConfig format.
 * Includes the onError callback for NestJS Logger integration.
 *
 * @param config - Module configuration
 * @param onError - Error callback (typically NestJS Logger); the key is only
 *   present on the result when a callback is supplied
 * @returns SDK-compatible TelemetryConfig
 */
export function toSdkConfig(
  config: MosaicTelemetryModuleConfig,
  onError?: (error: Error) => void
): TelemetryConfig {
  const { serverUrl, apiKey, instanceId, enabled, dryRun } = config;
  const sdkConfig: TelemetryConfig = { serverUrl, apiKey, instanceId, enabled, dryRun };
  // Attach the callback only when supplied so the key stays absent otherwise.
  if (onError) {
    sdkConfig.onError = onError;
  }
  return sdkConfig;
}

View File

@@ -0,0 +1,92 @@
import { Controller, Get, Query, UseGuards, BadRequestException } from "@nestjs/common";
import { AuthGuard } from "../auth/guards/auth.guard";
import { PredictionService } from "./prediction.service";
import {
TaskType,
Complexity,
Provider,
type PredictionResponse,
} from "@mosaicstack/telemetry-client";
/**
 * Valid values for query parameter validation.
 * Derived from the SDK enums at module load so the accepted sets stay in
 * sync with @mosaicstack/telemetry-client.
 */
const VALID_TASK_TYPES = new Set<string>(Object.values(TaskType));
const VALID_COMPLEXITIES = new Set<string>(Object.values(Complexity));
const VALID_PROVIDERS = new Set<string>(Object.values(Provider));

/**
 * Response DTO for the estimate endpoint.
 */
interface EstimateResponseDto {
  /** Prediction payload, or null when no prediction is available. */
  data: PredictionResponse | null;
}
/**
 * Mosaic Telemetry Controller
 *
 * Exposes telemetry prediction data over HTTP. Every endpoint is protected
 * by AuthGuard.
 *
 * Intentionally thin: validation happens here, while all prediction logic
 * lives in PredictionService; results are returned to the frontend as-is.
 */
@Controller("telemetry")
@UseGuards(AuthGuard)
export class MosaicTelemetryController {
  constructor(private readonly predictionService: PredictionService) {}

  /**
   * GET /api/telemetry/estimate
   *
   * Get a cost/token estimate for a given task configuration.
   * Returns prediction data including confidence level, or null if
   * no prediction is available.
   *
   * @param taskType - Task type enum value (e.g. "implementation", "planning")
   * @param model - Model name (e.g. "claude-sonnet-4-5")
   * @param provider - Provider enum value (e.g. "anthropic", "openai")
   * @param complexity - Complexity level (e.g. "low", "medium", "high")
   * @returns Prediction response with estimates and confidence
   * @throws {BadRequestException} When a parameter is missing or outside its enum
   */
  @Get("estimate")
  getEstimate(
    @Query("taskType") taskType: string,
    @Query("model") model: string,
    @Query("provider") provider: string,
    @Query("complexity") complexity: string
  ): EstimateResponseDto {
    if (!taskType || !model || !provider || !complexity) {
      throw new BadRequestException(
        "Missing query parameters. Required: taskType, model, provider, complexity"
      );
    }

    // Reject any value outside its SDK enum, naming the offending field and
    // listing the accepted values. Order: taskType, provider, complexity.
    const rejectInvalid = (name: string, value: string, valid: Set<string>): void => {
      if (!valid.has(value)) {
        throw new BadRequestException(
          `Invalid ${name} "${value}". Valid values: ${[...valid].join(", ")}`
        );
      }
    };
    rejectInvalid("taskType", taskType, VALID_TASK_TYPES);
    rejectInvalid("provider", provider, VALID_PROVIDERS);
    rejectInvalid("complexity", complexity, VALID_COMPLEXITIES);

    return {
      data: this.predictionService.getEstimate(
        taskType as TaskType,
        model,
        provider as Provider,
        complexity as Complexity
      ),
    };
  }
}

View File

@@ -0,0 +1,212 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { ConfigModule } from "@nestjs/config";
import { MosaicTelemetryModule } from "./mosaic-telemetry.module";
import { MosaicTelemetryService } from "./mosaic-telemetry.service";
// Mock the telemetry client so module tests never make real HTTP calls.
// The factory keeps every real export (enums, types) via importOriginal and
// swaps only the TelemetryClient class for a stub that tracks its running
// state and no-ops all network-facing methods.
vi.mock("@mosaicstack/telemetry-client", async (importOriginal) => {
  const actual = await importOriginal<typeof import("@mosaicstack/telemetry-client")>();
  // Minimal stand-in implementing the surface MosaicTelemetryService touches.
  class MockTelemetryClient {
    private _isRunning = false;
    constructor(_config: unknown) {
      // no-op
    }
    get eventBuilder() {
      return { build: vi.fn().mockReturnValue({ event_id: "test-event-id" }) };
    }
    start(): void {
      this._isRunning = true;
    }
    async stop(): Promise<void> {
      this._isRunning = false;
    }
    track(_event: unknown): void {
      // no-op
    }
    getPrediction(_query: unknown): unknown {
      return null;
    }
    async refreshPredictions(_queries: unknown): Promise<void> {
      // no-op
    }
    get queueSize(): number {
      return 0;
    }
    get isRunning(): boolean {
      return this._isRunning;
    }
  }
  // Spread the real module first so only TelemetryClient is replaced.
  return {
    ...actual,
    TelemetryClient: MockTelemetryClient,
  };
});
describe("MosaicTelemetryModule", () => {
  let module: TestingModule;

  /**
   * Compile a TestingModule containing MosaicTelemetryModule with the given
   * MOSAIC_TELEMETRY_* values injected through an in-memory ConfigModule.
   * Centralizes the boilerplate previously duplicated in every test.
   */
  async function compileWithEnv(env: Record<string, string>): Promise<TestingModule> {
    return Test.createTestingModule({
      imports: [
        ConfigModule.forRoot({
          isGlobal: true,
          envFilePath: [],
          load: [() => env],
        }),
        MosaicTelemetryModule,
      ],
    }).compile();
  }

  /** Full env for an enabled, non-dry-run telemetry client. */
  const ENABLED_ENV: Record<string, string> = {
    MOSAIC_TELEMETRY_ENABLED: "true",
    MOSAIC_TELEMETRY_SERVER_URL: "https://tel.test.local",
    MOSAIC_TELEMETRY_API_KEY: "a".repeat(64),
    MOSAIC_TELEMETRY_INSTANCE_ID: "550e8400-e29b-41d4-a716-446655440000",
    MOSAIC_TELEMETRY_DRY_RUN: "false",
  };

  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe("module initialization", () => {
    it("should compile the module successfully", async () => {
      module = await compileWithEnv({ MOSAIC_TELEMETRY_ENABLED: "false" });
      expect(module).toBeDefined();
      await module.close();
    });

    it("should provide MosaicTelemetryService", async () => {
      module = await compileWithEnv({ MOSAIC_TELEMETRY_ENABLED: "false" });
      const service = module.get<MosaicTelemetryService>(MosaicTelemetryService);
      expect(service).toBeDefined();
      expect(service).toBeInstanceOf(MosaicTelemetryService);
      await module.close();
    });

    it("should export MosaicTelemetryService for injection in other modules", async () => {
      module = await compileWithEnv({ MOSAIC_TELEMETRY_ENABLED: "false" });
      const service = module.get(MosaicTelemetryService);
      expect(service).toBeDefined();
      await module.close();
    });
  });

  describe("lifecycle integration", () => {
    it("should initialize service on module init when enabled", async () => {
      module = await compileWithEnv(ENABLED_ENV);
      await module.init();
      const service = module.get<MosaicTelemetryService>(MosaicTelemetryService);
      expect(service.isEnabled).toBe(true);
      await module.close();
    });

    it("should not start client when disabled via env", async () => {
      module = await compileWithEnv({ MOSAIC_TELEMETRY_ENABLED: "false" });
      await module.init();
      const service = module.get<MosaicTelemetryService>(MosaicTelemetryService);
      expect(service.isEnabled).toBe(false);
      await module.close();
    });

    it("should cleanly shut down on module destroy", async () => {
      module = await compileWithEnv(ENABLED_ENV);
      await module.init();
      const service = module.get<MosaicTelemetryService>(MosaicTelemetryService);
      expect(service.isEnabled).toBe(true);
      await expect(module.close()).resolves.not.toThrow();
    });
  });
});

View File

@@ -0,0 +1,41 @@
import { Module, Global } from "@nestjs/common";
import { ConfigModule } from "@nestjs/config";
import { AuthModule } from "../auth/auth.module";
import { MosaicTelemetryService } from "./mosaic-telemetry.service";
import { PredictionService } from "./prediction.service";
import { MosaicTelemetryController } from "./mosaic-telemetry.controller";
/**
 * Global module providing Mosaic Telemetry integration via @mosaicstack/telemetry-client.
 *
 * Tracks task completion events and provides crowd-sourced predictions for
 * token usage, cost estimation, and quality metrics.
 *
 * **This is separate from the OpenTelemetry (OTEL) TelemetryModule** which
 * handles distributed request tracing. This module is specifically for
 * Mosaic Stack's own telemetry aggregation service.
 *
 * Configuration via environment variables:
 * - MOSAIC_TELEMETRY_ENABLED (boolean, default: true)
 * - MOSAIC_TELEMETRY_SERVER_URL (string)
 * - MOSAIC_TELEMETRY_API_KEY (string, 64-char hex)
 * - MOSAIC_TELEMETRY_INSTANCE_ID (string, UUID)
 * - MOSAIC_TELEMETRY_DRY_RUN (boolean, default: false)
 *
 * AuthModule is imported so MosaicTelemetryController can apply AuthGuard
 * to its endpoints; ConfigModule supplies the env values above.
 *
 * @example
 * ```typescript
 * // In any service (no need to import module — it's global):
 * @Injectable()
 * export class MyService {
 *   constructor(private readonly telemetry: MosaicTelemetryService) {}
 * }
 * ```
 */
@Global()
@Module({
  imports: [ConfigModule, AuthModule],
  controllers: [MosaicTelemetryController],
  providers: [MosaicTelemetryService, PredictionService],
  exports: [MosaicTelemetryService, PredictionService],
})
export class MosaicTelemetryModule {}

View File

@@ -0,0 +1,504 @@
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
import { ConfigService } from "@nestjs/config";
import { MOSAIC_TELEMETRY_ENV } from "./mosaic-telemetry.config";
import type {
TaskCompletionEvent,
PredictionQuery,
PredictionResponse,
} from "@mosaicstack/telemetry-client";
import { TaskType, Complexity, Provider, Outcome } from "@mosaicstack/telemetry-client";
// Shared spies wired into MockTelemetryClient below so individual tests can
// assert on client interactions; cleared between tests via vi.clearAllMocks()
// in afterEach.
const mockStartFn = vi.fn();
const mockStopFn = vi.fn().mockResolvedValue(undefined);
const mockTrackFn = vi.fn();
const mockGetPredictionFn = vi.fn().mockReturnValue(null);
const mockRefreshPredictionsFn = vi.fn().mockResolvedValue(undefined);
const mockBuildFn = vi.fn().mockReturnValue({ event_id: "test-event-id" });

// Replace TelemetryClient with a stub that records every call on the spies
// above, while keeping all other real exports (enums, types) intact.
vi.mock("@mosaicstack/telemetry-client", async (importOriginal) => {
  const actual = await importOriginal<typeof import("@mosaicstack/telemetry-client")>();
  class MockTelemetryClient {
    private _isRunning = false;
    constructor(_config: unknown) {
      // no-op
    }
    get eventBuilder() {
      return { build: mockBuildFn };
    }
    start(): void {
      this._isRunning = true;
      mockStartFn();
    }
    async stop(): Promise<void> {
      this._isRunning = false;
      await mockStopFn();
    }
    track(event: unknown): void {
      mockTrackFn(event);
    }
    getPrediction(query: unknown): unknown {
      return mockGetPredictionFn(query);
    }
    async refreshPredictions(queries: unknown): Promise<void> {
      await mockRefreshPredictionsFn(queries);
    }
    get queueSize(): number {
      return 0;
    }
    get isRunning(): boolean {
      return this._isRunning;
    }
  }
  return {
    ...actual,
    TelemetryClient: MockTelemetryClient,
  };
});
// Lazy-import the service after the mock is in place
const { MosaicTelemetryService } = await import("./mosaic-telemetry.service");
/**
 * Build a minimal ConfigService stub whose `get` resolves keys from the
 * supplied env map, falling back to the caller-provided default and then "".
 */
function createConfigService(envMap: Record<string, string | undefined> = {}): ConfigService {
  const get = vi.fn(
    (key: string, defaultValue?: string): string => envMap[key] ?? defaultValue ?? ""
  );
  return { get } as unknown as ConfigService;
}
/**
 * Default env config for an enabled telemetry service.
 * Uses a 64-char hex API key and a UUID instance ID; individual tests spread
 * this object and override single keys to exercise disabled/missing cases.
 */
const ENABLED_CONFIG: Record<string, string> = {
  [MOSAIC_TELEMETRY_ENV.ENABLED]: "true",
  [MOSAIC_TELEMETRY_ENV.SERVER_URL]: "https://tel.test.local",
  [MOSAIC_TELEMETRY_ENV.API_KEY]: "a".repeat(64),
  [MOSAIC_TELEMETRY_ENV.INSTANCE_ID]: "550e8400-e29b-41d4-a716-446655440000",
  [MOSAIC_TELEMETRY_ENV.DRY_RUN]: "false",
};
/**
 * Build a representative TaskCompletionEvent fixture for tracking tests.
 */
function createTestEvent(): TaskCompletionEvent {
  const instanceId = "550e8400-e29b-41d4-a716-446655440000";
  return {
    // Envelope / identity
    schema_version: "1.0.0",
    event_id: "test-event-123",
    timestamp: new Date().toISOString(),
    instance_id: instanceId,
    // Task shape
    task_duration_ms: 5000,
    task_type: TaskType.FEATURE,
    complexity: Complexity.MEDIUM,
    harness: "claude-code" as TaskCompletionEvent["harness"],
    model: "claude-sonnet-4-20250514",
    provider: Provider.ANTHROPIC,
    // Token and cost figures (estimates deliberately differ from actuals)
    estimated_input_tokens: 1000,
    estimated_output_tokens: 500,
    actual_input_tokens: 1100,
    actual_output_tokens: 450,
    estimated_cost_usd_micros: 5000,
    actual_cost_usd_micros: 4800,
    // Quality / context metrics and outcome
    quality_gate_passed: true,
    quality_gates_run: [],
    quality_gates_failed: [],
    context_compactions: 0,
    context_rotations: 0,
    context_utilization_final: 0.45,
    outcome: Outcome.SUCCESS,
    retry_count: 0,
  };
}
describe("MosaicTelemetryService", () => {
let service: InstanceType<typeof MosaicTelemetryService>;
afterEach(async () => {
if (service) {
await service.onModuleDestroy();
}
vi.clearAllMocks();
});
describe("onModuleInit", () => {
it("should initialize the client when enabled with valid config", () => {
const configService = createConfigService(ENABLED_CONFIG);
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(mockStartFn).toHaveBeenCalledOnce();
expect(service.isEnabled).toBe(true);
});
it("should not initialize client when disabled", () => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(mockStartFn).not.toHaveBeenCalled();
expect(service.isEnabled).toBe(false);
});
it("should disable when server URL is missing", () => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.SERVER_URL]: "",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(service.isEnabled).toBe(false);
});
it("should disable when API key is missing", () => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.API_KEY]: "",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(service.isEnabled).toBe(false);
});
it("should disable when instance ID is missing", () => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.INSTANCE_ID]: "",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(service.isEnabled).toBe(false);
});
it("should log dry-run mode when configured", () => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.DRY_RUN]: "true",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
expect(mockStartFn).toHaveBeenCalledOnce();
});
});
describe("onModuleDestroy", () => {
  // Construct and initialize the service with optional env overrides.
  const boot = (overrides: Record<string, string> = {}): void => {
    service = new MosaicTelemetryService(
      createConfigService({ ...ENABLED_CONFIG, ...overrides })
    );
    service.onModuleInit();
  };

  it("should stop the client on shutdown", async () => {
    boot();
    await service.onModuleDestroy();
    expect(mockStopFn).toHaveBeenCalledOnce();
  });

  it("should not throw when client is not initialized (disabled)", async () => {
    boot({ [MOSAIC_TELEMETRY_ENV.ENABLED]: "false" });
    await expect(service.onModuleDestroy()).resolves.not.toThrow();
  });

  it("should not throw when called multiple times", async () => {
    boot();
    await service.onModuleDestroy();
    await expect(service.onModuleDestroy()).resolves.not.toThrow();
  });
});
describe("trackTaskCompletion", () => {
  it("should queue event via client.track() when enabled", () => {
    service = new MosaicTelemetryService(createConfigService(ENABLED_CONFIG));
    service.onModuleInit();
    const payload = createTestEvent();
    service.trackTaskCompletion(payload);
    expect(mockTrackFn).toHaveBeenCalledWith(payload);
  });

  it("should be a no-op when disabled", () => {
    const disabledEnv = {
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    };
    service = new MosaicTelemetryService(createConfigService(disabledEnv));
    service.onModuleInit();
    service.trackTaskCompletion(createTestEvent());
    expect(mockTrackFn).not.toHaveBeenCalled();
  });
});
describe("getPrediction", () => {
  // Shared query used by every case in this group.
  const testQuery: PredictionQuery = {
    task_type: TaskType.FEATURE,
    model: "claude-sonnet-4-20250514",
    provider: Provider.ANTHROPIC,
    complexity: Complexity.MEDIUM,
  };
  it("should return cached prediction when available", () => {
    // Fully-populated response; values are arbitrary but must round-trip
    // unchanged through the service (pure delegation to the SDK client).
    const mockPrediction: PredictionResponse = {
      prediction: {
        input_tokens: { p10: 100, p25: 200, median: 300, p75: 400, p90: 500 },
        output_tokens: { p10: 50, p25: 100, median: 150, p75: 200, p90: 250 },
        cost_usd_micros: { median: 5000 },
        duration_ms: { median: 10000 },
        correction_factors: { input: 1.0, output: 1.0 },
        quality: { gate_pass_rate: 0.95, success_rate: 0.9 },
      },
      metadata: {
        sample_size: 100,
        fallback_level: 0,
        confidence: "high",
        last_updated: new Date().toISOString(),
        cache_hit: true,
      },
    };
    const configService = createConfigService(ENABLED_CONFIG);
    service = new MosaicTelemetryService(configService);
    service.onModuleInit();
    mockGetPredictionFn.mockReturnValueOnce(mockPrediction);
    const result = service.getPrediction(testQuery);
    expect(result).toEqual(mockPrediction);
    expect(mockGetPredictionFn).toHaveBeenCalledWith(testQuery);
  });
  it("should return null when disabled", () => {
    const configService = createConfigService({
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    });
    service = new MosaicTelemetryService(configService);
    service.onModuleInit();
    // Disabled service must not even consult the SDK; it returns null directly.
    const result = service.getPrediction(testQuery);
    expect(result).toBeNull();
  });
  it("should return null when no cached prediction exists", () => {
    const configService = createConfigService(ENABLED_CONFIG);
    service = new MosaicTelemetryService(configService);
    service.onModuleInit();
    mockGetPredictionFn.mockReturnValueOnce(null);
    const result = service.getPrediction(testQuery);
    expect(result).toBeNull();
  });
});
describe("refreshPredictions", () => {
  // Single representative query shared by both cases.
  const sampleQueries: PredictionQuery[] = [
    {
      task_type: TaskType.FEATURE,
      model: "claude-sonnet-4-20250514",
      provider: Provider.ANTHROPIC,
      complexity: Complexity.MEDIUM,
    },
  ];

  it("should call client.refreshPredictions when enabled", async () => {
    service = new MosaicTelemetryService(createConfigService(ENABLED_CONFIG));
    service.onModuleInit();
    await service.refreshPredictions(sampleQueries);
    expect(mockRefreshPredictionsFn).toHaveBeenCalledWith(sampleQueries);
  });

  it("should be a no-op when disabled", async () => {
    const disabledEnv = {
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    };
    service = new MosaicTelemetryService(createConfigService(disabledEnv));
    service.onModuleInit();
    await service.refreshPredictions(sampleQueries);
    expect(mockRefreshPredictionsFn).not.toHaveBeenCalled();
  });
});
describe("eventBuilder", () => {
  it("should return EventBuilder when enabled", () => {
    service = new MosaicTelemetryService(createConfigService(ENABLED_CONFIG));
    service.onModuleInit();
    const builder = service.eventBuilder;
    expect(builder).toBeDefined();
    expect(builder).not.toBeNull();
    expect(typeof builder?.build).toBe("function");
  });

  it("should return null when disabled", () => {
    const disabledEnv = {
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    };
    service = new MosaicTelemetryService(createConfigService(disabledEnv));
    service.onModuleInit();
    expect(service.eventBuilder).toBeNull();
  });
});
describe("isEnabled", () => {
  it("should return true when client is running", () => {
    service = new MosaicTelemetryService(createConfigService(ENABLED_CONFIG));
    service.onModuleInit();
    expect(service.isEnabled).toBe(true);
  });

  it("should return false when disabled", () => {
    const disabledEnv = {
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    };
    service = new MosaicTelemetryService(createConfigService(disabledEnv));
    service.onModuleInit();
    expect(service.isEnabled).toBe(false);
  });
});
describe("queueSize", () => {
  it("should return 0 when disabled", () => {
    const disabledEnv = {
      ...ENABLED_CONFIG,
      [MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
    };
    service = new MosaicTelemetryService(createConfigService(disabledEnv));
    service.onModuleInit();
    expect(service.queueSize).toBe(0);
  });

  it("should delegate to client.queueSize when enabled", () => {
    service = new MosaicTelemetryService(createConfigService(ENABLED_CONFIG));
    service.onModuleInit();
    expect(service.queueSize).toBe(0);
  });
});
describe("disabled mode (comprehensive)", () => {
beforeEach(() => {
const configService = createConfigService({
...ENABLED_CONFIG,
[MOSAIC_TELEMETRY_ENV.ENABLED]: "false",
});
service = new MosaicTelemetryService(configService);
service.onModuleInit();
});
it("should not make any HTTP calls when disabled", () => {
const event = createTestEvent();
service.trackTaskCompletion(event);
expect(mockTrackFn).not.toHaveBeenCalled();
expect(mockStartFn).not.toHaveBeenCalled();
});
it("should safely handle all method calls when disabled", async () => {
expect(() => service.trackTaskCompletion(createTestEvent())).not.toThrow();
expect(
service.getPrediction({
task_type: TaskType.FEATURE,
model: "test",
provider: Provider.ANTHROPIC,
complexity: Complexity.LOW,
})
).toBeNull();
await expect(service.refreshPredictions([])).resolves.not.toThrow();
expect(service.eventBuilder).toBeNull();
expect(service.isEnabled).toBe(false);
expect(service.queueSize).toBe(0);
});
});
describe("dry-run mode", () => {
  // Env shared by both cases; dry-run flips only submission behavior.
  const dryRunEnv = {
    ...ENABLED_CONFIG,
    [MOSAIC_TELEMETRY_ENV.DRY_RUN]: "true",
  };

  it("should create client in dry-run mode", () => {
    service = new MosaicTelemetryService(createConfigService(dryRunEnv));
    service.onModuleInit();
    expect(mockStartFn).toHaveBeenCalledOnce();
    expect(service.isEnabled).toBe(true);
  });

  it("should accept events in dry-run mode", () => {
    service = new MosaicTelemetryService(createConfigService(dryRunEnv));
    service.onModuleInit();
    const payload = createTestEvent();
    service.trackTaskCompletion(payload);
    expect(mockTrackFn).toHaveBeenCalledWith(payload);
  });
});
});

View File

@@ -0,0 +1,164 @@
import { Injectable, Logger, OnModuleInit, OnModuleDestroy } from "@nestjs/common";
import { ConfigService } from "@nestjs/config";
import {
TelemetryClient,
type TaskCompletionEvent,
type PredictionQuery,
type PredictionResponse,
type EventBuilder,
} from "@mosaicstack/telemetry-client";
import {
loadMosaicTelemetryConfig,
toSdkConfig,
type MosaicTelemetryModuleConfig,
} from "./mosaic-telemetry.config";
/**
* NestJS service wrapping the @mosaicstack/telemetry-client SDK.
*
* Provides convenience methods for tracking task completions and reading
* crowd-sourced predictions. When telemetry is disabled via
* MOSAIC_TELEMETRY_ENABLED=false, all methods are safe no-ops.
*
* This service is provided globally by MosaicTelemetryModule — any service
* can inject it without importing the module explicitly.
*
* @example
* ```typescript
* @Injectable()
* export class TasksService {
* constructor(private readonly telemetry: MosaicTelemetryService) {}
*
* async completeTask(taskId: string): Promise<void> {
* // ... complete the task ...
* const event = this.telemetry.eventBuilder.build({ ... });
* this.telemetry.trackTaskCompletion(event);
* }
* }
* ```
*/
@Injectable()
export class MosaicTelemetryService implements OnModuleInit, OnModuleDestroy {
private readonly logger = new Logger(MosaicTelemetryService.name);
private client: TelemetryClient | null = null;
private config: MosaicTelemetryModuleConfig | null = null;
constructor(private readonly configService: ConfigService) {}
/**
* Initialize the telemetry client on module startup.
* Reads configuration from environment variables and starts background submission.
*/
onModuleInit(): void {
this.config = loadMosaicTelemetryConfig(this.configService);
if (!this.config.enabled) {
this.logger.log("Mosaic Telemetry is disabled");
return;
}
if (!this.config.serverUrl || !this.config.apiKey || !this.config.instanceId) {
this.logger.warn(
"Mosaic Telemetry is enabled but missing configuration " +
"(MOSAIC_TELEMETRY_SERVER_URL, MOSAIC_TELEMETRY_API_KEY, or MOSAIC_TELEMETRY_INSTANCE_ID). " +
"Telemetry will remain disabled."
);
this.config = { ...this.config, enabled: false };
return;
}
const sdkConfig = toSdkConfig(this.config, (error: Error) => {
this.logger.error(`Telemetry client error: ${error.message}`, error.stack);
});
this.client = new TelemetryClient(sdkConfig);
this.client.start();
const mode = this.config.dryRun ? "dry-run" : "live";
this.logger.log(`Mosaic Telemetry client started (${mode}) -> ${this.config.serverUrl}`);
}
/**
* Stop the telemetry client on module shutdown.
* Flushes any remaining queued events before stopping.
*/
async onModuleDestroy(): Promise<void> {
if (this.client) {
this.logger.log("Stopping Mosaic Telemetry client...");
await this.client.stop();
this.client = null;
this.logger.log("Mosaic Telemetry client stopped");
}
}
/**
* Queue a task completion event for batch submission.
* No-op when telemetry is disabled.
*
* @param event - The task completion event to track
*/
trackTaskCompletion(event: TaskCompletionEvent): void {
if (!this.client) {
return;
}
this.client.track(event);
}
/**
* Get a cached prediction for the given query.
* Returns null when telemetry is disabled or if not cached/expired.
*
* @param query - The prediction query parameters
* @returns Cached prediction response, or null
*/
getPrediction(query: PredictionQuery): PredictionResponse | null {
if (!this.client) {
return null;
}
return this.client.getPrediction(query);
}
/**
* Force-refresh predictions from the telemetry server.
* No-op when telemetry is disabled.
*
* @param queries - Array of prediction queries to refresh
*/
async refreshPredictions(queries: PredictionQuery[]): Promise<void> {
if (!this.client) {
return;
}
await this.client.refreshPredictions(queries);
}
/**
* Get the EventBuilder for constructing TaskCompletionEvent objects.
* Returns null when telemetry is disabled.
*
* @returns EventBuilder instance, or null if disabled
*/
get eventBuilder(): EventBuilder | null {
if (!this.client) {
return null;
}
return this.client.eventBuilder;
}
/**
* Whether the telemetry client is currently active and running.
*/
get isEnabled(): boolean {
return this.client?.isRunning ?? false;
}
/**
* Number of events currently queued for submission.
* Returns 0 when telemetry is disabled.
*/
get queueSize(): number {
if (!this.client) {
return 0;
}
return this.client.queueSize;
}
}

View File

@@ -0,0 +1,297 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { TaskType, Complexity, Provider } from "@mosaicstack/telemetry-client";
import type { PredictionResponse, PredictionQuery } from "@mosaicstack/telemetry-client";
import { MosaicTelemetryService } from "./mosaic-telemetry.service";
import { PredictionService } from "./prediction.service";
describe("PredictionService", () => {
  let service: PredictionService;
  // Minimal stub of MosaicTelemetryService covering only the members that
  // PredictionService reads: isEnabled, getPrediction, refreshPredictions.
  let mockTelemetryService: {
    isEnabled: boolean;
    getPrediction: ReturnType<typeof vi.fn>;
    refreshPredictions: ReturnType<typeof vi.fn>;
  };
  // High-confidence response (fallback level 0, 150 samples) used as the
  // default return value of the mocked getPrediction.
  const mockPredictionResponse: PredictionResponse = {
    prediction: {
      input_tokens: {
        p10: 50,
        p25: 80,
        median: 120,
        p75: 200,
        p90: 350,
      },
      output_tokens: {
        p10: 100,
        p25: 150,
        median: 250,
        p75: 400,
        p90: 600,
      },
      cost_usd_micros: {
        p10: 500,
        p25: 800,
        median: 1200,
        p75: 2000,
        p90: 3500,
      },
      duration_ms: {
        p10: 200,
        p25: 400,
        median: 800,
        p75: 1500,
        p90: 3000,
      },
      correction_factors: {
        input: 1.0,
        output: 1.0,
      },
      quality: {
        gate_pass_rate: 0.95,
        success_rate: 0.92,
      },
    },
    metadata: {
      sample_size: 150,
      fallback_level: 0,
      confidence: "high",
      last_updated: "2026-02-15T00:00:00Z",
      cache_hit: true,
    },
  };
  // "No data" response: nothing found even at the broadest fallback level,
  // so prediction is null and confidence is "none".
  const nullPredictionResponse: PredictionResponse = {
    prediction: null,
    metadata: {
      sample_size: 0,
      fallback_level: 3,
      confidence: "none",
      last_updated: null,
      cache_hit: false,
    },
  };
  beforeEach(async () => {
    mockTelemetryService = {
      isEnabled: true,
      getPrediction: vi.fn().mockReturnValue(mockPredictionResponse),
      refreshPredictions: vi.fn().mockResolvedValue(undefined),
    };
    const module: TestingModule = await Test.createTestingModule({
      providers: [
        PredictionService,
        {
          provide: MosaicTelemetryService,
          useValue: mockTelemetryService,
        },
      ],
    }).compile();
    service = module.get<PredictionService>(PredictionService);
  });
  it("should be defined", () => {
    expect(service).toBeDefined();
  });
  // ---------- getEstimate ----------
  describe("getEstimate", () => {
    it("should return prediction response for valid query", () => {
      const result = service.getEstimate(
        TaskType.IMPLEMENTATION,
        "claude-sonnet-4-5",
        Provider.ANTHROPIC,
        Complexity.LOW
      );
      expect(result).toEqual(mockPredictionResponse);
      // The positional args must be translated into a snake_case query object.
      expect(mockTelemetryService.getPrediction).toHaveBeenCalledWith({
        task_type: TaskType.IMPLEMENTATION,
        model: "claude-sonnet-4-5",
        provider: Provider.ANTHROPIC,
        complexity: Complexity.LOW,
      });
    });
    it("should pass correct query parameters to telemetry service", () => {
      service.getEstimate(TaskType.CODE_REVIEW, "gpt-4o", Provider.OPENAI, Complexity.HIGH);
      expect(mockTelemetryService.getPrediction).toHaveBeenCalledWith({
        task_type: TaskType.CODE_REVIEW,
        model: "gpt-4o",
        provider: Provider.OPENAI,
        complexity: Complexity.HIGH,
      });
    });
    it("should return null when telemetry returns null", () => {
      mockTelemetryService.getPrediction.mockReturnValue(null);
      const result = service.getEstimate(
        TaskType.IMPLEMENTATION,
        "claude-sonnet-4-5",
        Provider.ANTHROPIC,
        Complexity.LOW
      );
      expect(result).toBeNull();
    });
    it("should return null prediction response when confidence is none", () => {
      // A "no data" response is passed through as-is, not converted to null.
      mockTelemetryService.getPrediction.mockReturnValue(nullPredictionResponse);
      const result = service.getEstimate(
        TaskType.IMPLEMENTATION,
        "unknown-model",
        Provider.UNKNOWN,
        Complexity.LOW
      );
      expect(result).toEqual(nullPredictionResponse);
      expect(result?.metadata.confidence).toBe("none");
    });
    it("should return null and not throw when getPrediction throws", () => {
      mockTelemetryService.getPrediction.mockImplementation(() => {
        throw new Error("Prediction fetch failed");
      });
      const result = service.getEstimate(
        TaskType.IMPLEMENTATION,
        "claude-sonnet-4-5",
        Provider.ANTHROPIC,
        Complexity.LOW
      );
      expect(result).toBeNull();
    });
    it("should handle non-Error thrown objects gracefully", () => {
      mockTelemetryService.getPrediction.mockImplementation(() => {
        throw "string error";
      });
      const result = service.getEstimate(
        TaskType.IMPLEMENTATION,
        "claude-sonnet-4-5",
        Provider.ANTHROPIC,
        Complexity.LOW
      );
      expect(result).toBeNull();
    });
  });
  // ---------- refreshCommonPredictions ----------
  describe("refreshCommonPredictions", () => {
    it("should call refreshPredictions with multiple query combinations", async () => {
      await service.refreshCommonPredictions();
      expect(mockTelemetryService.refreshPredictions).toHaveBeenCalledTimes(1);
      const queries: PredictionQuery[] = mockTelemetryService.refreshPredictions.mock.calls[0][0];
      // Should have queries for cross-product of models, task types, and complexities
      expect(queries.length).toBeGreaterThan(0);
      // Verify all queries have valid structure
      for (const query of queries) {
        expect(query).toHaveProperty("task_type");
        expect(query).toHaveProperty("model");
        expect(query).toHaveProperty("provider");
        expect(query).toHaveProperty("complexity");
      }
    });
    it("should include Anthropic model predictions", async () => {
      await service.refreshCommonPredictions();
      const queries: PredictionQuery[] = mockTelemetryService.refreshPredictions.mock.calls[0][0];
      const anthropicQueries = queries.filter(
        (q: PredictionQuery) => q.provider === Provider.ANTHROPIC
      );
      expect(anthropicQueries.length).toBeGreaterThan(0);
    });
    it("should include OpenAI model predictions", async () => {
      await service.refreshCommonPredictions();
      const queries: PredictionQuery[] = mockTelemetryService.refreshPredictions.mock.calls[0][0];
      const openaiQueries = queries.filter((q: PredictionQuery) => q.provider === Provider.OPENAI);
      expect(openaiQueries.length).toBeGreaterThan(0);
    });
    it("should not call refreshPredictions when telemetry is disabled", async () => {
      mockTelemetryService.isEnabled = false;
      await service.refreshCommonPredictions();
      expect(mockTelemetryService.refreshPredictions).not.toHaveBeenCalled();
    });
    it("should not throw when refreshPredictions rejects", async () => {
      mockTelemetryService.refreshPredictions.mockRejectedValue(new Error("Server unreachable"));
      // Should not throw
      await expect(service.refreshCommonPredictions()).resolves.not.toThrow();
    });
    it("should include common task types in queries", async () => {
      await service.refreshCommonPredictions();
      const queries: PredictionQuery[] = mockTelemetryService.refreshPredictions.mock.calls[0][0];
      const taskTypes = new Set(queries.map((q: PredictionQuery) => q.task_type));
      expect(taskTypes.has(TaskType.IMPLEMENTATION)).toBe(true);
      expect(taskTypes.has(TaskType.PLANNING)).toBe(true);
      expect(taskTypes.has(TaskType.CODE_REVIEW)).toBe(true);
    });
    it("should include common complexity levels in queries", async () => {
      await service.refreshCommonPredictions();
      const queries: PredictionQuery[] = mockTelemetryService.refreshPredictions.mock.calls[0][0];
      const complexities = new Set(queries.map((q: PredictionQuery) => q.complexity));
      expect(complexities.has(Complexity.LOW)).toBe(true);
      expect(complexities.has(Complexity.MEDIUM)).toBe(true);
    });
  });
  // ---------- onModuleInit ----------
  describe("onModuleInit", () => {
    it("should trigger refreshCommonPredictions on init when telemetry is enabled", () => {
      // refreshPredictions is async, but onModuleInit fires it and forgets
      service.onModuleInit();
      // Give the promise microtask a chance to execute
      expect(mockTelemetryService.isEnabled).toBe(true);
      // refreshPredictions will be called asynchronously
    });
    it("should not refresh when telemetry is disabled", () => {
      mockTelemetryService.isEnabled = false;
      service.onModuleInit();
      // refreshPredictions should not be called since we returned early
      expect(mockTelemetryService.refreshPredictions).not.toHaveBeenCalled();
    });
    it("should not throw when refresh fails on init", () => {
      mockTelemetryService.refreshPredictions.mockRejectedValue(new Error("Connection refused"));
      // Should not throw
      expect(() => service.onModuleInit()).not.toThrow();
    });
  });
});

View File

@@ -0,0 +1,161 @@
import { Injectable, Logger, OnModuleInit } from "@nestjs/common";
import {
TaskType,
Complexity,
Provider,
type PredictionQuery,
type PredictionResponse,
} from "@mosaicstack/telemetry-client";
import { MosaicTelemetryService } from "./mosaic-telemetry.service";
/**
 * Common model-provider combinations used for pre-fetching predictions.
 * These represent the most frequently used LLM configurations.
 *
 * NOTE(review): keep this list aligned with the models the orchestrator
 * actually offers — stale entries waste refresh calls; verify periodically.
 */
const COMMON_MODELS: { model: string; provider: Provider }[] = [
  { model: "claude-sonnet-4-5", provider: Provider.ANTHROPIC },
  { model: "claude-opus-4", provider: Provider.ANTHROPIC },
  { model: "claude-haiku-4-5", provider: Provider.ANTHROPIC },
  { model: "gpt-4o", provider: Provider.OPENAI },
  { model: "gpt-4o-mini", provider: Provider.OPENAI },
];
/**
 * Common task types to pre-fetch predictions for.
 */
const COMMON_TASK_TYPES: TaskType[] = [
  TaskType.IMPLEMENTATION,
  TaskType.PLANNING,
  TaskType.CODE_REVIEW,
];
/**
 * Common complexity levels to pre-fetch predictions for.
 * HIGH is omitted — presumably to keep the startup refresh batch small;
 * confirm before relying on cached HIGH-complexity estimates.
 */
const COMMON_COMPLEXITIES: Complexity[] = [Complexity.LOW, Complexity.MEDIUM];
/**
 * PredictionService
 *
 * Surfaces pre-task cost and token estimates backed by crowd-sourced data
 * from the Mosaic Telemetry server. The underlying SDK caches predictions
 * with a 6-hour TTL.
 *
 * Deliberately non-blocking: when telemetry is disabled, the server is
 * unreachable, or no data exists, every method returns null instead of
 * throwing. Prediction failures must never stall task execution.
 *
 * @example
 * ```typescript
 * const estimate = this.predictionService.getEstimate(
 *   TaskType.IMPLEMENTATION,
 *   "claude-sonnet-4-5",
 *   Provider.ANTHROPIC,
 *   Complexity.LOW,
 * );
 * if (estimate?.prediction) {
 *   console.log(`Estimated cost: ${estimate.prediction.cost_usd_micros}`);
 * }
 * ```
 */
@Injectable()
export class PredictionService implements OnModuleInit {
  private readonly logger = new Logger(PredictionService.name);

  constructor(private readonly telemetry: MosaicTelemetryService) {}

  /** Render unknown thrown values as a log-friendly message. */
  private static errorMessage(error: unknown): string {
    return error instanceof Error ? error.message : String(error);
  }

  /**
   * Kick off a background refresh of common predictions at startup.
   * Never blocks module initialization.
   */
  onModuleInit(): void {
    if (!this.telemetry.isEnabled) {
      this.logger.log("Telemetry disabled - skipping prediction refresh");
      return;
    }
    // Fire-and-forget: failures are logged, never propagated.
    this.refreshCommonPredictions().catch((error: unknown) => {
      this.logger.warn(
        `Failed to refresh common predictions on startup: ${PredictionService.errorMessage(error)}`
      );
    });
  }

  /**
   * Get a cost/token estimate for a given task configuration.
   *
   * Returns the SDK's cached prediction, or null when telemetry is disabled,
   * no data exists for this combination, or the cached entry has expired.
   *
   * @param taskType - The type of task to estimate
   * @param model - The model name (e.g. "claude-sonnet-4-5")
   * @param provider - The provider enum value
   * @param complexity - The complexity level
   * @returns Prediction response with estimates and confidence, or null
   */
  getEstimate(
    taskType: TaskType,
    model: string,
    provider: Provider,
    complexity: Complexity
  ): PredictionResponse | null {
    try {
      return this.telemetry.getPrediction({
        task_type: taskType,
        model,
        provider,
        complexity,
      });
    } catch (error: unknown) {
      this.logger.warn(
        `Failed to get prediction estimate: ${PredictionService.errorMessage(error)}`
      );
      return null;
    }
  }

  /**
   * Refresh predictions for commonly used (taskType, model, provider,
   * complexity) combinations.
   *
   * Builds the cross-product of COMMON_MODELS x COMMON_TASK_TYPES x
   * COMMON_COMPLEXITIES and batch-refreshes it from the telemetry server;
   * the SDK caches results with a 6-hour TTL.
   *
   * Safe to call at any time — completes without error when telemetry is
   * disabled or the server is unreachable.
   */
  async refreshCommonPredictions(): Promise<void> {
    if (!this.telemetry.isEnabled) {
      return;
    }
    const queries: PredictionQuery[] = COMMON_MODELS.flatMap(({ model, provider }) =>
      COMMON_TASK_TYPES.flatMap((task_type) =>
        COMMON_COMPLEXITIES.map(
          (complexity): PredictionQuery => ({ task_type, model, provider, complexity })
        )
      )
    );
    this.logger.log(`Refreshing ${String(queries.length)} common prediction queries...`);
    try {
      await this.telemetry.refreshPredictions(queries);
      this.logger.log(`Successfully refreshed ${String(queries.length)} predictions`);
    } catch (error: unknown) {
      this.logger.warn(`Failed to refresh predictions: ${PredictionService.errorMessage(error)}`);
    }
  }
}