- Create LlmTelemetryTrackerService for non-blocking event emission
- Normalize token usage across Anthropic, OpenAI, Ollama providers
- Add cost table with per-token pricing in microdollars
- Instrument chat, chatStream, and embed methods
- Infer task type from calling context
- Aggregate streaming tokens after stream ends with fallback estimation
- Add 69 unit tests for tracker service, cost table, and LLM service

Refs #371

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
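The sketches below walk through the mechanisms the commit message lists, one bullet at a time; every name in them beyond those quoted from the commit is an assumption, not the repo's actual code.

Non-blocking event emission means the tracker must never slow down or fail an LLM call. A minimal sketch of that pattern, assuming a hypothetical LlmUsageEvent shape and a fire-and-forget track() method:

import { Injectable, Logger } from "@nestjs/common";

// Hypothetical event shape; the real fields live in the tracker service.
interface LlmUsageEvent {
  provider: string;
  model: string;
  inputTokens: number;
  outputTokens: number;
  taskType: string;
}

@Injectable()
export class LlmTelemetryTrackerService {
  private readonly logger = new Logger(LlmTelemetryTrackerService.name);

  // Fire-and-forget: callers never await this, and persistence errors are
  // logged rather than rethrown, so telemetry can never fail a request.
  track(event: LlmUsageEvent): void {
    setImmediate(() => {
      this.persist(event).catch((err) =>
        this.logger.warn(`telemetry write failed: ${err}`),
      );
    });
  }

  private async persist(_event: LlmUsageEvent): Promise<void> {
    // e.g. a write through the LlmUsageModule's repository; elided here.
  }
}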
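The providers report token counts under different keys: Anthropic returns usage.input_tokens and usage.output_tokens, OpenAI returns usage.prompt_tokens and usage.completion_tokens, and Ollama puts prompt_eval_count and eval_count on the response body. Normalization then reduces to a per-provider field mapping; a sketch, with the NormalizedUsage shape being an assumption:

// Common shape the tracker consumes, regardless of provider.
interface NormalizedUsage {
  inputTokens: number;
  outputTokens: number;
}

function normalizeUsage(provider: string, raw: any): NormalizedUsage {
  switch (provider) {
    case "anthropic":
      return {
        inputTokens: raw.usage?.input_tokens ?? 0,
        outputTokens: raw.usage?.output_tokens ?? 0,
      };
    case "openai":
      return {
        inputTokens: raw.usage?.prompt_tokens ?? 0,
        outputTokens: raw.usage?.completion_tokens ?? 0,
      };
    case "ollama":
      return {
        inputTokens: raw.prompt_eval_count ?? 0,
        outputTokens: raw.eval_count ?? 0,
      };
    default:
      return { inputTokens: 0, outputTokens: 0 };
  }
}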
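Microdollars keep the cost table integral for typical list prices: at 1,000,000 microdollars per US dollar, a price of $3 per million input tokens is exactly 3 microdollars per token. A sketch with illustrative prices, not the repo's actual table, and an assumed helper name:

interface ModelPricing {
  inputPerToken: number;  // microdollars per input token
  outputPerToken: number; // microdollars per output token
}

const COST_TABLE: Record<string, ModelPricing> = {
  // Illustrative numbers only.
  "claude-sonnet": { inputPerToken: 3, outputPerToken: 15 },
  "gpt-4o-mini": { inputPerToken: 0.15, outputPerToken: 0.6 },
};

function costMicrodollars(
  model: string,
  inputTokens: number,
  outputTokens: number,
): number {
  const pricing = COST_TABLE[model];
  if (!pricing) return 0; // unknown model: record usage, skip cost
  return Math.round(
    inputTokens * pricing.inputPerToken + outputTokens * pricing.outputPerToken,
  );
}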
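"Infer task type from calling context" admits several implementations; one plausible reading is mapping the captured call stack to known call sites, though an explicit parameter with a default would work equally well. A purely illustrative sketch:

// Hypothetical: classify the call by names visible in the stack trace.
function inferTaskType(): string {
  const stack = new Error().stack ?? "";
  if (stack.includes("embed")) return "embedding";
  if (stack.includes("summar")) return "summarization";
  if (stack.includes("chat")) return "chat";
  return "unknown";
}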
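For the instrumented chatStream path, usage is only known once the stream ends, and some providers omit usage metadata entirely. Aggregation therefore happens after the final chunk, falling back to a character-count estimate (roughly four characters per token for English-like text) when nothing was reported. A sketch, all names assumed:

async function* instrumentedStream(
  chunks: AsyncIterable<{ text: string; usage?: { outputTokens: number } }>,
  onDone: (outputTokens: number, estimated: boolean) => void,
): AsyncIterable<string> {
  let reported: number | undefined;
  let chars = 0;
  for await (const chunk of chunks) {
    chars += chunk.text.length;
    if (chunk.usage) reported = chunk.usage.outputTokens; // final-chunk usage wins
    yield chunk.text;
  }
  // Fallback estimation: ~4 characters per token.
  onDone(reported ?? Math.ceil(chars / 4), reported === undefined);
}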
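A Jest-style spot check in the spirit of the commit's test suite, exercising the illustrative cost helper sketched above:

describe("costMicrodollars", () => {
  it("prices input and output tokens separately", () => {
    // 1000 input tokens * 3 + 500 output tokens * 15 = 10500 microdollars
    expect(costMicrodollars("claude-sonnet", 1000, 500)).toBe(10500);
  });

  it("returns zero cost for unknown models", () => {
    expect(costMicrodollars("unlisted-model", 100, 100)).toBe(0);
  });
});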
import { Module } from "@nestjs/common";

import { LlmController } from "./llm.controller";
import { LlmProviderAdminController } from "./llm-provider-admin.controller";
import { LlmService } from "./llm.service";
import { LlmManagerService } from "./llm-manager.service";
import { LlmTelemetryTrackerService } from "./llm-telemetry-tracker.service";
import { PrismaModule } from "../prisma/prisma.module";
import { LlmUsageModule } from "../llm-usage/llm-usage.module";

@Module({
  imports: [PrismaModule, LlmUsageModule],
  controllers: [LlmController, LlmProviderAdminController],
  providers: [LlmService, LlmManagerService, LlmTelemetryTrackerService],
  exports: [LlmService, LlmManagerService],
})
export class LlmModule {}
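Note that LlmTelemetryTrackerService appears in providers but not in exports: telemetry stays internal to LlmModule, while LlmService and LlmManagerService remain its public surface. The PrismaModule and LlmUsageModule imports supply the persistence layer the tracker presumably writes through.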