feat(#309): Add LLM usage tracking and analytics
Implements comprehensive LLM usage tracking with analytics endpoints.

Implementation:
- Added LlmUsageLog model to Prisma schema
- Created llm-usage module with service, controller, and DTOs
- Added tracking for token usage, costs, and durations
- Implemented analytics aggregation by provider, model, and task type
- Added filtering by workspace, provider, model, user, and date range

Testing:
- 20 unit tests with 90.8% coverage (exceeds 85% requirement)
- Tests for service and controller with full error handling
- Tests use Vitest following project conventions

API Endpoints:
- GET /api/llm-usage/analytics - Aggregated usage analytics
- GET /api/llm-usage/by-workspace/:workspaceId - Workspace usage logs
- GET /api/llm-usage/by-workspace/:workspaceId/provider/:provider - Provider logs
- GET /api/llm-usage/by-workspace/:workspaceId/model/:model - Model logs

Database:
- LlmUsageLog table with indexes for efficient queries
- Relations to User, Workspace, and LlmProviderInstance
- Ready for migration with: pnpm prisma migrate dev

Refs #309

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
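For reference, here is a minimal sketch of the controller shape the spec below exercises. The method names, service calls, and the { data } response envelope come straight from the spec, and the routes from the endpoint list above; the decorator wiring and parameter handling are assumptions, not the actual implementation (the /api prefix is presumably applied as a global prefix).

// Hypothetical sketch of llm-usage.controller.ts -- method names and routes are
// taken from the spec and commit message; everything else is an assumption.
import { Controller, Get, Param, Query } from "@nestjs/common";
import { LlmUsageService } from "./llm-usage.service";
import type { UsageAnalyticsQueryDto } from "./dto";

@Controller("llm-usage")
export class LlmUsageController {
  constructor(private readonly llmUsageService: LlmUsageService) {}

  // GET /api/llm-usage/analytics
  @Get("analytics")
  async getAnalytics(@Query() query: UsageAnalyticsQueryDto) {
    return { data: await this.llmUsageService.getUsageAnalytics(query) };
  }

  // GET /api/llm-usage/by-workspace/:workspaceId
  @Get("by-workspace/:workspaceId")
  async getUsageByWorkspace(@Param("workspaceId") workspaceId: string) {
    return { data: await this.llmUsageService.getUsageByWorkspace(workspaceId) };
  }

  // GET /api/llm-usage/by-workspace/:workspaceId/provider/:provider
  @Get("by-workspace/:workspaceId/provider/:provider")
  async getUsageByProvider(
    @Param("workspaceId") workspaceId: string,
    @Param("provider") provider: string,
  ) {
    return { data: await this.llmUsageService.getUsageByProvider(workspaceId, provider) };
  }

  // GET /api/llm-usage/by-workspace/:workspaceId/model/:model
  @Get("by-workspace/:workspaceId/model/:model")
  async getUsageByModel(
    @Param("workspaceId") workspaceId: string,
    @Param("model") model: string,
  ) {
    return { data: await this.llmUsageService.getUsageByModel(workspaceId, model) };
  }
}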
apps/api/src/llm-usage/llm-usage.controller.spec.ts (new file, 210 lines)
@@ -0,0 +1,210 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { LlmUsageController } from "./llm-usage.controller";
import { LlmUsageService } from "./llm-usage.service";
import type { UsageAnalyticsQueryDto } from "./dto";

describe("LlmUsageController", () => {
  let controller: LlmUsageController;
  let service: LlmUsageService;

  const mockLlmUsageService = {
    getUsageAnalytics: vi.fn(),
    getUsageByWorkspace: vi.fn(),
    getUsageByProvider: vi.fn(),
    getUsageByModel: vi.fn(),
  };

  beforeEach(async () => {
    const module: TestingModule = await Test.createTestingModule({
      controllers: [LlmUsageController],
      providers: [
        {
          provide: LlmUsageService,
          useValue: mockLlmUsageService,
        },
      ],
    }).compile();

    controller = module.get<LlmUsageController>(LlmUsageController);
    service = module.get<LlmUsageService>(LlmUsageService);

    vi.clearAllMocks();
  });

  it("should be defined", () => {
    expect(controller).toBeDefined();
  });

  describe("getAnalytics", () => {
    it("should return usage analytics", async () => {
      const query: UsageAnalyticsQueryDto = {
        workspaceId: "workspace-123",
      };

      const expectedAnalytics = {
        totalCalls: 10,
        totalPromptTokens: 1000,
        totalCompletionTokens: 500,
        totalTokens: 1500,
        totalCostCents: 1.5,
        averageDurationMs: 1200,
        byProvider: [
          {
            provider: "ollama",
            calls: 10,
            promptTokens: 1000,
            completionTokens: 500,
            totalTokens: 1500,
            costCents: 1.5,
            averageDurationMs: 1200,
          },
        ],
        byModel: [
          {
            model: "llama3.2",
            calls: 10,
            promptTokens: 1000,
            completionTokens: 500,
            totalTokens: 1500,
            costCents: 1.5,
            averageDurationMs: 1200,
          },
        ],
        byTaskType: [
          {
            taskType: "chat",
            calls: 10,
            promptTokens: 1000,
            completionTokens: 500,
            totalTokens: 1500,
            costCents: 1.5,
            averageDurationMs: 1200,
          },
        ],
      };

      mockLlmUsageService.getUsageAnalytics.mockResolvedValue(expectedAnalytics);

      const result = await controller.getAnalytics(query);

      expect(result).toEqual({
        data: expectedAnalytics,
      });
      expect(service.getUsageAnalytics).toHaveBeenCalledWith(query);
    });

    it("should pass all query parameters to service", async () => {
      const query: UsageAnalyticsQueryDto = {
        workspaceId: "workspace-123",
        provider: "ollama",
        model: "llama3.2",
        userId: "user-456",
        startDate: "2024-01-01T00:00:00Z",
        endDate: "2024-01-31T23:59:59Z",
      };

      mockLlmUsageService.getUsageAnalytics.mockResolvedValue({
        totalCalls: 0,
        totalPromptTokens: 0,
        totalCompletionTokens: 0,
        totalTokens: 0,
        totalCostCents: 0,
        averageDurationMs: 0,
        byProvider: [],
        byModel: [],
        byTaskType: [],
      });

      await controller.getAnalytics(query);

      expect(service.getUsageAnalytics).toHaveBeenCalledWith(query);
    });
  });

  describe("getUsageByWorkspace", () => {
    it("should return usage logs for a workspace", async () => {
      const workspaceId = "workspace-123";
      const expectedLogs = [
        {
          id: "log-1",
          workspaceId,
          userId: "user-1",
          provider: "ollama",
          model: "llama3.2",
          promptTokens: 100,
          completionTokens: 50,
          totalTokens: 150,
          createdAt: new Date(),
        },
      ];

      mockLlmUsageService.getUsageByWorkspace.mockResolvedValue(expectedLogs);

      const result = await controller.getUsageByWorkspace(workspaceId);

      expect(result).toEqual({
        data: expectedLogs,
      });
      expect(service.getUsageByWorkspace).toHaveBeenCalledWith(workspaceId);
    });
  });

  describe("getUsageByProvider", () => {
    it("should return usage logs for a provider", async () => {
      const workspaceId = "workspace-123";
      const provider = "ollama";
      const expectedLogs = [
        {
          id: "log-1",
          workspaceId,
          userId: "user-1",
          provider,
          model: "llama3.2",
          promptTokens: 100,
          completionTokens: 50,
          totalTokens: 150,
          createdAt: new Date(),
        },
      ];

      mockLlmUsageService.getUsageByProvider.mockResolvedValue(expectedLogs);

      const result = await controller.getUsageByProvider(workspaceId, provider);

      expect(result).toEqual({
        data: expectedLogs,
      });
      expect(service.getUsageByProvider).toHaveBeenCalledWith(workspaceId, provider);
    });
  });

  describe("getUsageByModel", () => {
    it("should return usage logs for a model", async () => {
      const workspaceId = "workspace-123";
      const model = "llama3.2";
      const expectedLogs = [
        {
          id: "log-1",
          workspaceId,
          userId: "user-1",
          provider: "ollama",
          model,
          promptTokens: 100,
          completionTokens: 50,
          totalTokens: 150,
          createdAt: new Date(),
        },
      ];

      mockLlmUsageService.getUsageByModel.mockResolvedValue(expectedLogs);

      const result = await controller.getUsageByModel(workspaceId, model);

      expect(result).toEqual({
        data: expectedLogs,
      });
      expect(service.getUsageByModel).toHaveBeenCalledWith(workspaceId, model);
    });
  });
});
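The spec mocks LlmUsageService entirely, so the aggregation logic itself stays out of frame. As a rough sketch of what the per-provider rollup inside getUsageAnalytics might look like, assuming Prisma's groupBy aggregation and a durationMs column (the PrismaService import path and any field names beyond those visible in the mock data are guesses, not the actual implementation):

import { Injectable } from "@nestjs/common";
// Assumed location of the shared Prisma wrapper; adjust to the project's layout.
import { PrismaService } from "../prisma/prisma.service";

@Injectable()
export class LlmUsageServiceSketch {
  constructor(private readonly prisma: PrismaService) {}

  // Per-provider rollup feeding the byProvider section of the analytics payload.
  // byModel and byTaskType would follow the same pattern with by: ["model"]
  // and by: ["taskType"].
  async getProviderBreakdown(workspaceId: string) {
    const groups = await this.prisma.llmUsageLog.groupBy({
      by: ["provider"],
      where: { workspaceId },
      _count: { _all: true },
      _sum: {
        promptTokens: true,
        completionTokens: true,
        totalTokens: true,
        costCents: true,
      },
      _avg: { durationMs: true },
    });

    return groups.map((g) => ({
      provider: g.provider,
      calls: g._count._all,
      promptTokens: g._sum.promptTokens ?? 0,
      completionTokens: g._sum.completionTokens ?? 0,
      totalTokens: g._sum.totalTokens ?? 0,
      costCents: g._sum.costCents ?? 0,
      averageDurationMs: g._avg.durationMs ?? 0,
    }));
  }
}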
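Finally, a hypothetical client call against the analytics endpoint described in the commit message; the base URL and bearer token are placeholders:

// Hypothetical client call; host, port, and auth header are placeholders.
const params = new URLSearchParams({
  workspaceId: "workspace-123",
  provider: "ollama",
  startDate: "2024-01-01T00:00:00Z",
  endDate: "2024-01-31T23:59:59Z",
});

const res = await fetch(`http://localhost:3000/api/llm-usage/analytics?${params}`, {
  headers: { Authorization: "Bearer <token>" },
});
const { data } = await res.json();
console.log(data.totalTokens, data.byProvider);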