Files
stack/apps/api/src/llm/llm.controller.spec.ts
Jason Woltje 1f97e6de40 feat(#127): refactor LlmService to use provider pattern
Refactor LlmService to delegate to LlmManagerService instead of using
Ollama directly. This enables support for multiple providers and user-specific
provider configuration.

Changes:
- Remove direct Ollama client from LlmService
- Delegate all LLM operations to provider via LlmManagerService
- Update health status to use provider-agnostic interface
- Add PrismaModule to LlmModule for manager service
- Maintain backward compatibility with existing API
- Achieve 89.74% test coverage

Fixes #127

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-31 12:33:56 -06:00

104 lines
2.9 KiB
TypeScript

import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { LlmController } from "./llm.controller";
import { LlmService } from "./llm.service";
import type { ChatRequestDto } from "./dto";
describe("LlmController", () => {
  let controller: LlmController;

  // Mock covering the LlmService surface the controller delegates to.
  const mockService = {
    checkHealth: vi.fn(),
    listModels: vi.fn(),
    chat: vi.fn(),
    chatStream: vi.fn(),
    embed: vi.fn(),
  };

  beforeEach(async () => {
    // Reset call history and stubbed return values so tests cannot
    // leak into each other (mockService is shared across tests).
    vi.clearAllMocks();
    const module: TestingModule = await Test.createTestingModule({
      controllers: [LlmController],
      providers: [{ provide: LlmService, useValue: mockService }],
    }).compile();
    controller = module.get(LlmController);
  });

  it("should be defined", () => {
    expect(controller).toBeDefined();
  });

  describe("health", () => {
    it("should return status", async () => {
      const status = {
        healthy: true,
        provider: "ollama",
        endpoint: "http://localhost:11434",
      };
      mockService.checkHealth.mockResolvedValue(status);
      const result = await controller.health();
      expect(result).toEqual(status);
      // Verify the controller actually delegated to the service rather
      // than fabricating a matching value.
      expect(mockService.checkHealth).toHaveBeenCalledTimes(1);
    });
  });

  describe("listModels", () => {
    it("should return models", async () => {
      mockService.listModels.mockResolvedValue(["model1"]);
      const result = await controller.listModels();
      expect(result).toEqual({ models: ["model1"] });
      expect(mockService.listModels).toHaveBeenCalledTimes(1);
    });
  });

  describe("chat", () => {
    const request: ChatRequestDto = {
      model: "llama3.2",
      messages: [{ role: "user", content: "hello" }],
    };
    // Minimal Express-like response double; vi.clearAllMocks() in
    // beforeEach resets its call history between tests.
    const mockResponse = {
      setHeader: vi.fn(),
      write: vi.fn(),
      end: vi.fn(),
    };

    it("should return response for non-streaming chat", async () => {
      const chatResponse = {
        model: "llama3.2",
        message: { role: "assistant", content: "Hello!" },
        done: true,
      };
      mockService.chat.mockResolvedValue(chatResponse);
      const result = await controller.chat(request, mockResponse as never);
      expect(result).toEqual(chatResponse);
      // Non-streaming path must use chat(), never the stream delegate,
      // and must leave the raw response object untouched so Nest can
      // serialize the returned value itself.
      expect(mockService.chat).toHaveBeenCalledTimes(1);
      expect(mockService.chatStream).not.toHaveBeenCalled();
      expect(mockResponse.write).not.toHaveBeenCalled();
      expect(mockResponse.end).not.toHaveBeenCalled();
    });

    it("should stream response for streaming chat", async () => {
      mockService.chatStream.mockReturnValue(
        (async function* () {
          yield { model: "llama3.2", message: { role: "assistant", content: "Hi" }, done: true };
        })()
      );
      await controller.chat({ ...request, stream: true }, mockResponse as never);
      // Streaming path must use chatStream(), never the plain delegate.
      expect(mockService.chatStream).toHaveBeenCalledTimes(1);
      expect(mockService.chat).not.toHaveBeenCalled();
      expect(mockResponse.setHeader).toHaveBeenCalled();
      expect(mockResponse.end).toHaveBeenCalled();
    });
  });

  describe("embed", () => {
    it("should return embeddings", async () => {
      const embedResponse = { model: "llama3.2", embeddings: [[0.1, 0.2]] };
      mockService.embed.mockResolvedValue(embedResponse);
      const result = await controller.embed({ model: "llama3.2", input: ["text"] });
      expect(result).toEqual(embedResponse);
      // The DTO should be forwarded to the service unchanged.
      expect(mockService.embed).toHaveBeenCalledWith({ model: "llama3.2", input: ["text"] });
    });
  });
});