feat(#127): refactor LlmService to use provider pattern

Refactor LlmService to delegate to LlmManagerService instead of using
Ollama directly. This enables multiple provider support and user-specific
provider configuration.

Changes:
- Remove direct Ollama client from LlmService
- Delegate all LLM operations to provider via LlmManagerService
- Update health status to use provider-agnostic interface
- Add PrismaModule to LlmModule for manager service
- Maintain backward compatibility with existing API
- Achieve 89.74% test coverage

Fixes #127

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-31 12:33:56 -06:00
parent be6c15116d
commit 1f97e6de40
5 changed files with 433 additions and 133 deletions

View File

@@ -2,14 +2,102 @@ import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { LlmController } from "./llm.controller";
import { LlmService } from "./llm.service";
import type { ChatRequestDto, EmbedRequestDto } from "./dto";
import type { ChatRequestDto } from "./dto";
describe("LlmController", () => {
let controller: LlmController;
const mockService = { checkHealth: vi.fn(), listModels: vi.fn(), chat: vi.fn(), chatStream: vi.fn(), embed: vi.fn() };
beforeEach(async () => { vi.clearAllMocks(); controller = (await Test.createTestingModule({ controllers: [LlmController], providers: [{ provide: LlmService, useValue: mockService }] }).compile()).get(LlmController); });
it("should be defined", () => { expect(controller).toBeDefined(); });
describe("health", () => { it("should return status", async () => { const s = { healthy: true, host: "h" }; mockService.checkHealth.mockResolvedValue(s); expect(await controller.health()).toEqual(s); }); });
describe("listModels", () => { it("should return models", async () => { mockService.listModels.mockResolvedValue(["m1"]); expect(await controller.listModels()).toEqual({ models: ["m1"] }); }); });
describe("chat", () => { const req: ChatRequestDto = { model: "m", messages: [{ role: "user", content: "x" }] }; const res = { setHeader: vi.fn(), write: vi.fn(), end: vi.fn() }; it("should return response", async () => { const r = { model: "m", message: { role: "assistant", content: "y" }, done: true }; mockService.chat.mockResolvedValue(r); expect(await controller.chat(req, res as any)).toEqual(r); }); it("should stream", async () => { mockService.chatStream.mockReturnValue((async function* () { yield { model: "m", message: { role: "a", content: "x" }, done: true }; })()); await controller.chat({ ...req, stream: true }, res as any); expect(res.setHeader).toHaveBeenCalled(); expect(res.end).toHaveBeenCalled(); }); });
describe("embed", () => { it("should return embeddings", async () => { const r = { model: "m", embeddings: [[0.1]] }; mockService.embed.mockResolvedValue(r); expect(await controller.embed({ model: "m", input: ["x"] })).toEqual(r); }); });
const mockService = {
checkHealth: vi.fn(),
listModels: vi.fn(),
chat: vi.fn(),
chatStream: vi.fn(),
embed: vi.fn(),
};
beforeEach(async () => {
vi.clearAllMocks();
const module: TestingModule = await Test.createTestingModule({
controllers: [LlmController],
providers: [{ provide: LlmService, useValue: mockService }],
}).compile();
controller = module.get(LlmController);
});
it("should be defined", () => {
expect(controller).toBeDefined();
});
describe("health", () => {
it("should return status", async () => {
const status = {
healthy: true,
provider: "ollama",
endpoint: "http://localhost:11434",
};
mockService.checkHealth.mockResolvedValue(status);
const result = await controller.health();
expect(result).toEqual(status);
});
});
describe("listModels", () => {
it("should return models", async () => {
mockService.listModels.mockResolvedValue(["model1"]);
const result = await controller.listModels();
expect(result).toEqual({ models: ["model1"] });
});
});
describe("chat", () => {
const request: ChatRequestDto = {
model: "llama3.2",
messages: [{ role: "user", content: "hello" }],
};
const mockResponse = {
setHeader: vi.fn(),
write: vi.fn(),
end: vi.fn(),
};
it("should return response for non-streaming chat", async () => {
const chatResponse = {
model: "llama3.2",
message: { role: "assistant", content: "Hello!" },
done: true,
};
mockService.chat.mockResolvedValue(chatResponse);
const result = await controller.chat(request, mockResponse as never);
expect(result).toEqual(chatResponse);
});
it("should stream response for streaming chat", async () => {
mockService.chatStream.mockReturnValue(
(async function* () {
yield { model: "llama3.2", message: { role: "assistant", content: "Hi" }, done: true };
})()
);
await controller.chat({ ...request, stream: true }, mockResponse as never);
expect(mockResponse.setHeader).toHaveBeenCalled();
expect(mockResponse.end).toHaveBeenCalled();
});
});
describe("embed", () => {
it("should return embeddings", async () => {
const embedResponse = { model: "llama3.2", embeddings: [[0.1, 0.2]] };
mockService.embed.mockResolvedValue(embedResponse);
const result = await controller.embed({ model: "llama3.2", input: ["text"] });
expect(result).toEqual(embedResponse);
});
});
});