- Add Ollama client library (ollama npm package)
- Create LlmService for chat completion and embeddings
- Support streaming responses via Server-Sent Events
- Add configuration via env vars (OLLAMA_HOST, OLLAMA_TIMEOUT)
- Create endpoints: GET /llm/health, GET /llm/models, POST /llm/chat, POST /llm/embed
- Replace old OllamaModule with new LlmModule
- Add comprehensive tests with >85% coverage

Closes #21
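To make the endpoint list above concrete, here is a rough sketch of how a client could call POST /llm/chat with streaming enabled and consume the Server-Sent Events response. Only the path and the request shape (model, messages, stream) come from this commit and the spec below; the frame format (one "data: <json>" payload per event, events separated by a blank line) and the chunk fields are assumptions.

// Hypothetical client for the streaming chat endpoint: a sketch, not code from this repo.
async function streamChat(baseUrl: string, model: string, prompt: string): Promise<string> {
  const res = await fetch(`${baseUrl}/llm/chat`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: prompt }],
      stream: true, // ask the server for Server-Sent Events instead of a single JSON body
    }),
  });
  if (!res.ok || !res.body) throw new Error(`chat request failed: ${res.status}`);

  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  let buffer = "";
  let text = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += value;
    // Assumed framing: events separated by a blank line, each carrying one "data:" payload.
    let end: number;
    while ((end = buffer.indexOf("\n\n")) !== -1) {
      const frame = buffer.slice(0, end).trim();
      buffer = buffer.slice(end + 2);
      if (frame.startsWith("data:")) {
        const chunk = JSON.parse(frame.slice(5)); // assumed to mirror the ollama chat chunk shape
        text += chunk.message?.content ?? "";
      }
    }
  }
  return text;
}

A non-streaming call would omit stream and read a single JSON body; POST /llm/embed takes { model, input }, as exercised in the spec below.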
The controller spec added by this commit (TypeScript):
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { LlmController } from "./llm.controller";
import { LlmService } from "./llm.service";
import type { ChatRequestDto, EmbedRequestDto } from "./dto";

describe("LlmController", () => {
  let controller: LlmController;

  const mockService = {
    checkHealth: vi.fn(),
    listModels: vi.fn(),
    chat: vi.fn(),
    chatStream: vi.fn(),
    embed: vi.fn(),
  };

  beforeEach(async () => {
    vi.clearAllMocks();
    const module: TestingModule = await Test.createTestingModule({
      controllers: [LlmController],
      providers: [{ provide: LlmService, useValue: mockService }],
    }).compile();
    controller = module.get(LlmController);
  });

  it("should be defined", () => {
    expect(controller).toBeDefined();
  });

  describe("health", () => {
    it("should return status", async () => {
      const s = { healthy: true, host: "h" };
      mockService.checkHealth.mockResolvedValue(s);
      expect(await controller.health()).toEqual(s);
    });
  });

  describe("listModels", () => {
    it("should return models", async () => {
      mockService.listModels.mockResolvedValue(["m1"]);
      expect(await controller.listModels()).toEqual({ models: ["m1"] });
    });
  });

  describe("chat", () => {
    const req: ChatRequestDto = { model: "m", messages: [{ role: "user", content: "x" }] };
    const res = { setHeader: vi.fn(), write: vi.fn(), end: vi.fn() };

    it("should return response", async () => {
      const r = { model: "m", message: { role: "assistant", content: "y" }, done: true };
      mockService.chat.mockResolvedValue(r);
      expect(await controller.chat(req, res as any)).toEqual(r);
    });

    it("should stream", async () => {
      mockService.chatStream.mockReturnValue(
        (async function* () {
          yield { model: "m", message: { role: "a", content: "x" }, done: true };
        })(),
      );
      await controller.chat({ ...req, stream: true }, res as any);
      expect(res.setHeader).toHaveBeenCalled();
      expect(res.end).toHaveBeenCalled();
    });
  });

  describe("embed", () => {
    it("should return embeddings", async () => {
      const r = { model: "m", embeddings: [[0.1]] };
      mockService.embed.mockResolvedValue(r);
      expect(await controller.embed({ model: "m", input: ["x"] })).toEqual(r);
    });
  });
});
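For orientation, the LlmService that this spec stubs out might look roughly like the sketch below. It is not the implementation from this commit: it only mirrors the five mocked methods (checkHealth, listModels, chat, chatStream, embed), assumes the ollama npm package's Ollama client (list, chat, embed), and guesses the method signatures; OLLAMA_TIMEOUT handling, DTO validation, and error mapping are left out.

// Hypothetical llm.service.ts: a sketch only, not the file from this commit.
import { Injectable } from "@nestjs/common";
import { Ollama, type Message } from "ollama";

@Injectable()
export class LlmService {
  private readonly host = process.env.OLLAMA_HOST ?? "http://127.0.0.1:11434";
  private readonly client = new Ollama({ host: this.host });

  async checkHealth(): Promise<{ healthy: boolean; host: string }> {
    try {
      await this.client.list(); // any successful round trip to Ollama counts as healthy
      return { healthy: true, host: this.host };
    } catch {
      return { healthy: false, host: this.host };
    }
  }

  async listModels(): Promise<string[]> {
    const { models } = await this.client.list();
    return models.map((m) => m.name);
  }

  // Single non-streaming completion (the "should return response" case).
  chat(model: string, messages: Message[]) {
    return this.client.chat({ model, messages });
  }

  // Incremental chunks for the streaming case; the controller writes these out as SSE frames.
  async *chatStream(model: string, messages: Message[]) {
    const stream = await this.client.chat({ model, messages, stream: true });
    for await (const chunk of stream) {
      yield chunk;
    }
  }

  embed(model: string, input: string[]) {
    return this.client.embed({ model, input });
  }
}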