feat(#127): refactor LlmService to use provider pattern

Refactor LlmService to delegate to LlmManagerService instead of using
Ollama directly. This enables multiple provider support and user-specific
provider configuration.

Changes:
- Remove direct Ollama client from LlmService
- Delegate all LLM operations to provider via LlmManagerService
- Update health status to use provider-agnostic interface
- Add PrismaModule to LlmModule for manager service
- Maintain backward compatibility with existing API
- Achieve 89.74% test coverage

Fixes #127

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-31 12:33:56 -06:00
parent be6c15116d
commit 1f97e6de40
5 changed files with 433 additions and 133 deletions

View File

@@ -1,19 +1,219 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { ServiceUnavailableException } from "@nestjs/common";
import { LlmService } from "./llm.service";
import { LlmManagerService } from "./llm-manager.service";
import type {
  ChatRequestDto,
  EmbedRequestDto,
  ChatResponseDto,
  EmbedResponseDto,
} from "./dto";
import type { LlmProviderHealthStatus } from "./providers/llm-provider.interface";

/**
 * Unit tests for LlmService after the provider-pattern refactor (#127).
 *
 * LlmService no longer talks to Ollama directly; every operation resolves the
 * default provider via LlmManagerService.getDefaultProvider() and delegates to
 * it. These tests mock the manager/provider pair and verify three contracts:
 * delegation (the provider method is called with the original request),
 * passthrough (the provider's result is returned unchanged), and error
 * translation (provider failures surface as ServiceUnavailableException,
 * except checkHealth which reports an unhealthy status instead of throwing).
 */
describe("LlmService", () => {
  let service: LlmService;

  // Minimal stand-in for LlmManagerService — only getDefaultProvider is used.
  let mockManagerService: {
    getDefaultProvider: ReturnType<typeof vi.fn>;
  };

  // Structural mock of the provider interface; fresh mock fns are created in
  // beforeEach so no vi.clearAllMocks() is needed between tests.
  let mockProvider: {
    chat: ReturnType<typeof vi.fn>;
    chatStream: ReturnType<typeof vi.fn>;
    embed: ReturnType<typeof vi.fn>;
    listModels: ReturnType<typeof vi.fn>;
    checkHealth: ReturnType<typeof vi.fn>;
    name: string;
    type: string;
  };

  beforeEach(async () => {
    // Create mock provider
    mockProvider = {
      chat: vi.fn(),
      chatStream: vi.fn(),
      embed: vi.fn(),
      listModels: vi.fn(),
      checkHealth: vi.fn(),
      name: "Test Provider",
      type: "ollama",
    };
    // Create mock manager service that always resolves the mock provider
    mockManagerService = {
      getDefaultProvider: vi.fn().mockResolvedValue(mockProvider),
    };
    const module: TestingModule = await Test.createTestingModule({
      providers: [
        LlmService,
        {
          provide: LlmManagerService,
          useValue: mockManagerService,
        },
      ],
    }).compile();
    service = module.get<LlmService>(LlmService);
  });

  it("should be defined", () => {
    expect(service).toBeDefined();
  });

  describe("checkHealth", () => {
    it("should delegate to provider and return healthy status", async () => {
      const healthStatus: LlmProviderHealthStatus = {
        healthy: true,
        provider: "ollama",
        endpoint: "http://localhost:11434",
        models: ["llama3.2"],
      };
      mockProvider.checkHealth.mockResolvedValue(healthStatus);
      const result = await service.checkHealth();
      expect(mockManagerService.getDefaultProvider).toHaveBeenCalled();
      expect(mockProvider.checkHealth).toHaveBeenCalled();
      expect(result).toEqual(healthStatus);
    });

    it("should return unhealthy status on error", async () => {
      // checkHealth must not throw — a failing provider is reported as
      // an unhealthy status so health endpoints stay responsive.
      mockProvider.checkHealth.mockRejectedValue(new Error("Connection failed"));
      const result = await service.checkHealth();
      expect(result.healthy).toBe(false);
      expect(result.error).toContain("Connection failed");
    });

    it("should handle manager service failure", async () => {
      // Provider resolution itself failing (e.g. no provider configured)
      // is also reported as unhealthy rather than thrown.
      mockManagerService.getDefaultProvider.mockRejectedValue(new Error("No provider configured"));
      const result = await service.checkHealth();
      expect(result.healthy).toBe(false);
      expect(result.error).toContain("No provider configured");
    });
  });

  describe("listModels", () => {
    it("should delegate to provider and return models", async () => {
      const models = ["llama3.2", "mistral"];
      mockProvider.listModels.mockResolvedValue(models);
      const result = await service.listModels();
      expect(mockManagerService.getDefaultProvider).toHaveBeenCalled();
      expect(mockProvider.listModels).toHaveBeenCalled();
      expect(result).toEqual(models);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      mockProvider.listModels.mockRejectedValue(new Error("Failed to fetch models"));
      await expect(service.listModels()).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("chat", () => {
    const request: ChatRequestDto = {
      model: "llama3.2",
      messages: [{ role: "user", content: "Hi" }],
    };

    it("should delegate to provider and return response", async () => {
      const response: ChatResponseDto = {
        model: "llama3.2",
        message: { role: "assistant", content: "Hello" },
        done: true,
        totalDuration: 1000,
      };
      mockProvider.chat.mockResolvedValue(response);
      const result = await service.chat(request);
      expect(mockManagerService.getDefaultProvider).toHaveBeenCalled();
      expect(mockProvider.chat).toHaveBeenCalledWith(request);
      expect(result).toEqual(response);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      mockProvider.chat.mockRejectedValue(new Error("Chat failed"));
      await expect(service.chat(request)).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("chatStream", () => {
    const request: ChatRequestDto = {
      model: "llama3.2",
      messages: [{ role: "user", content: "Hi" }],
      stream: true,
    };

    it("should delegate to provider and yield chunks", async () => {
      async function* mockGenerator(): AsyncGenerator<ChatResponseDto> {
        yield {
          model: "llama3.2",
          message: { role: "assistant", content: "Hello" },
          done: false,
        };
        yield {
          model: "llama3.2",
          message: { role: "assistant", content: " world" },
          done: true,
        };
      }
      mockProvider.chatStream.mockReturnValue(mockGenerator());
      const chunks: ChatResponseDto[] = [];
      for await (const chunk of service.chatStream(request)) {
        chunks.push(chunk);
      }
      expect(mockManagerService.getDefaultProvider).toHaveBeenCalled();
      expect(mockProvider.chatStream).toHaveBeenCalledWith(request);
      expect(chunks.length).toBe(2);
      expect(chunks[0].message.content).toBe("Hello");
      expect(chunks[1].message.content).toBe(" world");
    });

    it("should throw ServiceUnavailableException on error", async () => {
      // Generator throws before its first yield; the wrapping stream must
      // surface the failure as ServiceUnavailableException on next().
      // eslint-disable-next-line require-yield -- intentionally yields nothing
      async function* errorGenerator(): AsyncGenerator<ChatResponseDto> {
        throw new Error("Stream failed");
      }
      mockProvider.chatStream.mockReturnValue(errorGenerator());
      const generator = service.chatStream(request);
      await expect(generator.next()).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("embed", () => {
    const request: EmbedRequestDto = {
      model: "llama3.2",
      input: ["test text"],
    };

    it("should delegate to provider and return embeddings", async () => {
      const response: EmbedResponseDto = {
        model: "llama3.2",
        embeddings: [[0.1, 0.2, 0.3]],
        totalDuration: 500,
      };
      mockProvider.embed.mockResolvedValue(response);
      const result = await service.embed(request);
      expect(mockManagerService.getDefaultProvider).toHaveBeenCalled();
      expect(mockProvider.embed).toHaveBeenCalledWith(request);
      expect(result).toEqual(response);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      mockProvider.embed.mockRejectedValue(new Error("Embedding failed"));
      await expect(service.embed(request)).rejects.toThrow(ServiceUnavailableException);
    });
  });
});