Files
stack/apps/api/src/llm/llm.service.spec.ts
Jason Woltje 1f97e6de40 feat(#127): refactor LlmService to use provider pattern
Refactor LlmService to delegate to LlmManagerService instead of using
Ollama directly. This enables multiple provider support and user-specific
provider configuration.

Changes:
- Remove direct Ollama client from LlmService
- Delegate all LLM operations to provider via LlmManagerService
- Update health status to use provider-agnostic interface
- Add PrismaModule to LlmModule for manager service
- Maintain backward compatibility with existing API
- Achieve 89.74% test coverage

Fixes #127

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-31 12:33:56 -06:00

220 lines
6.8 KiB
TypeScript

import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { ServiceUnavailableException } from "@nestjs/common";
import { LlmService } from "./llm.service";
import { LlmManagerService } from "./llm-manager.service";
import type { ChatRequestDto, EmbedRequestDto, ChatResponseDto, EmbedResponseDto } from "./dto";
import type {
LlmProviderInterface,
LlmProviderHealthStatus,
} from "./providers/llm-provider.interface";
describe("LlmService", () => {
  // Shorthand for vitest mock functions — avoids repeating ReturnType<typeof vi.fn>.
  type MockFn = ReturnType<typeof vi.fn>;

  let service: LlmService;
  // Test double for LlmManagerService: only the method LlmService calls.
  let managerMock: {
    getDefaultProvider: MockFn;
  };
  // Test double satisfying the provider surface LlmService delegates to.
  let providerMock: {
    chat: MockFn;
    chatStream: MockFn;
    embed: MockFn;
    listModels: MockFn;
    checkHealth: MockFn;
    name: string;
    type: string;
  };

  beforeEach(async () => {
    // Fresh provider double per test so call counts never leak across cases.
    providerMock = {
      chat: vi.fn(),
      chatStream: vi.fn(),
      embed: vi.fn(),
      listModels: vi.fn(),
      checkHealth: vi.fn(),
      name: "Test Provider",
      type: "ollama",
    };

    // Manager double always resolves to the provider double above.
    managerMock = {
      getDefaultProvider: vi.fn().mockResolvedValue(providerMock),
    };

    const moduleRef: TestingModule = await Test.createTestingModule({
      providers: [
        LlmService,
        { provide: LlmManagerService, useValue: managerMock },
      ],
    }).compile();

    service = moduleRef.get<LlmService>(LlmService);
  });

  it("should be defined", () => {
    expect(service).toBeDefined();
  });

  describe("checkHealth", () => {
    it("should delegate to provider and return healthy status", async () => {
      const status: LlmProviderHealthStatus = {
        healthy: true,
        provider: "ollama",
        endpoint: "http://localhost:11434",
        models: ["llama3.2"],
      };
      providerMock.checkHealth.mockResolvedValue(status);

      const result = await service.checkHealth();

      expect(managerMock.getDefaultProvider).toHaveBeenCalled();
      expect(providerMock.checkHealth).toHaveBeenCalled();
      expect(result).toEqual(status);
    });

    it("should return unhealthy status on error", async () => {
      providerMock.checkHealth.mockRejectedValue(new Error("Connection failed"));

      const result = await service.checkHealth();

      // checkHealth reports failures in the status object rather than throwing.
      expect(result.healthy).toBe(false);
      expect(result.error).toContain("Connection failed");
    });

    it("should handle manager service failure", async () => {
      managerMock.getDefaultProvider.mockRejectedValue(
        new Error("No provider configured"),
      );

      const result = await service.checkHealth();

      expect(result.healthy).toBe(false);
      expect(result.error).toContain("No provider configured");
    });
  });

  describe("listModels", () => {
    it("should delegate to provider and return models", async () => {
      const models = ["llama3.2", "mistral"];
      providerMock.listModels.mockResolvedValue(models);

      const result = await service.listModels();

      expect(managerMock.getDefaultProvider).toHaveBeenCalled();
      expect(providerMock.listModels).toHaveBeenCalled();
      expect(result).toEqual(models);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      providerMock.listModels.mockRejectedValue(new Error("Failed to fetch models"));

      await expect(service.listModels()).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("chat", () => {
    // Shared request fixture for both chat cases.
    const request: ChatRequestDto = {
      model: "llama3.2",
      messages: [{ role: "user", content: "Hi" }],
    };

    it("should delegate to provider and return response", async () => {
      const response: ChatResponseDto = {
        model: "llama3.2",
        message: { role: "assistant", content: "Hello" },
        done: true,
        totalDuration: 1000,
      };
      providerMock.chat.mockResolvedValue(response);

      const result = await service.chat(request);

      expect(managerMock.getDefaultProvider).toHaveBeenCalled();
      expect(providerMock.chat).toHaveBeenCalledWith(request);
      expect(result).toEqual(response);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      providerMock.chat.mockRejectedValue(new Error("Chat failed"));

      await expect(service.chat(request)).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("chatStream", () => {
    const request: ChatRequestDto = {
      model: "llama3.2",
      messages: [{ role: "user", content: "Hi" }],
      stream: true,
    };

    it("should delegate to provider and yield chunks", async () => {
      // Two-chunk fixture: a partial chunk followed by the terminal one.
      const streamed: ChatResponseDto[] = [
        {
          model: "llama3.2",
          message: { role: "assistant", content: "Hello" },
          done: false,
        },
        {
          model: "llama3.2",
          message: { role: "assistant", content: " world" },
          done: true,
        },
      ];
      async function* fakeStream(): AsyncGenerator<ChatResponseDto> {
        yield* streamed;
      }
      providerMock.chatStream.mockReturnValue(fakeStream());

      const received: ChatResponseDto[] = [];
      for await (const chunk of service.chatStream(request)) {
        received.push(chunk);
      }

      expect(managerMock.getDefaultProvider).toHaveBeenCalled();
      expect(providerMock.chatStream).toHaveBeenCalledWith(request);
      expect(received.length).toBe(2);
      expect(received[0].message.content).toBe("Hello");
      expect(received[1].message.content).toBe(" world");
    });

    it("should throw ServiceUnavailableException on error", async () => {
      // Generator that fails as soon as it is pulled from.
      async function* failingStream(): AsyncGenerator<ChatResponseDto> {
        throw new Error("Stream failed");
      }
      providerMock.chatStream.mockReturnValue(failingStream());

      const stream = service.chatStream(request);

      await expect(stream.next()).rejects.toThrow(ServiceUnavailableException);
    });
  });

  describe("embed", () => {
    const request: EmbedRequestDto = {
      model: "llama3.2",
      input: ["test text"],
    };

    it("should delegate to provider and return embeddings", async () => {
      const response: EmbedResponseDto = {
        model: "llama3.2",
        embeddings: [[0.1, 0.2, 0.3]],
        totalDuration: 500,
      };
      providerMock.embed.mockResolvedValue(response);

      const result = await service.embed(request);

      expect(managerMock.getDefaultProvider).toHaveBeenCalled();
      expect(providerMock.embed).toHaveBeenCalledWith(request);
      expect(result).toEqual(response);
    });

    it("should throw ServiceUnavailableException on error", async () => {
      providerMock.embed.mockRejectedValue(new Error("Embedding failed"));

      await expect(service.embed(request)).rejects.toThrow(ServiceUnavailableException);
    });
  });
});