feat(#122): create LLM provider interface

Implemented abstract LLM provider interface to enable multi-provider support.

Key components:
- LlmProviderInterface: Abstract contract for all LLM providers
- LlmProviderConfig: Base configuration interface
- LlmProviderHealthStatus: Standardized health check response
- LlmProviderType: Type discriminator for runtime checks

Methods defined:
- initialize(): Async provider setup
- checkHealth(): Health status verification
- listModels(): Available model enumeration
- chat(): Synchronous completion
- chatStream(): Streaming completion (async generator)
- embed(): Embedding generation
- getConfig(): Configuration access

All methods fully documented with JSDoc.
13 tests written and passing.
Type checking verified.

Fixes #122

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-31 11:38:38 -06:00
parent a0d4249967
commit dc4f6cbb9d
4 changed files with 469 additions and 0 deletions

View File

@@ -0,0 +1 @@
// Barrel export: forwards the LLM provider contract (LlmProviderInterface,
// LlmProviderConfig, LlmProviderHealthStatus, LlmProviderType) to consumers.
export * from "./llm-provider.interface";

View File

@@ -0,0 +1,227 @@
import { describe, it, expect, beforeEach } from "vitest";
import type {
LlmProviderInterface,
LlmProviderConfig,
LlmProviderHealthStatus,
} from "./llm-provider.interface";
import type { ChatRequestDto, ChatResponseDto, EmbedRequestDto, EmbedResponseDto } from "../dto";
/**
 * Minimal in-memory provider used to exercise the interface contract in tests.
 * Every data-returning method rejects until initialize() has been awaited.
 */
class MockLlmProvider implements LlmProviderInterface {
  readonly name = "mock";
  readonly type = "ollama" as const;

  // Flipped to true once initialize() completes.
  private ready = false;

  constructor(private config: LlmProviderConfig) {}

  /** Rejects with the canonical error when initialize() has not yet run. */
  private assertReady(): void {
    if (!this.ready) {
      throw new Error("Provider not initialized");
    }
  }

  async initialize(): Promise<void> {
    this.ready = true;
  }

  async checkHealth(): Promise<LlmProviderHealthStatus> {
    const { endpoint } = this.config;
    return {
      healthy: this.ready,
      provider: this.name,
      endpoint,
    };
  }

  async listModels(): Promise<string[]> {
    this.assertReady();
    return ["mock-model-1", "mock-model-2"];
  }

  async chat(request: ChatRequestDto): Promise<ChatResponseDto> {
    this.assertReady();
    return {
      model: request.model,
      message: { role: "assistant", content: "Mock response" },
      done: true,
    };
  }

  async *chatStream(request: ChatRequestDto): AsyncGenerator<ChatResponseDto> {
    // Note: the guard runs lazily, on the first next() call, because async
    // generator bodies do not execute until iteration begins.
    this.assertReady();
    const pieces: Array<[string, boolean]> = [
      ["Mock ", false],
      ["stream", true],
    ];
    for (const [content, done] of pieces) {
      yield {
        model: request.model,
        message: { role: "assistant", content },
        done,
      };
    }
  }

  async embed(request: EmbedRequestDto): Promise<EmbedResponseDto> {
    this.assertReady();
    const embeddings = request.input.map(() => [0.1, 0.2, 0.3]);
    return { model: request.model, embeddings };
  }

  getConfig(): LlmProviderConfig {
    // Shallow copy so callers cannot mutate internal configuration.
    return { ...this.config };
  }
}
/**
 * Contract tests for LlmProviderInterface, exercised through MockLlmProvider.
 * Each sub-suite covers one interface method, including the "not initialized"
 * failure mode for every method that requires initialize() to have run.
 */
describe("LlmProviderInterface", () => {
  let provider: LlmProviderInterface;

  beforeEach(() => {
    provider = new MockLlmProvider({
      endpoint: "http://localhost:8000",
      timeout: 30000,
    });
  });

  describe("initialization", () => {
    it("should initialize successfully", async () => {
      await expect(provider.initialize()).resolves.toBeUndefined();
    });

    it("should have name and type properties", () => {
      expect(provider.name).toBeDefined();
      expect(provider.type).toBeDefined();
      expect(typeof provider.name).toBe("string");
      expect(typeof provider.type).toBe("string");
    });
  });

  describe("checkHealth", () => {
    it("should return health status", async () => {
      await provider.initialize();
      const health = await provider.checkHealth();
      expect(health).toHaveProperty("healthy");
      expect(health).toHaveProperty("provider");
      expect(health.healthy).toBe(true);
      expect(health.provider).toBe("mock");
    });

    it("should include endpoint in health status", async () => {
      await provider.initialize();
      const health = await provider.checkHealth();
      expect(health.endpoint).toBe("http://localhost:8000");
    });
  });

  describe("listModels", () => {
    it("should return array of model names", async () => {
      await provider.initialize();
      const models = await provider.listModels();
      expect(Array.isArray(models)).toBe(true);
      expect(models.length).toBeGreaterThan(0);
      models.forEach((model) => expect(typeof model).toBe("string"));
    });

    it("should throw if not initialized", async () => {
      await expect(provider.listModels()).rejects.toThrow("not initialized");
    });
  });

  describe("chat", () => {
    it("should return chat response", async () => {
      await provider.initialize();
      const request: ChatRequestDto = {
        model: "test-model",
        messages: [{ role: "user", content: "Hello" }],
      };
      const response = await provider.chat(request);
      expect(response).toHaveProperty("model");
      expect(response).toHaveProperty("message");
      expect(response).toHaveProperty("done");
      expect(response.message.role).toBe("assistant");
      expect(typeof response.message.content).toBe("string");
    });

    it("should throw if not initialized", async () => {
      const request: ChatRequestDto = {
        model: "test-model",
        messages: [{ role: "user", content: "Hello" }],
      };
      await expect(provider.chat(request)).rejects.toThrow("not initialized");
    });
  });

  describe("chatStream", () => {
    it("should yield chat response chunks", async () => {
      await provider.initialize();
      const request: ChatRequestDto = {
        model: "test-model",
        messages: [{ role: "user", content: "Hello" }],
      };
      const chunks: ChatResponseDto[] = [];
      for await (const chunk of provider.chatStream(request)) {
        chunks.push(chunk);
      }
      expect(chunks.length).toBeGreaterThan(0);
      chunks.forEach((chunk) => {
        expect(chunk).toHaveProperty("model");
        expect(chunk).toHaveProperty("message");
        expect(chunk).toHaveProperty("done");
      });
      expect(chunks[chunks.length - 1].done).toBe(true);
    });

    // Async generators fail lazily: the body (and its initialization guard)
    // only runs on the first next() call, so drain the stream to surface it.
    it("should throw if not initialized", async () => {
      const request: ChatRequestDto = {
        model: "test-model",
        messages: [{ role: "user", content: "Hello" }],
      };
      const drain = async () => {
        for await (const _chunk of provider.chatStream(request)) {
          // no-op: we only care that iteration rejects
        }
      };
      await expect(drain()).rejects.toThrow("not initialized");
    });
  });

  describe("embed", () => {
    it("should return embeddings", async () => {
      await provider.initialize();
      const request: EmbedRequestDto = {
        model: "test-model",
        input: ["text1", "text2"],
      };
      const response = await provider.embed(request);
      expect(response).toHaveProperty("model");
      expect(response).toHaveProperty("embeddings");
      expect(Array.isArray(response.embeddings)).toBe(true);
      expect(response.embeddings.length).toBe(request.input.length);
      response.embeddings.forEach((embedding) => {
        expect(Array.isArray(embedding)).toBe(true);
        expect(embedding.length).toBeGreaterThan(0);
      });
    });

    it("should throw if not initialized", async () => {
      const request: EmbedRequestDto = {
        model: "test-model",
        input: ["text1"],
      };
      await expect(provider.embed(request)).rejects.toThrow("not initialized");
    });
  });

  describe("getConfig", () => {
    it("should return provider configuration", () => {
      const config = provider.getConfig();
      expect(config).toHaveProperty("endpoint");
      expect(config).toHaveProperty("timeout");
      expect(config.endpoint).toBe("http://localhost:8000");
      expect(config.timeout).toBe(30000);
    });

    it("should return a copy of config, not reference", () => {
      const config1 = provider.getConfig();
      const config2 = provider.getConfig();
      expect(config1).not.toBe(config2);
      expect(config1).toEqual(config2);
    });
  });
});

View File

@@ -0,0 +1,160 @@
import type { ChatRequestDto, ChatResponseDto, EmbedRequestDto, EmbedResponseDto } from "../dto";
/**
 * Base configuration shared by every LLM provider.
 * Provider-specific implementations can extend this interface with
 * additional strongly-typed options.
 */
export interface LlmProviderConfig {
  /**
   * Provider endpoint URL (e.g., "http://localhost:11434" for Ollama)
   */
  endpoint: string;
  /**
   * Request timeout in milliseconds
   * @default 30000
   */
  timeout?: number;
  /**
   * Additional provider-specific configuration.
   *
   * NOTE(review): this broad index signature disables excess-property
   * checking on literals typed as LlmProviderConfig (typos in option names
   * compile silently). Prefer extending the interface in provider subtypes
   * if that safety matters — confirm before tightening, as removing it
   * would be a breaking change for existing extenders.
   */
  [key: string]: unknown;
}
/**
 * Health status returned by provider health checks (see checkHealth()).
 */
export interface LlmProviderHealthStatus {
  /**
   * Whether the provider is healthy and ready to accept requests
   */
  healthy: boolean;
  /**
   * Provider name (e.g., "ollama", "claude", "openai")
   */
  provider: string;
  /**
   * Provider endpoint being checked
   */
  endpoint?: string;
  /**
   * Error message if unhealthy; expected to be absent when healthy is true
   */
  error?: string;
  /**
   * Available models (optional, for providers that support listing)
   */
  models?: string[];
  /**
   * Additional metadata about the health check
   */
  metadata?: Record<string, unknown>;
}
/**
 * Provider type discriminator for runtime type checking.
 * Extend this union when adding support for a new backend.
 */
export type LlmProviderType = "ollama" | "claude" | "openai";
/**
 * Abstract interface that all LLM providers must implement.
 * Supports multiple LLM backends (Ollama, Claude, OpenAI, etc.)
 *
 * @example
 * ```typescript
 * class OllamaProvider implements LlmProviderInterface {
 *   readonly name = "ollama";
 *   readonly type = "ollama";
 *
 *   constructor(config: OllamaProviderConfig) {
 *     // Initialize provider
 *   }
 *
 *   async initialize(): Promise<void> {
 *     // Setup provider connection
 *   }
 *
 *   async chat(request: ChatRequestDto): Promise<ChatResponseDto> {
 *     // Implement chat completion
 *   }
 *
 *   // ... implement other methods
 * }
 * ```
 */
export interface LlmProviderInterface {
  /**
   * Human-readable provider name (e.g., "Ollama", "Claude", "OpenAI")
   */
  readonly name: string;
  /**
   * Provider type discriminator for runtime type checking
   */
  readonly type: LlmProviderType;
  /**
   * Initialize the provider connection and resources.
   * Called once during provider instantiation.
   *
   * @throws {Error} If initialization fails
   */
  initialize(): Promise<void>;
  /**
   * Check if the provider is healthy and ready to accept requests.
   *
   * @returns Health status with provider details
   */
  checkHealth(): Promise<LlmProviderHealthStatus>;
  /**
   * List all available models from this provider.
   *
   * @returns Array of model names
   * @throws {Error} If provider is not initialized or request fails
   */
  listModels(): Promise<string[]>;
  /**
   * Perform a synchronous chat completion.
   *
   * @param request - Chat request with messages and configuration
   * @returns Complete chat response
   * @throws {Error} If provider is not initialized or request fails
   */
  chat(request: ChatRequestDto): Promise<ChatResponseDto>;
  /**
   * Perform a streaming chat completion.
   * Yields response chunks as they arrive from the provider.
   *
   * NOTE(review): AsyncGenerator<ChatResponseDto> leaves TReturn/TNext at
   * their `any` defaults, which leaks `any` through this exported type.
   * Consider AsyncGenerator<ChatResponseDto, void, undefined> — but verify
   * existing implementations still conform before tightening.
   *
   * @param request - Chat request with messages and configuration
   * @yields Chat response chunks
   * @throws {Error} If provider is not initialized or request fails
   */
  chatStream(request: ChatRequestDto): AsyncGenerator<ChatResponseDto>;
  /**
   * Generate embeddings for the given input texts.
   *
   * @param request - Embedding request with model and input texts
   * @returns Embeddings response with vector arrays
   * @throws {Error} If provider is not initialized or request fails
   */
  embed(request: EmbedRequestDto): Promise<EmbedResponseDto>;
  /**
   * Get the current provider configuration.
   * Should return a copy to prevent external modification.
   *
   * @returns Provider configuration object
   */
  getConfig(): LlmProviderConfig;
}