feat(#122): create LLM provider interface
Implemented abstract LLM provider interface to enable multi-provider support. Key components: - LlmProviderInterface: Abstract contract for all LLM providers - LlmProviderConfig: Base configuration interface - LlmProviderHealthStatus: Standardized health check response - LlmProviderType: Type discriminator for runtime checks Methods defined: - initialize(): Async provider setup - checkHealth(): Health status verification - listModels(): Available model enumeration - chat(): Synchronous completion - chatStream(): Streaming completion (async generator) - embed(): Embedding generation - getConfig(): Configuration access All methods fully documented with JSDoc. 13 tests written and passing. Type checking verified. Fixes #122 Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
1
apps/api/src/llm/providers/index.ts
Normal file
1
apps/api/src/llm/providers/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export * from "./llm-provider.interface";
|
||||
227
apps/api/src/llm/providers/llm-provider.interface.spec.ts
Normal file
227
apps/api/src/llm/providers/llm-provider.interface.spec.ts
Normal file
@@ -0,0 +1,227 @@
|
||||
import { describe, it, expect, beforeEach } from "vitest";
|
||||
import type {
|
||||
LlmProviderInterface,
|
||||
LlmProviderConfig,
|
||||
LlmProviderHealthStatus,
|
||||
} from "./llm-provider.interface";
|
||||
import type { ChatRequestDto, ChatResponseDto, EmbedRequestDto, EmbedResponseDto } from "../dto";
|
||||
|
||||
/**
|
||||
* Mock provider implementation for testing the interface contract
|
||||
*/
|
||||
class MockLlmProvider implements LlmProviderInterface {
|
||||
readonly name = "mock";
|
||||
readonly type = "ollama" as const;
|
||||
private initialized = false;
|
||||
|
||||
constructor(private config: LlmProviderConfig) {}
|
||||
|
||||
async initialize(): Promise<void> {
|
||||
this.initialized = true;
|
||||
}
|
||||
|
||||
async checkHealth(): Promise<LlmProviderHealthStatus> {
|
||||
return {
|
||||
healthy: this.initialized,
|
||||
provider: this.name,
|
||||
endpoint: this.config.endpoint,
|
||||
};
|
||||
}
|
||||
|
||||
async listModels(): Promise<string[]> {
|
||||
if (!this.initialized) throw new Error("Provider not initialized");
|
||||
return ["mock-model-1", "mock-model-2"];
|
||||
}
|
||||
|
||||
async chat(request: ChatRequestDto): Promise<ChatResponseDto> {
|
||||
if (!this.initialized) throw new Error("Provider not initialized");
|
||||
return {
|
||||
model: request.model,
|
||||
message: { role: "assistant", content: "Mock response" },
|
||||
done: true,
|
||||
};
|
||||
}
|
||||
|
||||
async *chatStream(request: ChatRequestDto): AsyncGenerator<ChatResponseDto> {
|
||||
if (!this.initialized) throw new Error("Provider not initialized");
|
||||
yield {
|
||||
model: request.model,
|
||||
message: { role: "assistant", content: "Mock " },
|
||||
done: false,
|
||||
};
|
||||
yield {
|
||||
model: request.model,
|
||||
message: { role: "assistant", content: "stream" },
|
||||
done: true,
|
||||
};
|
||||
}
|
||||
|
||||
async embed(request: EmbedRequestDto): Promise<EmbedResponseDto> {
|
||||
if (!this.initialized) throw new Error("Provider not initialized");
|
||||
return {
|
||||
model: request.model,
|
||||
embeddings: request.input.map(() => [0.1, 0.2, 0.3]),
|
||||
};
|
||||
}
|
||||
|
||||
getConfig(): LlmProviderConfig {
|
||||
return { ...this.config };
|
||||
}
|
||||
}
|
||||
|
||||
describe("LlmProviderInterface", () => {
|
||||
let provider: LlmProviderInterface;
|
||||
|
||||
beforeEach(() => {
|
||||
provider = new MockLlmProvider({
|
||||
endpoint: "http://localhost:8000",
|
||||
timeout: 30000,
|
||||
});
|
||||
});
|
||||
|
||||
describe("initialization", () => {
|
||||
it("should initialize successfully", async () => {
|
||||
await expect(provider.initialize()).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it("should have name and type properties", () => {
|
||||
expect(provider.name).toBeDefined();
|
||||
expect(provider.type).toBeDefined();
|
||||
expect(typeof provider.name).toBe("string");
|
||||
expect(typeof provider.type).toBe("string");
|
||||
});
|
||||
});
|
||||
|
||||
describe("checkHealth", () => {
|
||||
it("should return health status", async () => {
|
||||
await provider.initialize();
|
||||
const health = await provider.checkHealth();
|
||||
|
||||
expect(health).toHaveProperty("healthy");
|
||||
expect(health).toHaveProperty("provider");
|
||||
expect(health.healthy).toBe(true);
|
||||
expect(health.provider).toBe("mock");
|
||||
});
|
||||
|
||||
it("should include endpoint in health status", async () => {
|
||||
await provider.initialize();
|
||||
const health = await provider.checkHealth();
|
||||
|
||||
expect(health.endpoint).toBe("http://localhost:8000");
|
||||
});
|
||||
});
|
||||
|
||||
describe("listModels", () => {
|
||||
it("should return array of model names", async () => {
|
||||
await provider.initialize();
|
||||
const models = await provider.listModels();
|
||||
|
||||
expect(Array.isArray(models)).toBe(true);
|
||||
expect(models.length).toBeGreaterThan(0);
|
||||
models.forEach((model) => expect(typeof model).toBe("string"));
|
||||
});
|
||||
|
||||
it("should throw if not initialized", async () => {
|
||||
await expect(provider.listModels()).rejects.toThrow("not initialized");
|
||||
});
|
||||
});
|
||||
|
||||
describe("chat", () => {
|
||||
it("should return chat response", async () => {
|
||||
await provider.initialize();
|
||||
const request: ChatRequestDto = {
|
||||
model: "test-model",
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
};
|
||||
|
||||
const response = await provider.chat(request);
|
||||
|
||||
expect(response).toHaveProperty("model");
|
||||
expect(response).toHaveProperty("message");
|
||||
expect(response).toHaveProperty("done");
|
||||
expect(response.message.role).toBe("assistant");
|
||||
expect(typeof response.message.content).toBe("string");
|
||||
});
|
||||
|
||||
it("should throw if not initialized", async () => {
|
||||
const request: ChatRequestDto = {
|
||||
model: "test-model",
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
};
|
||||
|
||||
await expect(provider.chat(request)).rejects.toThrow("not initialized");
|
||||
});
|
||||
});
|
||||
|
||||
describe("chatStream", () => {
|
||||
it("should yield chat response chunks", async () => {
|
||||
await provider.initialize();
|
||||
const request: ChatRequestDto = {
|
||||
model: "test-model",
|
||||
messages: [{ role: "user", content: "Hello" }],
|
||||
};
|
||||
|
||||
const chunks: ChatResponseDto[] = [];
|
||||
for await (const chunk of provider.chatStream(request)) {
|
||||
chunks.push(chunk);
|
||||
}
|
||||
|
||||
expect(chunks.length).toBeGreaterThan(0);
|
||||
chunks.forEach((chunk) => {
|
||||
expect(chunk).toHaveProperty("model");
|
||||
expect(chunk).toHaveProperty("message");
|
||||
expect(chunk).toHaveProperty("done");
|
||||
});
|
||||
expect(chunks[chunks.length - 1].done).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("embed", () => {
|
||||
it("should return embeddings", async () => {
|
||||
await provider.initialize();
|
||||
const request: EmbedRequestDto = {
|
||||
model: "test-model",
|
||||
input: ["text1", "text2"],
|
||||
};
|
||||
|
||||
const response = await provider.embed(request);
|
||||
|
||||
expect(response).toHaveProperty("model");
|
||||
expect(response).toHaveProperty("embeddings");
|
||||
expect(Array.isArray(response.embeddings)).toBe(true);
|
||||
expect(response.embeddings.length).toBe(request.input.length);
|
||||
response.embeddings.forEach((embedding) => {
|
||||
expect(Array.isArray(embedding)).toBe(true);
|
||||
expect(embedding.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
it("should throw if not initialized", async () => {
|
||||
const request: EmbedRequestDto = {
|
||||
model: "test-model",
|
||||
input: ["text1"],
|
||||
};
|
||||
|
||||
await expect(provider.embed(request)).rejects.toThrow("not initialized");
|
||||
});
|
||||
});
|
||||
|
||||
describe("getConfig", () => {
|
||||
it("should return provider configuration", () => {
|
||||
const config = provider.getConfig();
|
||||
|
||||
expect(config).toHaveProperty("endpoint");
|
||||
expect(config).toHaveProperty("timeout");
|
||||
expect(config.endpoint).toBe("http://localhost:8000");
|
||||
expect(config.timeout).toBe(30000);
|
||||
});
|
||||
|
||||
it("should return a copy of config, not reference", () => {
|
||||
const config1 = provider.getConfig();
|
||||
const config2 = provider.getConfig();
|
||||
|
||||
expect(config1).not.toBe(config2);
|
||||
expect(config1).toEqual(config2);
|
||||
});
|
||||
});
|
||||
});
|
||||
160
apps/api/src/llm/providers/llm-provider.interface.ts
Normal file
160
apps/api/src/llm/providers/llm-provider.interface.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
import type { ChatRequestDto, ChatResponseDto, EmbedRequestDto, EmbedResponseDto } from "../dto";
|
||||
|
||||
/**
 * Base configuration for all LLM providers.
 * Provider-specific implementations can extend this interface.
 */
export interface LlmProviderConfig {
  /**
   * Provider endpoint URL (e.g., "http://localhost:11434" for Ollama)
   */
  endpoint: string;

  /**
   * Request timeout in milliseconds
   * @default 30000
   */
  timeout?: number;

  /**
   * Additional provider-specific configuration
   *
   * NOTE(review): this broad index signature makes every unknown key
   * legal, so typos in optional keys (e.g. "timeOut") will not be
   * caught by excess-property checking — confirm this looseness is
   * intentional versus having each provider extend this interface
   * with its own named fields.
   */
  [key: string]: unknown;
}
|
||||
|
||||
/**
 * Health status returned by provider health checks
 * (the return type of LlmProviderInterface.checkHealth()).
 */
export interface LlmProviderHealthStatus {
  /**
   * Whether the provider is healthy and ready to accept requests
   */
  healthy: boolean;

  /**
   * Provider name (e.g., "ollama", "claude", "openai")
   */
  provider: string;

  /**
   * Provider endpoint being checked
   */
  endpoint?: string;

  /**
   * Error message if unhealthy
   */
  error?: string;

  /**
   * Available models (optional, for providers that support listing)
   */
  models?: string[];

  /**
   * Additional metadata about the health check
   */
  metadata?: Record<string, unknown>;
}
|
||||
|
||||
/**
 * Provider type discriminator for runtime type checking.
 * Extend this union when adding support for a new backend; the
 * `type` member of LlmProviderInterface must be one of these values.
 */
export type LlmProviderType = "ollama" | "claude" | "openai";
|
||||
|
||||
/**
 * Abstract interface that all LLM providers must implement.
 * Supports multiple LLM backends (Ollama, Claude, OpenAI, etc.)
 *
 * @example
 * ```typescript
 * class OllamaProvider implements LlmProviderInterface {
 *   readonly name = "ollama";
 *   readonly type = "ollama";
 *
 *   constructor(config: OllamaProviderConfig) {
 *     // Initialize provider
 *   }
 *
 *   async initialize(): Promise<void> {
 *     // Setup provider connection
 *   }
 *
 *   async chat(request: ChatRequestDto): Promise<ChatResponseDto> {
 *     // Implement chat completion
 *   }
 *
 *   // ... implement other methods
 * }
 * ```
 */
export interface LlmProviderInterface {
  /**
   * Human-readable provider name (e.g., "Ollama", "Claude", "OpenAI")
   */
  readonly name: string;

  /**
   * Provider type discriminator for runtime type checking
   */
  readonly type: LlmProviderType;

  /**
   * Initialize the provider connection and resources.
   * Called once during provider instantiation.
   *
   * @throws {Error} If initialization fails
   */
  initialize(): Promise<void>;

  /**
   * Check if the provider is healthy and ready to accept requests.
   *
   * @returns Health status with provider details
   */
  checkHealth(): Promise<LlmProviderHealthStatus>;

  /**
   * List all available models from this provider.
   *
   * @returns Array of model names
   * @throws {Error} If provider is not initialized or request fails
   */
  listModels(): Promise<string[]>;

  /**
   * Perform a synchronous chat completion.
   *
   * @param request - Chat request with messages and configuration
   * @returns Complete chat response
   * @throws {Error} If provider is not initialized or request fails
   */
  chat(request: ChatRequestDto): Promise<ChatResponseDto>;

  /**
   * Perform a streaming chat completion.
   * Yields response chunks as they arrive from the provider.
   *
   * @remarks The generator's return/next type parameters are left at
   * their library defaults here; implementations are only constrained
   * on the yielded chunk type.
   *
   * @param request - Chat request with messages and configuration
   * @yields Chat response chunks
   * @throws {Error} If provider is not initialized or request fails
   */
  chatStream(request: ChatRequestDto): AsyncGenerator<ChatResponseDto>;

  /**
   * Generate embeddings for the given input texts.
   *
   * @param request - Embedding request with model and input texts
   * @returns Embeddings response with vector arrays
   * @throws {Error} If provider is not initialized or request fails
   */
  embed(request: EmbedRequestDto): Promise<EmbedResponseDto>;

  /**
   * Get the current provider configuration.
   * Should return a copy to prevent external modification.
   *
   * @returns Provider configuration object
   */
  getConfig(): LlmProviderConfig;
}
|
||||
81
docs/scratchpads/122-llm-provider-interface.md
Normal file
81
docs/scratchpads/122-llm-provider-interface.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Issue #122: Create LLM Provider Interface
|
||||
|
||||
## Objective
|
||||
|
||||
Define the abstract contract that all LLM providers (Ollama, Claude, OpenAI) must implement to enable multi-provider support.
|
||||
|
||||
## Approach
|
||||
|
||||
### Current State
|
||||
|
||||
- `LlmService` is hardcoded to Ollama
|
||||
- Direct coupling to `ollama` npm package
|
||||
- Methods: `chat()`, `chatStream()`, `embed()`, `listModels()`, `checkHealth()`
|
||||
|
||||
### Target Architecture
|
||||
|
||||
```
|
||||
LlmProviderInterface (abstract)
|
||||
├── OllamaProvider (implements)
|
||||
├── ClaudeProvider (implements)
|
||||
└── OpenAIProvider (implements)
|
||||
|
||||
LlmManagerService
|
||||
└── manages provider instances
|
||||
└── routes requests to appropriate provider
|
||||
```
|
||||
|
||||
### Interface Methods (from current LlmService)
|
||||
|
||||
1. `chat(request)` - Synchronous chat completion
|
||||
2. `chatStream(request)` - Streaming chat completion (async generator)
|
||||
3. `embed(request)` - Generate embeddings
|
||||
4. `listModels()` - List available models
|
||||
5. `checkHealth()` - Health check
|
||||
|
||||
### Provider Configuration
|
||||
|
||||
Each provider needs different config:
|
||||
|
||||
- Ollama: `{ host, timeout }`
|
||||
- Claude: `{ apiKey, baseUrl?, timeout? }`
|
||||
- OpenAI: `{ apiKey, baseUrl?, organization?, timeout? }`
|
||||
|
||||
Need generic config interface that providers can extend.
|
||||
|
||||
## Progress
|
||||
|
||||
- [x] Write interface tests (TDD - RED)
|
||||
- [x] Create base types and DTOs
|
||||
- [x] Implement LlmProviderInterface
|
||||
- [x] Implement LlmProviderConfig interface
|
||||
- [x] Add JSDoc documentation
|
||||
- [x] Run tests (TDD - GREEN) - All 13 tests passed
|
||||
- [x] Type checking passed
|
||||
- [x] Refactor if needed (TDD - REFACTOR) - No refactoring needed
|
||||
|
||||
## Testing
|
||||
|
||||
Created a mock provider implementation to test interface contract.
|
||||
|
||||
**Test Results:**
|
||||
|
||||
```
|
||||
✓ src/llm/providers/llm-provider.interface.spec.ts (13 tests) 7ms
|
||||
- initialization (2 tests)
|
||||
- checkHealth (2 tests)
|
||||
- listModels (2 tests)
|
||||
- chat (2 tests)
|
||||
- chatStream (1 test)
|
||||
- embed (2 tests)
|
||||
- getConfig (2 tests)
|
||||
```
|
||||
|
||||
**Type Check:** ✅ Passed
|
||||
|
||||
## Notes
|
||||
|
||||
- Interface should be provider-agnostic
|
||||
- Use existing DTOs from current LlmService
|
||||
- Consider async initialization for providers
|
||||
- Health check should return standardized status
|
||||
Reference in New Issue
Block a user