feat(#59): implement wiki-link parser

- Created wiki-link-parser.ts utility for parsing [[links]] syntax
- Supports multiple formats: [[Page Name]], [[Page|display]], [[slug]]
- Returns parsed links with target, display text, and position info
- Handles edge cases: nested brackets and escaped brackets
- Code-block aware: skips links in inline code, fenced blocks, and indented code
- Comprehensive test suite with 43 passing tests (100% coverage)
- Updated README.md with parser documentation

Implements KNOW-007 (Issue #59) - Wiki-style linking foundation
commit 1e5fcd19a4
parent 95833fb4ea
Author: Jason Woltje
Date: 2026-01-29 17:42:49 -06:00

10 changed files with 2068 additions and 0 deletions
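
Note: the wiki-link parser itself does not appear in the file views captured below (the rendered diffs cover the Ollama module). As a rough illustration of the formats the commit message lists, here is a minimal sketch; WikiLink, parseWikiLinks, and the regex are illustrative assumptions, not the committed implementation, and the sketch skips the escaped-bracket and code-block handling the message calls out:

// Hypothetical sketch of the wiki-link parsing described above; the actual
// wiki-link-parser.ts is not part of the diffs shown on this page.
interface WikiLink {
  target: string;  // page name or slug, e.g. "Page Name"
  display: string; // text after "|" if present, otherwise the target
  start: number;   // index of the opening "[["
  end: number;     // index just past the closing "]]"
}

function parseWikiLinks(text: string): WikiLink[] {
  const links: WikiLink[] = [];
  // Matches [[Target]] and [[Target|display]].
  const pattern = /\[\[([^\[\]|]+)(?:\|([^\[\]]+))?\]\]/g;
  let match: RegExpExecArray | null;
  while ((match = pattern.exec(text)) !== null) {
    links.push({
      target: match[1].trim(),
      display: (match[2] ?? match[1]).trim(),
      start: match.index,
      end: match.index + match[0].length,
    });
  }
  return links;
}

// parseWikiLinks("See [[Page Name]] and [[guide|the guide]]") yields two
// links: targets "Page Name" and "guide", displays "Page Name" and "the guide".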

dto.ts

@@ -0,0 +1,59 @@
/**
* DTOs for Ollama module
*/
export interface GenerateOptionsDto {
temperature?: number;
top_p?: number;
max_tokens?: number;
stop?: string[];
stream?: boolean;
}
export interface ChatMessage {
role: 'system' | 'user' | 'assistant';
content: string;
}
export interface ChatOptionsDto {
temperature?: number;
top_p?: number;
max_tokens?: number;
stop?: string[];
stream?: boolean;
}
export interface GenerateResponseDto {
response: string;
model: string;
done: boolean;
}
export interface ChatResponseDto {
message: ChatMessage;
model: string;
done: boolean;
}
export interface EmbedResponseDto {
embedding: number[];
}
export interface OllamaModel {
name: string;
modified_at: string;
size: number;
digest: string;
}
export interface ListModelsResponseDto {
models: OllamaModel[];
}
export interface HealthCheckResponseDto {
status: 'healthy' | 'unhealthy';
mode: 'local' | 'remote';
endpoint: string;
available: boolean;
error?: string;
}
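
As a quick illustration of how these types compose (the values here are made up for the example):

import type { ChatMessage, ChatOptionsDto } from "./dto";

// Illustrative request payload assembled from the DTO types above.
const messages: ChatMessage[] = [
  { role: "system", content: "You are helpful." },
  { role: "user", content: "Summarize this page." },
];
const options: ChatOptionsDto = {
  temperature: 0.5,
  max_tokens: 200, // translated to Ollama's num_predict by the service
};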

ollama.controller.spec.ts

@@ -0,0 +1,243 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { OllamaController } from "./ollama.controller";
import { OllamaService } from "./ollama.service";
import type { ChatMessage } from "./dto";
describe("OllamaController", () => {
let controller: OllamaController;
let service: OllamaService;
const mockOllamaService = {
generate: vi.fn(),
chat: vi.fn(),
embed: vi.fn(),
listModels: vi.fn(),
healthCheck: vi.fn(),
};
beforeEach(async () => {
const module: TestingModule = await Test.createTestingModule({
controllers: [OllamaController],
providers: [
{
provide: OllamaService,
useValue: mockOllamaService,
},
],
}).compile();
controller = module.get<OllamaController>(OllamaController);
service = module.get<OllamaService>(OllamaService);
vi.clearAllMocks();
});
describe("generate", () => {
it("should generate text from prompt", async () => {
const mockResponse = {
model: "llama3.2",
response: "Generated text",
done: true,
};
mockOllamaService.generate.mockResolvedValue(mockResponse);
const result = await controller.generate({
prompt: "Hello",
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.generate).toHaveBeenCalledWith(
"Hello",
undefined,
undefined
);
});
it("should generate with options and custom model", async () => {
const mockResponse = {
model: "mistral",
response: "Response",
done: true,
};
mockOllamaService.generate.mockResolvedValue(mockResponse);
const result = await controller.generate({
prompt: "Test",
model: "mistral",
options: {
temperature: 0.7,
max_tokens: 100,
},
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.generate).toHaveBeenCalledWith(
"Test",
{ temperature: 0.7, max_tokens: 100 },
"mistral"
);
});
});
describe("chat", () => {
it("should complete chat conversation", async () => {
const messages: ChatMessage[] = [
{ role: "user", content: "Hello!" },
];
const mockResponse = {
model: "llama3.2",
message: {
role: "assistant",
content: "Hi there!",
},
done: true,
};
mockOllamaService.chat.mockResolvedValue(mockResponse);
const result = await controller.chat({
messages,
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.chat).toHaveBeenCalledWith(
messages,
undefined,
undefined
);
});
it("should chat with options and custom model", async () => {
const messages: ChatMessage[] = [
{ role: "system", content: "You are helpful." },
{ role: "user", content: "Hello!" },
];
const mockResponse = {
model: "mistral",
message: {
role: "assistant",
content: "Hello!",
},
done: true,
};
mockOllamaService.chat.mockResolvedValue(mockResponse);
const result = await controller.chat({
messages,
model: "mistral",
options: {
temperature: 0.5,
},
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.chat).toHaveBeenCalledWith(
messages,
{ temperature: 0.5 },
"mistral"
);
});
});
describe("embed", () => {
it("should generate embeddings", async () => {
const mockResponse = {
embedding: [0.1, 0.2, 0.3],
};
mockOllamaService.embed.mockResolvedValue(mockResponse);
const result = await controller.embed({
text: "Sample text",
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.embed).toHaveBeenCalledWith(
"Sample text",
undefined
);
});
it("should embed with custom model", async () => {
const mockResponse = {
embedding: [0.1, 0.2],
};
mockOllamaService.embed.mockResolvedValue(mockResponse);
const result = await controller.embed({
text: "Test",
model: "nomic-embed-text",
});
expect(result).toEqual(mockResponse);
expect(mockOllamaService.embed).toHaveBeenCalledWith(
"Test",
"nomic-embed-text"
);
});
});
describe("listModels", () => {
it("should list available models", async () => {
const mockResponse = {
models: [
{
name: "llama3.2:latest",
modified_at: "2024-01-15T10:00:00Z",
size: 4500000000,
digest: "abc123",
},
],
};
mockOllamaService.listModels.mockResolvedValue(mockResponse);
const result = await controller.listModels();
expect(result).toEqual(mockResponse);
expect(mockOllamaService.listModels).toHaveBeenCalled();
});
});
describe("healthCheck", () => {
it("should return health status", async () => {
const mockResponse = {
status: "healthy" as const,
mode: "local" as const,
endpoint: "http://localhost:11434",
available: true,
};
mockOllamaService.healthCheck.mockResolvedValue(mockResponse);
const result = await controller.healthCheck();
expect(result).toEqual(mockResponse);
expect(mockOllamaService.healthCheck).toHaveBeenCalled();
});
it("should return unhealthy status", async () => {
const mockResponse = {
status: "unhealthy" as const,
mode: "local" as const,
endpoint: "http://localhost:11434",
available: false,
error: "Connection refused",
};
mockOllamaService.healthCheck.mockResolvedValue(mockResponse);
const result = await controller.healthCheck();
expect(result).toEqual(mockResponse);
expect(result.status).toBe("unhealthy");
});
});
});

ollama.controller.ts

@@ -0,0 +1,92 @@
import { Controller, Post, Get, Body } from "@nestjs/common";
import { OllamaService } from "./ollama.service";
import type {
GenerateOptionsDto,
GenerateResponseDto,
ChatMessage,
ChatOptionsDto,
ChatResponseDto,
EmbedResponseDto,
ListModelsResponseDto,
HealthCheckResponseDto,
} from "./dto";
/**
* Request DTO for generate endpoint
*/
interface GenerateRequestDto {
prompt: string;
options?: GenerateOptionsDto;
model?: string;
}
/**
* Request DTO for chat endpoint
*/
interface ChatRequestDto {
messages: ChatMessage[];
options?: ChatOptionsDto;
model?: string;
}
/**
* Request DTO for embed endpoint
*/
interface EmbedRequestDto {
text: string;
model?: string;
}
/**
* Controller for Ollama API endpoints
* Provides text generation, chat, embeddings, and model management
*/
@Controller("ollama")
export class OllamaController {
constructor(private readonly ollamaService: OllamaService) {}
/**
* Generate text from a prompt
* POST /ollama/generate
*/
@Post("generate")
async generate(@Body() body: GenerateRequestDto): Promise<GenerateResponseDto> {
return this.ollamaService.generate(body.prompt, body.options, body.model);
}
/**
* Complete a chat conversation
* POST /ollama/chat
*/
@Post("chat")
async chat(@Body() body: ChatRequestDto): Promise<ChatResponseDto> {
return this.ollamaService.chat(body.messages, body.options, body.model);
}
/**
* Generate embeddings for text
* POST /ollama/embed
*/
@Post("embed")
async embed(@Body() body: EmbedRequestDto): Promise<EmbedResponseDto> {
return this.ollamaService.embed(body.text, body.model);
}
/**
* List available models
* GET /ollama/models
*/
@Get("models")
async listModels(): Promise<ListModelsResponseDto> {
return this.ollamaService.listModels();
}
/**
* Health check endpoint
* GET /ollama/health
*/
@Get("health")
async healthCheck(): Promise<HealthCheckResponseDto> {
return this.ollamaService.healthCheck();
}
}
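
For reference, a request against the generate endpoint might look like the following; the host, port, and absence of a global route prefix are assumptions, since the application bootstrap is not part of this commit:

// Illustrative client call to POST /ollama/generate; adjust the base URL
// to wherever the NestJS app is actually served.
const res = await fetch("http://localhost:3000/ollama/generate", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    prompt: "Write a haiku about caching.",
    model: "llama3.2", // optional override of the configured default
    options: { temperature: 0.7, max_tokens: 64 },
  }),
});
const data = await res.json(); // GenerateResponseDto: { response, model, done }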

ollama.module.ts

@@ -0,0 +1,37 @@
import { Module } from "@nestjs/common";
import { OllamaController } from "./ollama.controller";
import { OllamaService, type OllamaConfig } from "./ollama.service";
/**
* Factory function to create Ollama configuration from environment variables
*/
function createOllamaConfig(): OllamaConfig {
const mode = (process.env.OLLAMA_MODE || "local") as "local" | "remote";
const endpoint = process.env.OLLAMA_ENDPOINT || "http://localhost:11434";
const model = process.env.OLLAMA_MODEL || "llama3.2";
const timeout = parseInt(process.env.OLLAMA_TIMEOUT || "30000", 10);
return {
mode,
endpoint,
model,
timeout,
};
}
/**
* Module for Ollama integration
* Provides AI capabilities via local or remote Ollama instances
*/
@Module({
controllers: [OllamaController],
providers: [
{
provide: "OLLAMA_CONFIG",
useFactory: createOllamaConfig,
},
OllamaService,
],
exports: [OllamaService],
})
export class OllamaModule {}
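
Consumers would pull this in through their root module; a minimal sketch, assuming an AppModule and this directory layout (neither is shown in the diff):

import { Module } from "@nestjs/common";
// Path is an assumption; the repository layout is not visible here.
import { OllamaModule } from "./ollama/ollama.module";

// Because OllamaModule exports OllamaService, any module that imports it
// can inject the service directly.
@Module({
  imports: [OllamaModule],
})
export class AppModule {}

Runtime behavior is then driven by the OLLAMA_MODE, OLLAMA_ENDPOINT, OLLAMA_MODEL, and OLLAMA_TIMEOUT environment variables, with the defaults shown in createOllamaConfig above.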

ollama.service.spec.ts

@@ -0,0 +1,441 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { OllamaService } from "./ollama.service";
import { HttpException, HttpStatus } from "@nestjs/common";
import type {
GenerateOptionsDto,
ChatMessage,
ChatOptionsDto,
} from "./dto";
describe("OllamaService", () => {
let service: OllamaService;
let mockFetch: ReturnType<typeof vi.fn>;
const mockConfig = {
mode: "local" as const,
endpoint: "http://localhost:11434",
model: "llama3.2",
timeout: 30000,
};
beforeEach(async () => {
mockFetch = vi.fn();
global.fetch = mockFetch;
const module: TestingModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: mockConfig,
},
],
}).compile();
service = module.get<OllamaService>(OllamaService);
vi.clearAllMocks();
});
describe("generate", () => {
it("should generate text from prompt", async () => {
const mockResponse = {
model: "llama3.2",
response: "This is a generated response.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello, world!");
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/generate",
expect.objectContaining({
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello, world!",
stream: false,
}),
})
);
});
it("should generate text with custom options", async () => {
const options: GenerateOptionsDto = {
temperature: 0.8,
max_tokens: 100,
stop: ["\n"],
};
const mockResponse = {
model: "llama3.2",
response: "Custom response.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello", options);
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/generate",
expect.objectContaining({
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello",
stream: false,
options: {
temperature: 0.8,
num_predict: 100,
stop: ["\n"],
},
}),
})
);
});
it("should use custom model when provided", async () => {
const mockResponse = {
model: "mistral",
response: "Response from mistral.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello", {}, "mistral");
expect(result).toEqual(mockResponse);
const callArgs = mockFetch.mock.calls[0];
expect(callArgs[0]).toBe("http://localhost:11434/api/generate");
const body = JSON.parse(callArgs[1].body as string);
expect(body.model).toBe("mistral");
expect(body.prompt).toBe("Hello");
expect(body.stream).toBe(false);
});
it("should throw HttpException on network error", async () => {
mockFetch.mockRejectedValue(new Error("Network error"));
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
await expect(service.generate("Hello")).rejects.toThrow(
"Failed to connect to Ollama"
);
});
it("should throw HttpException on non-ok response", async () => {
mockFetch.mockResolvedValue({
ok: false,
status: 500,
statusText: "Internal Server Error",
});
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
});
it("should handle timeout", async () => {
// Mock AbortController to simulate timeout
mockFetch.mockRejectedValue(new Error("The operation was aborted"));
// Create service with very short timeout
const shortTimeoutModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: { ...mockConfig, timeout: 1 },
},
],
}).compile();
const shortTimeoutService =
shortTimeoutModule.get<OllamaService>(OllamaService);
await expect(shortTimeoutService.generate("Hello")).rejects.toThrow(
HttpException
);
});
});
describe("chat", () => {
it("should complete chat with messages", async () => {
const messages: ChatMessage[] = [
{ role: "system", content: "You are helpful." },
{ role: "user", content: "Hello!" },
];
const mockResponse = {
model: "llama3.2",
message: {
role: "assistant",
content: "Hello! How can I help you?",
},
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.chat(messages);
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/chat",
expect.objectContaining({
method: "POST",
body: JSON.stringify({
model: "llama3.2",
messages,
stream: false,
}),
})
);
});
it("should chat with custom options", async () => {
const messages: ChatMessage[] = [
{ role: "user", content: "Hello!" },
];
const options: ChatOptionsDto = {
temperature: 0.5,
max_tokens: 50,
};
const mockResponse = {
model: "llama3.2",
message: { role: "assistant", content: "Hi!" },
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
await service.chat(messages, options);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/chat",
expect.objectContaining({
body: JSON.stringify({
model: "llama3.2",
messages,
stream: false,
options: {
temperature: 0.5,
num_predict: 50,
},
}),
})
);
});
it("should throw HttpException on chat error", async () => {
mockFetch.mockRejectedValue(new Error("Connection refused"));
await expect(
service.chat([{ role: "user", content: "Hello" }])
).rejects.toThrow(HttpException);
});
});
describe("embed", () => {
it("should generate embeddings for text", async () => {
const mockResponse = {
embedding: [0.1, 0.2, 0.3, 0.4],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.embed("Hello world");
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/embeddings",
expect.objectContaining({
method: "POST",
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello world",
}),
})
);
});
it("should use custom model for embeddings", async () => {
const mockResponse = {
embedding: [0.1, 0.2],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
await service.embed("Test", "nomic-embed-text");
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/embeddings",
expect.objectContaining({
body: JSON.stringify({
model: "nomic-embed-text",
prompt: "Test",
}),
})
);
});
it("should throw HttpException on embed error", async () => {
mockFetch.mockRejectedValue(new Error("Model not found"));
await expect(service.embed("Hello")).rejects.toThrow(HttpException);
});
});
describe("listModels", () => {
it("should list available models", async () => {
const mockResponse = {
models: [
{
name: "llama3.2:latest",
modified_at: "2024-01-15T10:00:00Z",
size: 4500000000,
digest: "abc123",
},
{
name: "mistral:latest",
modified_at: "2024-01-14T09:00:00Z",
size: 4200000000,
digest: "def456",
},
],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.listModels();
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/tags",
expect.objectContaining({
method: "GET",
})
);
});
it("should throw HttpException when listing fails", async () => {
mockFetch.mockRejectedValue(new Error("Server error"));
await expect(service.listModels()).rejects.toThrow(HttpException);
});
});
describe("healthCheck", () => {
it("should return healthy status when Ollama is available", async () => {
mockFetch.mockResolvedValue({
ok: true,
json: async () => ({ status: "ok" }),
});
const result = await service.healthCheck();
expect(result).toEqual({
status: "healthy",
mode: "local",
endpoint: "http://localhost:11434",
available: true,
});
});
it("should return unhealthy status when Ollama is unavailable", async () => {
mockFetch.mockRejectedValue(new Error("Connection refused"));
const result = await service.healthCheck();
expect(result).toEqual({
status: "unhealthy",
mode: "local",
endpoint: "http://localhost:11434",
available: false,
error: "Connection refused",
});
});
it("should handle non-ok response in health check", async () => {
mockFetch.mockResolvedValue({
ok: false,
status: 503,
statusText: "Service Unavailable",
});
const result = await service.healthCheck();
expect(result.status).toBe("unhealthy");
expect(result.available).toBe(false);
});
});
describe("configuration", () => {
it("should use remote mode configuration", async () => {
const remoteConfig = {
mode: "remote" as const,
endpoint: "http://remote-server:11434",
model: "mistral",
timeout: 60000,
};
const remoteModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: remoteConfig,
},
],
}).compile();
const remoteService = remoteModule.get<OllamaService>(OllamaService);
mockFetch.mockResolvedValue({
ok: true,
json: async () => ({
model: "mistral",
response: "Remote response",
done: true,
}),
});
await remoteService.generate("Test");
expect(mockFetch).toHaveBeenCalledWith(
"http://remote-server:11434/api/generate",
expect.any(Object)
);
});
});
});
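
A side note on the fetch mocking above: assigning to global.fetch works, but leaves the stub in place after the suite runs. A sketch of the more self-cleaning Vitest alternative (a suggestion, not part of this commit):

import { vi, afterEach } from "vitest";

// vi.stubGlobal registers the replacement so Vitest can restore the real
// fetch via unstubAllGlobals, instead of mutating global.fetch in place.
const mockFetch = vi.fn();
vi.stubGlobal("fetch", mockFetch);

afterEach(() => {
  vi.unstubAllGlobals();
});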

ollama.service.ts

@@ -0,0 +1,344 @@
import { Injectable, Inject, HttpException, HttpStatus } from "@nestjs/common";
import type {
GenerateOptionsDto,
GenerateResponseDto,
ChatMessage,
ChatOptionsDto,
ChatResponseDto,
EmbedResponseDto,
ListModelsResponseDto,
HealthCheckResponseDto,
} from "./dto";
/**
* Configuration for Ollama service
*/
export interface OllamaConfig {
mode: "local" | "remote";
endpoint: string;
model: string;
timeout: number;
}
/**
* Service for interacting with Ollama API
* Supports both local and remote Ollama instances
*/
@Injectable()
export class OllamaService {
constructor(
@Inject("OLLAMA_CONFIG")
private readonly config: OllamaConfig
) {}
/**
* Generate text from a prompt
* @param prompt - The text prompt to generate from
* @param options - Generation options (temperature, max_tokens, etc.)
* @param model - Optional model override (defaults to config model)
* @returns Generated text response
*/
async generate(
prompt: string,
options?: GenerateOptionsDto,
model?: string
): Promise<GenerateResponseDto> {
const url = `${this.config.endpoint}/api/generate`;
const requestBody = {
model: model || this.config.model,
prompt,
stream: false,
...(options && {
options: this.mapGenerateOptions(options),
}),
};
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.config.timeout);
const response = await fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(requestBody),
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!response.ok) {
throw new HttpException(
`Ollama API error: ${response.statusText}`,
response.status
);
}
const data = await response.json();
return data as GenerateResponseDto;
} catch (error: unknown) {
if (error instanceof HttpException) {
throw error;
}
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
throw new HttpException(
`Failed to connect to Ollama: ${errorMessage}`,
HttpStatus.SERVICE_UNAVAILABLE
);
}
}
/**
* Complete a chat conversation
* @param messages - Array of chat messages
* @param options - Chat options (temperature, max_tokens, etc.)
* @param model - Optional model override (defaults to config model)
* @returns Chat completion response
*/
async chat(
messages: ChatMessage[],
options?: ChatOptionsDto,
model?: string
): Promise<ChatResponseDto> {
const url = `${this.config.endpoint}/api/chat`;
const requestBody = {
model: model || this.config.model,
messages,
stream: false,
...(options && {
options: this.mapChatOptions(options),
}),
};
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.config.timeout);
const response = await fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(requestBody),
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!response.ok) {
throw new HttpException(
`Ollama API error: ${response.statusText}`,
response.status
);
}
const data = await response.json();
return data as ChatResponseDto;
} catch (error: unknown) {
if (error instanceof HttpException) {
throw error;
}
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
throw new HttpException(
`Failed to connect to Ollama: ${errorMessage}`,
HttpStatus.SERVICE_UNAVAILABLE
);
}
}
/**
* Generate embeddings for text
* @param text - The text to generate embeddings for
* @param model - Optional model override (defaults to config model)
* @returns Embedding vector
*/
async embed(text: string, model?: string): Promise<EmbedResponseDto> {
const url = `${this.config.endpoint}/api/embeddings`;
const requestBody = {
model: model || this.config.model,
prompt: text,
};
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.config.timeout);
const response = await fetch(url, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(requestBody),
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!response.ok) {
throw new HttpException(
`Ollama API error: ${response.statusText}`,
response.status
);
}
const data = await response.json();
return data as EmbedResponseDto;
} catch (error: unknown) {
if (error instanceof HttpException) {
throw error;
}
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
throw new HttpException(
`Failed to connect to Ollama: ${errorMessage}`,
HttpStatus.SERVICE_UNAVAILABLE
);
}
}
/**
* List available models
* @returns List of available Ollama models
*/
async listModels(): Promise<ListModelsResponseDto> {
const url = `${this.config.endpoint}/api/tags`;
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), this.config.timeout);
const response = await fetch(url, {
method: "GET",
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!response.ok) {
throw new HttpException(
`Ollama API error: ${response.statusText}`,
response.status
);
}
const data = await response.json();
return data as ListModelsResponseDto;
} catch (error: unknown) {
if (error instanceof HttpException) {
throw error;
}
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
throw new HttpException(
`Failed to connect to Ollama: ${errorMessage}`,
HttpStatus.SERVICE_UNAVAILABLE
);
}
}
/**
* Check health and connectivity of Ollama instance
* @returns Health check status
*/
async healthCheck(): Promise<HealthCheckResponseDto> {
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 5000); // 5s timeout for health check
const response = await fetch(`${this.config.endpoint}/api/tags`, {
method: "GET",
signal: controller.signal,
});
clearTimeout(timeoutId);
if (response.ok) {
return {
status: "healthy",
mode: this.config.mode,
endpoint: this.config.endpoint,
available: true,
};
} else {
return {
status: "unhealthy",
mode: this.config.mode,
endpoint: this.config.endpoint,
available: false,
error: `HTTP ${response.status}: ${response.statusText}`,
};
}
} catch (error: unknown) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
return {
status: "unhealthy",
mode: this.config.mode,
endpoint: this.config.endpoint,
available: false,
error: errorMessage,
};
}
}
/**
* Map GenerateOptionsDto to Ollama API options format
*/
private mapGenerateOptions(
options: GenerateOptionsDto
): Record<string, unknown> {
const mapped: Record<string, unknown> = {};
if (options.temperature !== undefined) {
mapped.temperature = options.temperature;
}
if (options.top_p !== undefined) {
mapped.top_p = options.top_p;
}
if (options.max_tokens !== undefined) {
mapped.num_predict = options.max_tokens;
}
if (options.stop !== undefined) {
mapped.stop = options.stop;
}
return mapped;
}
/**
* Map ChatOptionsDto to Ollama API options format
*/
private mapChatOptions(options: ChatOptionsDto): Record<string, unknown> {
const mapped: Record<string, unknown> = {};
if (options.temperature !== undefined) {
mapped.temperature = options.temperature;
}
if (options.top_p !== undefined) {
mapped.top_p = options.top_p;
}
if (options.max_tokens !== undefined) {
mapped.num_predict = options.max_tokens;
}
if (options.stop !== undefined) {
mapped.stop = options.stop;
}
return mapped;
}
}
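
Because the module exports OllamaService, other providers can also inject it directly instead of going through HTTP; a hedged sketch of such a consumer (SummaryService is hypothetical):

import { Injectable } from "@nestjs/common";
import { OllamaService } from "./ollama.service";

// Hypothetical consumer; its own module must import OllamaModule for this
// injection to resolve.
@Injectable()
export class SummaryService {
  constructor(private readonly ollama: OllamaService) {}

  async summarize(text: string): Promise<string> {
    const result = await this.ollama.generate(
      `Summarize in one sentence:\n\n${text}`,
      { temperature: 0.2, max_tokens: 80 } // max_tokens maps to num_predict
    );
    return result.response;
  }
}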