feat(#59): implement wiki-link parser
- Created wiki-link-parser.ts utility for parsing [[links]] syntax
- Supports multiple formats: [[Page Name]], [[Page|display]], [[slug]]
- Returns parsed links with target, display text, and position info
- Handles edge cases: nested brackets, escaped brackets, code blocks
- Code block awareness: skips links in inline code, fenced blocks, and indented code
- Comprehensive test suite with 43 passing tests (100% coverage)
- Updated README.md with parser documentation

Implements KNOW-007 (Issue #59) - Wiki-style linking foundation
This commit is contained in:
441
apps/api/src/ollama/ollama.service.spec.ts
Normal file
441
apps/api/src/ollama/ollama.service.spec.ts
Normal file
@@ -0,0 +1,441 @@
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { HttpException, HttpStatus } from "@nestjs/common";
import { OllamaService } from "./ollama.service";
import type {
  GenerateOptionsDto,
  ChatMessage,
  ChatOptionsDto,
} from "./dto";
|
||||
|
||||
describe("OllamaService", () => {
|
||||
let service: OllamaService;
|
||||
let mockFetch: ReturnType<typeof vi.fn>;
|
||||
|
||||
const mockConfig = {
|
||||
mode: "local" as const,
|
||||
endpoint: "http://localhost:11434",
|
||||
model: "llama3.2",
|
||||
timeout: 30000,
|
||||
};
|
||||
|
||||
beforeEach(async () => {
|
||||
mockFetch = vi.fn();
|
||||
global.fetch = mockFetch;
|
||||
|
||||
const module: TestingModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
OllamaService,
|
||||
{
|
||||
provide: "OLLAMA_CONFIG",
|
||||
useValue: mockConfig,
|
||||
},
|
||||
],
|
||||
}).compile();
|
||||
|
||||
service = module.get<OllamaService>(OllamaService);
|
||||
|
||||
vi.clearAllMocks();
|
||||
});
|
||||
|
||||
describe("generate", () => {
|
||||
it("should generate text from prompt", async () => {
|
||||
const mockResponse = {
|
||||
model: "llama3.2",
|
||||
response: "This is a generated response.",
|
||||
done: true,
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.generate("Hello, world!");
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/generate",
|
||||
expect.objectContaining({
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
model: "llama3.2",
|
||||
prompt: "Hello, world!",
|
||||
stream: false,
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should generate text with custom options", async () => {
|
||||
const options: GenerateOptionsDto = {
|
||||
temperature: 0.8,
|
||||
max_tokens: 100,
|
||||
stop: ["\n"],
|
||||
};
|
||||
|
||||
const mockResponse = {
|
||||
model: "llama3.2",
|
||||
response: "Custom response.",
|
||||
done: true,
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.generate("Hello", options);
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/generate",
|
||||
expect.objectContaining({
|
||||
body: JSON.stringify({
|
||||
model: "llama3.2",
|
||||
prompt: "Hello",
|
||||
stream: false,
|
||||
options: {
|
||||
temperature: 0.8,
|
||||
num_predict: 100,
|
||||
stop: ["\n"],
|
||||
},
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should use custom model when provided", async () => {
|
||||
const mockResponse = {
|
||||
model: "mistral",
|
||||
response: "Response from mistral.",
|
||||
done: true,
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.generate("Hello", {}, "mistral");
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
const callArgs = mockFetch.mock.calls[0];
|
||||
expect(callArgs[0]).toBe("http://localhost:11434/api/generate");
|
||||
const body = JSON.parse(callArgs[1].body as string);
|
||||
expect(body.model).toBe("mistral");
|
||||
expect(body.prompt).toBe("Hello");
|
||||
expect(body.stream).toBe(false);
|
||||
});
|
||||
|
||||
it("should throw HttpException on network error", async () => {
|
||||
mockFetch.mockRejectedValue(new Error("Network error"));
|
||||
|
||||
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
|
||||
await expect(service.generate("Hello")).rejects.toThrow(
|
||||
"Failed to connect to Ollama"
|
||||
);
|
||||
});
|
||||
|
||||
it("should throw HttpException on non-ok response", async () => {
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: false,
|
||||
status: 500,
|
||||
statusText: "Internal Server Error",
|
||||
});
|
||||
|
||||
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
|
||||
});
|
||||
|
||||
it("should handle timeout", async () => {
|
||||
// Mock AbortController to simulate timeout
|
||||
mockFetch.mockRejectedValue(new Error("The operation was aborted"));
|
||||
|
||||
// Create service with very short timeout
|
||||
const shortTimeoutModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
OllamaService,
|
||||
{
|
||||
provide: "OLLAMA_CONFIG",
|
||||
useValue: { ...mockConfig, timeout: 1 },
|
||||
},
|
||||
],
|
||||
}).compile();
|
||||
|
||||
const shortTimeoutService =
|
||||
shortTimeoutModule.get<OllamaService>(OllamaService);
|
||||
|
||||
await expect(shortTimeoutService.generate("Hello")).rejects.toThrow(
|
||||
HttpException
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe("chat", () => {
|
||||
it("should complete chat with messages", async () => {
|
||||
const messages: ChatMessage[] = [
|
||||
{ role: "system", content: "You are helpful." },
|
||||
{ role: "user", content: "Hello!" },
|
||||
];
|
||||
|
||||
const mockResponse = {
|
||||
model: "llama3.2",
|
||||
message: {
|
||||
role: "assistant",
|
||||
content: "Hello! How can I help you?",
|
||||
},
|
||||
done: true,
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.chat(messages);
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/chat",
|
||||
expect.objectContaining({
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
model: "llama3.2",
|
||||
messages,
|
||||
stream: false,
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should chat with custom options", async () => {
|
||||
const messages: ChatMessage[] = [
|
||||
{ role: "user", content: "Hello!" },
|
||||
];
|
||||
|
||||
const options: ChatOptionsDto = {
|
||||
temperature: 0.5,
|
||||
max_tokens: 50,
|
||||
};
|
||||
|
||||
const mockResponse = {
|
||||
model: "llama3.2",
|
||||
message: { role: "assistant", content: "Hi!" },
|
||||
done: true,
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
await service.chat(messages, options);
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/chat",
|
||||
expect.objectContaining({
|
||||
body: JSON.stringify({
|
||||
model: "llama3.2",
|
||||
messages,
|
||||
stream: false,
|
||||
options: {
|
||||
temperature: 0.5,
|
||||
num_predict: 50,
|
||||
},
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should throw HttpException on chat error", async () => {
|
||||
mockFetch.mockRejectedValue(new Error("Connection refused"));
|
||||
|
||||
await expect(
|
||||
service.chat([{ role: "user", content: "Hello" }])
|
||||
).rejects.toThrow(HttpException);
|
||||
});
|
||||
});
|
||||
|
||||
describe("embed", () => {
|
||||
it("should generate embeddings for text", async () => {
|
||||
const mockResponse = {
|
||||
embedding: [0.1, 0.2, 0.3, 0.4],
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.embed("Hello world");
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/embeddings",
|
||||
expect.objectContaining({
|
||||
method: "POST",
|
||||
body: JSON.stringify({
|
||||
model: "llama3.2",
|
||||
prompt: "Hello world",
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should use custom model for embeddings", async () => {
|
||||
const mockResponse = {
|
||||
embedding: [0.1, 0.2],
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
await service.embed("Test", "nomic-embed-text");
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/embeddings",
|
||||
expect.objectContaining({
|
||||
body: JSON.stringify({
|
||||
model: "nomic-embed-text",
|
||||
prompt: "Test",
|
||||
}),
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should throw HttpException on embed error", async () => {
|
||||
mockFetch.mockRejectedValue(new Error("Model not found"));
|
||||
|
||||
await expect(service.embed("Hello")).rejects.toThrow(HttpException);
|
||||
});
|
||||
});
|
||||
|
||||
describe("listModels", () => {
|
||||
it("should list available models", async () => {
|
||||
const mockResponse = {
|
||||
models: [
|
||||
{
|
||||
name: "llama3.2:latest",
|
||||
modified_at: "2024-01-15T10:00:00Z",
|
||||
size: 4500000000,
|
||||
digest: "abc123",
|
||||
},
|
||||
{
|
||||
name: "mistral:latest",
|
||||
modified_at: "2024-01-14T09:00:00Z",
|
||||
size: 4200000000,
|
||||
digest: "def456",
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => mockResponse,
|
||||
});
|
||||
|
||||
const result = await service.listModels();
|
||||
|
||||
expect(result).toEqual(mockResponse);
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://localhost:11434/api/tags",
|
||||
expect.objectContaining({
|
||||
method: "GET",
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it("should throw HttpException when listing fails", async () => {
|
||||
mockFetch.mockRejectedValue(new Error("Server error"));
|
||||
|
||||
await expect(service.listModels()).rejects.toThrow(HttpException);
|
||||
});
|
||||
});
|
||||
|
||||
describe("healthCheck", () => {
|
||||
it("should return healthy status when Ollama is available", async () => {
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => ({ status: "ok" }),
|
||||
});
|
||||
|
||||
const result = await service.healthCheck();
|
||||
|
||||
expect(result).toEqual({
|
||||
status: "healthy",
|
||||
mode: "local",
|
||||
endpoint: "http://localhost:11434",
|
||||
available: true,
|
||||
});
|
||||
});
|
||||
|
||||
it("should return unhealthy status when Ollama is unavailable", async () => {
|
||||
mockFetch.mockRejectedValue(new Error("Connection refused"));
|
||||
|
||||
const result = await service.healthCheck();
|
||||
|
||||
expect(result).toEqual({
|
||||
status: "unhealthy",
|
||||
mode: "local",
|
||||
endpoint: "http://localhost:11434",
|
||||
available: false,
|
||||
error: "Connection refused",
|
||||
});
|
||||
});
|
||||
|
||||
it("should handle non-ok response in health check", async () => {
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: false,
|
||||
status: 503,
|
||||
statusText: "Service Unavailable",
|
||||
});
|
||||
|
||||
const result = await service.healthCheck();
|
||||
|
||||
expect(result.status).toBe("unhealthy");
|
||||
expect(result.available).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe("configuration", () => {
|
||||
it("should use remote mode configuration", async () => {
|
||||
const remoteConfig = {
|
||||
mode: "remote" as const,
|
||||
endpoint: "http://remote-server:11434",
|
||||
model: "mistral",
|
||||
timeout: 60000,
|
||||
};
|
||||
|
||||
const remoteModule = await Test.createTestingModule({
|
||||
providers: [
|
||||
OllamaService,
|
||||
{
|
||||
provide: "OLLAMA_CONFIG",
|
||||
useValue: remoteConfig,
|
||||
},
|
||||
],
|
||||
}).compile();
|
||||
|
||||
const remoteService = remoteModule.get<OllamaService>(OllamaService);
|
||||
|
||||
mockFetch.mockResolvedValue({
|
||||
ok: true,
|
||||
json: async () => ({
|
||||
model: "mistral",
|
||||
response: "Remote response",
|
||||
done: true,
|
||||
}),
|
||||
});
|
||||
|
||||
await remoteService.generate("Test");
|
||||
|
||||
expect(mockFetch).toHaveBeenCalledWith(
|
||||
"http://remote-server:11434/api/generate",
|
||||
expect.any(Object)
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
Reference in New Issue
Block a user