Files
stack/apps/api/src/ollama/ollama.controller.spec.ts
Jason Woltje 1e5fcd19a4 feat(#59): implement wiki-link parser
- Created wiki-link-parser.ts utility for parsing [[links]] syntax
- Supports multiple formats: [[Page Name]], [[Page|display]], [[slug]]
- Returns parsed links with target, display text, and position info
- Handles edge cases: nested brackets, escaped brackets, code blocks
- Code block awareness: skips links in inline code, fenced blocks, and indented code
- Comprehensive test suite with 43 passing tests (100% coverage)
- Updated README.md with parser documentation

Implements KNOW-007 (Issue #59) - Wiki-style linking foundation
2026-01-29 17:42:49 -06:00

244 lines
6.0 KiB
TypeScript

import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { OllamaController } from "./ollama.controller";
import { OllamaService } from "./ollama.service";
import type { ChatMessage } from "./dto";
/**
 * Unit tests for OllamaController.
 *
 * The real OllamaService is replaced with a vi.fn() mock for every method the
 * controller delegates to, so each test pins down exactly which arguments the
 * controller forwards (prompt/messages, options, model) and that the service's
 * resolved value is returned unchanged.
 */
describe("OllamaController", () => {
  let controller: OllamaController;

  // One mock function per OllamaService method the controller calls.
  // (Fix: the previous version also kept an unused `service` local that was
  // assigned from module.get() but never read — removed.)
  const mockOllamaService = {
    generate: vi.fn(),
    chat: vi.fn(),
    embed: vi.fn(),
    listModels: vi.fn(),
    healthCheck: vi.fn(),
  };

  beforeEach(async () => {
    // Build a fresh testing module per test with the mock standing in for
    // OllamaService via the DI token.
    const module: TestingModule = await Test.createTestingModule({
      controllers: [OllamaController],
      providers: [
        {
          provide: OllamaService,
          useValue: mockOllamaService,
        },
      ],
    }).compile();

    controller = module.get<OllamaController>(OllamaController);
    // Reset call history so expectations from one test cannot leak into the next.
    vi.clearAllMocks();
  });

  describe("generate", () => {
    it("should generate text from prompt", async () => {
      const mockResponse = {
        model: "llama3.2",
        response: "Generated text",
        done: true,
      };
      mockOllamaService.generate.mockResolvedValue(mockResponse);

      const result = await controller.generate({
        prompt: "Hello",
      });

      expect(result).toEqual(mockResponse);
      // With no options/model in the DTO, the controller forwards undefined for both.
      expect(mockOllamaService.generate).toHaveBeenCalledWith(
        "Hello",
        undefined,
        undefined
      );
    });

    it("should generate with options and custom model", async () => {
      const mockResponse = {
        model: "mistral",
        response: "Response",
        done: true,
      };
      mockOllamaService.generate.mockResolvedValue(mockResponse);

      const result = await controller.generate({
        prompt: "Test",
        model: "mistral",
        options: {
          temperature: 0.7,
          max_tokens: 100,
        },
      });

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.generate).toHaveBeenCalledWith(
        "Test",
        { temperature: 0.7, max_tokens: 100 },
        "mistral"
      );
    });
  });

  describe("chat", () => {
    it("should complete chat conversation", async () => {
      const messages: ChatMessage[] = [
        { role: "user", content: "Hello!" },
      ];
      const mockResponse = {
        model: "llama3.2",
        message: {
          role: "assistant",
          content: "Hi there!",
        },
        done: true,
      };
      mockOllamaService.chat.mockResolvedValue(mockResponse);

      const result = await controller.chat({
        messages,
      });

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.chat).toHaveBeenCalledWith(
        messages,
        undefined,
        undefined
      );
    });

    it("should chat with options and custom model", async () => {
      const messages: ChatMessage[] = [
        { role: "system", content: "You are helpful." },
        { role: "user", content: "Hello!" },
      ];
      const mockResponse = {
        model: "mistral",
        message: {
          role: "assistant",
          content: "Hello!",
        },
        done: true,
      };
      mockOllamaService.chat.mockResolvedValue(mockResponse);

      const result = await controller.chat({
        messages,
        model: "mistral",
        options: {
          temperature: 0.5,
        },
      });

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.chat).toHaveBeenCalledWith(
        messages,
        { temperature: 0.5 },
        "mistral"
      );
    });
  });

  describe("embed", () => {
    it("should generate embeddings", async () => {
      const mockResponse = {
        embedding: [0.1, 0.2, 0.3],
      };
      mockOllamaService.embed.mockResolvedValue(mockResponse);

      const result = await controller.embed({
        text: "Sample text",
      });

      expect(result).toEqual(mockResponse);
      // embed takes (text, model) — no options parameter, unlike generate/chat.
      expect(mockOllamaService.embed).toHaveBeenCalledWith(
        "Sample text",
        undefined
      );
    });

    it("should embed with custom model", async () => {
      const mockResponse = {
        embedding: [0.1, 0.2],
      };
      mockOllamaService.embed.mockResolvedValue(mockResponse);

      const result = await controller.embed({
        text: "Test",
        model: "nomic-embed-text",
      });

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.embed).toHaveBeenCalledWith(
        "Test",
        "nomic-embed-text"
      );
    });
  });

  describe("listModels", () => {
    it("should list available models", async () => {
      const mockResponse = {
        models: [
          {
            name: "llama3.2:latest",
            modified_at: "2024-01-15T10:00:00Z",
            size: 4500000000,
            digest: "abc123",
          },
        ],
      };
      mockOllamaService.listModels.mockResolvedValue(mockResponse);

      const result = await controller.listModels();

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.listModels).toHaveBeenCalled();
    });
  });

  describe("healthCheck", () => {
    it("should return health status", async () => {
      const mockResponse = {
        status: "healthy" as const,
        mode: "local" as const,
        endpoint: "http://localhost:11434",
        available: true,
      };
      mockOllamaService.healthCheck.mockResolvedValue(mockResponse);

      const result = await controller.healthCheck();

      expect(result).toEqual(mockResponse);
      expect(mockOllamaService.healthCheck).toHaveBeenCalled();
    });

    it("should return unhealthy status", async () => {
      const mockResponse = {
        status: "unhealthy" as const,
        mode: "local" as const,
        endpoint: "http://localhost:11434",
        available: false,
        error: "Connection refused",
      };
      mockOllamaService.healthCheck.mockResolvedValue(mockResponse);

      const result = await controller.healthCheck();

      expect(result).toEqual(mockResponse);
      expect(result.status).toBe("unhealthy");
    });
  });
});