Files
stack/apps/api/src/ollama/ollama.service.spec.ts
Jason Woltje 12abdfe81d feat(#93): implement agent spawn via federation
Implements FED-010: Agent Spawn via Federation feature that enables
spawning and managing Claude agents on remote federated Mosaic Stack
instances via COMMAND message type.

Features:
- Federation agent command types (spawn, status, kill)
- FederationAgentService for handling agent operations
- Integration with orchestrator's agent spawner/lifecycle services
- API endpoints for spawning, querying status, and killing agents
- Full command routing through federation COMMAND infrastructure
- Comprehensive test coverage (12/12 tests passing)

Architecture:
- Hub → Spoke: Spawn agents on remote instances
- Command flow: FederationController → FederationAgentService →
  CommandService → Remote Orchestrator
- Response handling: Remote orchestrator returns agent status/results
- Security: Connection validation, signature verification

Files created:
- apps/api/src/federation/types/federation-agent.types.ts
- apps/api/src/federation/federation-agent.service.ts
- apps/api/src/federation/federation-agent.service.spec.ts

Files modified:
- apps/api/src/federation/command.service.ts (agent command routing)
- apps/api/src/federation/federation.controller.ts (agent endpoints)
- apps/api/src/federation/federation.module.ts (service registration)
- apps/orchestrator/src/api/agents/agents.controller.ts (status endpoint)
- apps/orchestrator/src/api/agents/agents.module.ts (lifecycle integration)

Testing:
- 12/12 tests passing for FederationAgentService
- All command service tests passing
- TypeScript compilation successful
- Linting passed

Refs #93

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-02-03 14:37:06 -06:00

431 lines
11 KiB
TypeScript

import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { OllamaService } from "./ollama.service";
import { HttpException, HttpStatus } from "@nestjs/common";
import type { GenerateOptionsDto, ChatMessage, ChatOptionsDto } from "./dto";
describe("OllamaService", () => {
let service: OllamaService;
let mockFetch: ReturnType<typeof vi.fn>;
const mockConfig = {
mode: "local" as const,
endpoint: "http://localhost:11434",
model: "llama3.2",
timeout: 30000,
};
beforeEach(async () => {
mockFetch = vi.fn();
global.fetch = mockFetch;
const module: TestingModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: mockConfig,
},
],
}).compile();
service = module.get<OllamaService>(OllamaService);
vi.clearAllMocks();
});
describe("generate", () => {
it("should generate text from prompt", async () => {
const mockResponse = {
model: "llama3.2",
response: "This is a generated response.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello, world!");
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/generate",
expect.objectContaining({
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello, world!",
stream: false,
}),
})
);
});
it("should generate text with custom options", async () => {
const options: GenerateOptionsDto = {
temperature: 0.8,
max_tokens: 100,
stop: ["\n"],
};
const mockResponse = {
model: "llama3.2",
response: "Custom response.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello", options);
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/generate",
expect.objectContaining({
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello",
stream: false,
options: {
temperature: 0.8,
num_predict: 100,
stop: ["\n"],
},
}),
})
);
});
it("should use custom model when provided", async () => {
const mockResponse = {
model: "mistral",
response: "Response from mistral.",
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.generate("Hello", {}, "mistral");
expect(result).toEqual(mockResponse);
const callArgs = mockFetch.mock.calls[0];
expect(callArgs[0]).toBe("http://localhost:11434/api/generate");
const body = JSON.parse(callArgs[1].body as string);
expect(body.model).toBe("mistral");
expect(body.prompt).toBe("Hello");
expect(body.stream).toBe(false);
});
it("should throw HttpException on network error", async () => {
mockFetch.mockRejectedValue(new Error("Network error"));
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
await expect(service.generate("Hello")).rejects.toThrow("Failed to connect to Ollama");
});
it("should throw HttpException on non-ok response", async () => {
mockFetch.mockResolvedValue({
ok: false,
status: 500,
statusText: "Internal Server Error",
});
await expect(service.generate("Hello")).rejects.toThrow(HttpException);
});
it("should handle timeout", async () => {
// Mock AbortController to simulate timeout
mockFetch.mockRejectedValue(new Error("The operation was aborted"));
// Create service with very short timeout
const shortTimeoutModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: { ...mockConfig, timeout: 1 },
},
],
}).compile();
const shortTimeoutService = shortTimeoutModule.get<OllamaService>(OllamaService);
await expect(shortTimeoutService.generate("Hello")).rejects.toThrow(HttpException);
});
});
describe("chat", () => {
it("should complete chat with messages", async () => {
const messages: ChatMessage[] = [
{ role: "system", content: "You are helpful." },
{ role: "user", content: "Hello!" },
];
const mockResponse = {
model: "llama3.2",
message: {
role: "assistant",
content: "Hello! How can I help you?",
},
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.chat(messages);
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/chat",
expect.objectContaining({
method: "POST",
body: JSON.stringify({
model: "llama3.2",
messages,
stream: false,
}),
})
);
});
it("should chat with custom options", async () => {
const messages: ChatMessage[] = [{ role: "user", content: "Hello!" }];
const options: ChatOptionsDto = {
temperature: 0.5,
max_tokens: 50,
};
const mockResponse = {
model: "llama3.2",
message: { role: "assistant", content: "Hi!" },
done: true,
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
await service.chat(messages, options);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/chat",
expect.objectContaining({
body: JSON.stringify({
model: "llama3.2",
messages,
stream: false,
options: {
temperature: 0.5,
num_predict: 50,
},
}),
})
);
});
it("should throw HttpException on chat error", async () => {
mockFetch.mockRejectedValue(new Error("Connection refused"));
await expect(service.chat([{ role: "user", content: "Hello" }])).rejects.toThrow(
HttpException
);
});
});
describe("embed", () => {
it("should generate embeddings for text", async () => {
const mockResponse = {
embedding: [0.1, 0.2, 0.3, 0.4],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.embed("Hello world");
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/embeddings",
expect.objectContaining({
method: "POST",
body: JSON.stringify({
model: "llama3.2",
prompt: "Hello world",
}),
})
);
});
it("should use custom model for embeddings", async () => {
const mockResponse = {
embedding: [0.1, 0.2],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
await service.embed("Test", "nomic-embed-text");
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/embeddings",
expect.objectContaining({
body: JSON.stringify({
model: "nomic-embed-text",
prompt: "Test",
}),
})
);
});
it("should throw HttpException on embed error", async () => {
mockFetch.mockRejectedValue(new Error("Model not found"));
await expect(service.embed("Hello")).rejects.toThrow(HttpException);
});
});
describe("listModels", () => {
it("should list available models", async () => {
const mockResponse = {
models: [
{
name: "llama3.2:latest",
modified_at: "2024-01-15T10:00:00Z",
size: 4500000000,
digest: "abc123",
},
{
name: "mistral:latest",
modified_at: "2024-01-14T09:00:00Z",
size: 4200000000,
digest: "def456",
},
],
};
mockFetch.mockResolvedValue({
ok: true,
json: async () => mockResponse,
});
const result = await service.listModels();
expect(result).toEqual(mockResponse);
expect(mockFetch).toHaveBeenCalledWith(
"http://localhost:11434/api/tags",
expect.objectContaining({
method: "GET",
})
);
});
it("should throw HttpException when listing fails", async () => {
mockFetch.mockRejectedValue(new Error("Server error"));
await expect(service.listModels()).rejects.toThrow(HttpException);
});
});
describe("healthCheck", () => {
it("should return healthy status when Ollama is available", async () => {
mockFetch.mockResolvedValue({
ok: true,
json: async () => ({ status: "ok" }),
});
const result = await service.healthCheck();
expect(result).toEqual({
status: "healthy",
mode: "local",
endpoint: "http://localhost:11434",
available: true,
});
});
it("should return unhealthy status when Ollama is unavailable", async () => {
mockFetch.mockRejectedValue(new Error("Connection refused"));
const result = await service.healthCheck();
expect(result).toEqual({
status: "unhealthy",
mode: "local",
endpoint: "http://localhost:11434",
available: false,
error: "Connection refused",
});
});
it("should handle non-ok response in health check", async () => {
mockFetch.mockResolvedValue({
ok: false,
status: 503,
statusText: "Service Unavailable",
});
const result = await service.healthCheck();
expect(result.status).toBe("unhealthy");
expect(result.available).toBe(false);
});
});
describe("configuration", () => {
it("should use remote mode configuration", async () => {
const remoteConfig = {
mode: "remote" as const,
endpoint: "http://remote-server:11434",
model: "mistral",
timeout: 60000,
};
const remoteModule = await Test.createTestingModule({
providers: [
OllamaService,
{
provide: "OLLAMA_CONFIG",
useValue: remoteConfig,
},
],
}).compile();
const remoteService = remoteModule.get<OllamaService>(OllamaService);
mockFetch.mockResolvedValue({
ok: true,
json: async () => ({
model: "mistral",
response: "Remote response",
done: true,
}),
});
await remoteService.generate("Test");
expect(mockFetch).toHaveBeenCalledWith(
"http://remote-server:11434/api/generate",
expect.any(Object)
);
});
});
});