test(#141): add Non-AI Coordinator integration tests

Comprehensive E2E validation proving coordinator enforces quality
gates and prevents premature completion claims.

Test scenarios (21 tests):
- Rejection Flow: Build/lint/test/coverage gate failures
- Acceptance Flow: All gates pass, required-only pass
- Continuation Flow: Retry, escalation, attempt tracking
- Escalation Flow: Manual review, notifications, history
- Configuration: Workspace-specific, defaults, custom gates
- Performance: Timeout compliance, memory limits
- Complete E2E: Full rejection-continuation-acceptance cycle

Fixtures:
- mock-agent-outputs.ts: Simulated gate execution results
- mock-gate-configs.ts: Various gate configurations

Validates integration of:
- Quality Orchestrator (#134)
- Quality Gate Config (#135)
- Completion Verification (#136)
- Continuation Prompts (#137)
- Rejection Handler (#139)

All 21 tests passing

Fixes #141

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-31 14:14:56 -06:00
parent a86d304f07
commit faf6328e0b
4 changed files with 1137 additions and 0 deletions

View File

@@ -0,0 +1,796 @@
/**
* Integration tests for Non-AI Coordinator
* Validates complete orchestration flow end-to-end
*/
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { QualityOrchestratorService } from "../quality-orchestrator.service";
import { CompletionVerificationService } from "../../completion-verification/completion-verification.service";
import { ContinuationPromptsService } from "../../continuation-prompts/continuation-prompts.service";
import { RejectionHandlerService } from "../../rejection-handler/rejection-handler.service";
import { PrismaService } from "../../prisma/prisma.service";
import type { CompletionClaim, OrchestrationConfig, QualityGate } from "../interfaces";
import type { RejectionContext } from "../../rejection-handler/interfaces";
import { MOCK_OUTPUTS, MOCK_FILE_CHANGES } from "./test-fixtures";
// Mock child_process exec - must be defined inside factory to avoid hoisting issues
// Replace Node's child_process with a stub so no real shell command ever runs.
// The inline factory (rather than a hoisted variable) sidesteps vi.mock hoisting.
vi.mock("child_process", () => ({ exec: vi.fn() }));
describe("Non-AI Coordinator Integration", () => {
let orchestrator: QualityOrchestratorService;
let verification: CompletionVerificationService;
let prompts: ContinuationPromptsService;
let rejection: RejectionHandlerService;
let mockPrisma: Partial<PrismaService>;
let execMock: ReturnType<typeof vi.fn>;
beforeEach(async () => {
  // Grab the vi.fn() installed by the vi.mock("child_process") factory above,
  // so each test can script per-command gate results.
  const childProcess = await import("child_process");
  execMock = vi.mocked(childProcess.exec);
  // Minimal PrismaService stub: only the taskRejection delegate is exercised
  // by the rejection handler in this suite.
  mockPrisma = {
    taskRejection: {
      // Resolves to a freshly-created rejection record.
      create: vi.fn().mockResolvedValue({
        id: "rejection-1",
        taskId: "task-1",
        workspaceId: "workspace-1",
        agentId: "agent-1",
        attemptCount: 1,
        failures: [],
        originalTask: "Test task",
        startedAt: new Date(),
        rejectedAt: new Date(),
        escalated: false,
        manualReview: false,
      }),
      // No prior rejection history by default.
      findMany: vi.fn().mockResolvedValue([]),
      // Simulates flipping a record into the escalated/manual-review state.
      update: vi.fn().mockResolvedValue({
        id: "rejection-1",
        manualReview: true,
        escalated: true,
      }),
    },
  } as Partial<PrismaService>;
  // Wire the four real services together (only Prisma is faked) so the
  // orchestration flow is exercised end-to-end.
  const module: TestingModule = await Test.createTestingModule({
    providers: [
      QualityOrchestratorService,
      CompletionVerificationService,
      ContinuationPromptsService,
      RejectionHandlerService,
      {
        provide: PrismaService,
        useValue: mockPrisma,
      },
    ],
  }).compile();
  orchestrator = module.get<QualityOrchestratorService>(QualityOrchestratorService);
  verification = module.get<CompletionVerificationService>(CompletionVerificationService);
  prompts = module.get<ContinuationPromptsService>(ContinuationPromptsService);
  rejection = module.get<RejectionHandlerService>(RejectionHandlerService);
});
// Wipe recorded calls between tests (mock implementations themselves are kept).
afterEach(() => vi.clearAllMocks());
/**
 * Build a CompletionClaim fixture with sensible defaults.
 * Any field can be overridden per test via `overrides`.
 */
function createClaim(overrides: Partial<CompletionClaim> = {}): CompletionClaim {
  const base: CompletionClaim = {
    taskId: "task-1",
    agentId: "agent-1",
    workspaceId: "workspace-1",
    claimedAt: new Date(),
    message: "Task completed successfully",
    filesChanged: MOCK_FILE_CHANGES.withTests,
  };
  return { ...base, ...overrides };
}
/**
 * Helper to create an orchestration config.
 *
 * Default pipeline: build -> lint -> test (all required) plus an optional
 * coverage gate whose report must show an "All files" total matching
 * 80-99 (`[89]\d`) or exactly 100.
 *
 * @param overrides - partial config merged over the defaults (shallow spread).
 */
function createConfig(overrides?: Partial<OrchestrationConfig>): OrchestrationConfig {
  const defaultGates: QualityGate[] = [
    {
      id: "build",
      name: "Build Check",
      description: "Verify code compiles",
      type: "build",
      command: "pnpm build",
      required: true,
      order: 1,
    },
    {
      id: "lint",
      name: "Lint Check",
      description: "Code style check",
      type: "lint",
      command: "pnpm lint",
      required: true,
      order: 2,
    },
    {
      id: "test",
      name: "Test Suite",
      description: "All tests pass",
      type: "test",
      command: "pnpm test",
      required: true,
      order: 3,
    },
    {
      id: "coverage",
      name: "Coverage Check",
      description: "Test coverage >= 85%",
      type: "coverage",
      command: "pnpm test:coverage",
      // BUGFIX: the alternation must be grouped. The previous pattern
      // /All files.*[89]\d|100/ matched a bare "100" anywhere in the output
      // (e.g. a timing line), ignoring the "All files" prefix entirely.
      // NOTE(review): [89]\d accepts 80-99, looser than the ">= 85%" in the
      // description — confirm the intended threshold.
      expectedOutput: /All files.*(?:[89]\d|100)/,
      required: false,
      order: 4,
    },
  ];
  return {
    gates: defaultGates,
    strictMode: false,
    maxContinuations: 3,
    ...overrides,
  };
}
/**
 * Mock exec so every command reports the named gate's canned result.
 *
 * Looks up `${gateName}Success` / `${gateName}Failure` in MOCK_OUTPUTS,
 * falling back to the `Pass` / `Fail` suffixes used by the coverage fixtures
 * (previously mockGate("coverage", ...) dereferenced `undefined.output` and
 * threw an opaque TypeError). Throws a descriptive error if no fixture exists.
 */
function mockGate(gateName: string, success: boolean): void {
  execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
    const suffixes = success ? ["Success", "Pass"] : ["Failure", "Fail"];
    const key = suffixes
      .map((suffix) => `${gateName}${suffix}`)
      .find((candidate) => candidate in MOCK_OUTPUTS) as keyof typeof MOCK_OUTPUTS | undefined;
    if (key === undefined) {
      throw new Error(`No MOCK_OUTPUTS fixture for gate "${gateName}" (tried ${suffixes.join("/")})`);
    }
    const output = MOCK_OUTPUTS[key];
    if (success) {
      // Successful gate: resolve with stdout, mirroring child_process.exec.
      callback(null, { stdout: output.output, stderr: "" });
    } else {
      // Failed gate: invoke the callback with an Error carrying the exit code.
      const error = new Error(output.output);
      Object.assign(error, { code: output.exitCode });
      callback(error);
    }
  });
}
/**
 * Mock exec so every gate command succeeds with its canned "pass" output.
 * Unrecognized commands fall back to a generic "Success" stdout.
 */
function mockAllGatesPass(): void {
  execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
    // Order matters: "test:coverage" must be matched before the bare "test".
    let stdout = "Success";
    if (cmd.includes("build")) {
      stdout = MOCK_OUTPUTS.buildSuccess.output;
    } else if (cmd.includes("lint")) {
      stdout = MOCK_OUTPUTS.lintSuccess.output;
    } else if (cmd.includes("test:coverage")) {
      stdout = MOCK_OUTPUTS.coveragePass.output;
    } else if (cmd.includes("test")) {
      stdout = MOCK_OUTPUTS.testSuccess.output;
    }
    callback(null, { stdout, stderr: "" });
  });
}
/**
 * Rejection Flow: a completion claim must be rejected whenever any required
 * quality gate fails, with feedback that names the failing gate.
 */
describe("Rejection Flow", () => {
  it("should reject agent claim when build gate fails", async () => {
    const claim = createClaim();
    const config = createConfig();
    // Every exec() invocation reports the canned build failure.
    mockGate("build", false);
    const result = await orchestrator.validateCompletion(claim, config);
    expect(result.verdict).toBe("rejected");
    expect(result.allGatesPassed).toBe(false);
    expect(result.requiredGatesFailed).toContain("build");
    // Feedback should reference the gate's human-readable name.
    expect(result.feedback).toContain("Build Check");
  });
  it("should reject agent claim when lint gate fails", async () => {
    const claim = createClaim();
    const config = createConfig();
    // Build passes, lint fails, everything else passes
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      if (cmd.includes("build")) {
        callback(null, { stdout: MOCK_OUTPUTS.buildSuccess.output, stderr: "" });
      } else if (cmd.includes("lint")) {
        const error = new Error(MOCK_OUTPUTS.lintFailure.output);
        Object.assign(error, { code: 1 });
        callback(error);
      } else if (cmd.includes("test:coverage")) {
        callback(null, { stdout: MOCK_OUTPUTS.coveragePass.output, stderr: "" });
      } else if (cmd.includes("test")) {
        callback(null, { stdout: MOCK_OUTPUTS.testSuccess.output, stderr: "" });
      } else {
        callback(null, { stdout: "Success", stderr: "" });
      }
    });
    const result = await orchestrator.validateCompletion(claim, config);
    expect(result.verdict).toBe("rejected");
    expect(result.requiredGatesFailed).toContain("lint");
    // Suggested actions should mention lint remediation.
    expect(result.suggestedActions).toEqual(
      expect.arrayContaining([expect.stringContaining("lint")])
    );
  });
  it("should reject agent claim when test gate fails", async () => {
    const claim = createClaim();
    const config = createConfig();
    // NOTE: the bare "test" branch also matches "pnpm test:coverage", so the
    // optional coverage gate fails here too; only the required "test" failure
    // is asserted below.
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      if (cmd.includes("build")) {
        callback(null, { stdout: MOCK_OUTPUTS.buildSuccess.output, stderr: "" });
      } else if (cmd.includes("lint")) {
        callback(null, { stdout: MOCK_OUTPUTS.lintSuccess.output, stderr: "" });
      } else if (cmd.includes("test")) {
        const error = new Error(MOCK_OUTPUTS.testFailure.output);
        Object.assign(error, { code: 1 });
        callback(error);
      }
    });
    const result = await orchestrator.validateCompletion(claim, config);
    expect(result.verdict).toBe("rejected");
    expect(result.requiredGatesFailed).toContain("test");
    expect(result.suggestedActions).toEqual(
      expect.arrayContaining([expect.stringContaining("Fix failing tests")])
    );
  });
  it("should reject agent claim when coverage is below threshold", async () => {
    const claim = createClaim();
    // Mark coverage as required to ensure rejection
    const customConfig = createConfig({
      gates: [
        {
          id: "build",
          name: "Build Check",
          description: "Verify code compiles",
          type: "build",
          command: "pnpm build",
          required: true,
          order: 1,
        },
        {
          id: "coverage",
          name: "Coverage Check",
          description: "Test coverage >= 85%",
          type: "coverage",
          command: "pnpm test:coverage",
          required: true, // Make coverage required so it causes rejection
          order: 2,
        },
      ],
    });
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      if (cmd.includes("build")) {
        callback(null, { stdout: MOCK_OUTPUTS.buildSuccess.output, stderr: "" });
      } else if (cmd.includes("test:coverage")) {
        // Simulate coverage failure by returning an error
        const error = new Error("Coverage below threshold: 72% < 85%");
        Object.assign(error, { code: 1 });
        callback(error);
      } else {
        callback(null, { stdout: "Success", stderr: "" });
      }
    });
    const result = await orchestrator.validateCompletion(claim, customConfig);
    expect(result.verdict).toBe("rejected");
    expect(result.allGatesPassed).toBe(false);
    expect(result.requiredGatesFailed).toContain("coverage");
    // Coverage gate should fail due to error
    const coverageGate = result.gateResults.find((r) => r.gateId === "coverage");
    expect(coverageGate?.passed).toBe(false);
  });
  it("should generate continuation prompt with specific failures", async () => {
    const claim = createClaim();
    const config = createConfig();
    mockGate("build", false);
    const validation = await orchestrator.validateCompletion(claim, config);
    expect(validation.verdict).toBe("rejected");
    // The prompt handed back to the agent must spell out what failed
    // and what to do next.
    const continuationPrompt = orchestrator.generateContinuationPrompt(validation);
    expect(continuationPrompt).toContain("Quality gates failed");
    expect(continuationPrompt).toContain("Build Check");
    expect(continuationPrompt).toContain("Suggested actions");
  });
});
/**
 * Acceptance Flow: claims are accepted when every required gate passes,
 * even if optional (non-required) gates fail.
 */
describe("Acceptance Flow", () => {
  it("should accept agent claim when all gates pass", async () => {
    const claim = createClaim();
    const config = createConfig();
    mockAllGatesPass();
    const result = await orchestrator.validateCompletion(claim, config);
    expect(result.verdict).toBe("accepted");
    expect(result.allGatesPassed).toBe(true);
    expect(result.requiredGatesFailed).toHaveLength(0);
    // No feedback is produced for a clean acceptance.
    expect(result.feedback).toBeUndefined();
  });
  it("should accept with warnings when only required gates pass", async () => {
    const claim = createClaim();
    // Create config with a non-required custom gate that will fail
    const customConfig = createConfig({
      gates: [
        {
          id: "build",
          name: "Build Check",
          description: "Verify code compiles",
          type: "build",
          command: "pnpm build",
          required: true,
          order: 1,
        },
        {
          id: "lint",
          name: "Lint Check",
          description: "Code style check",
          type: "lint",
          command: "pnpm lint",
          required: true,
          order: 2,
        },
        {
          id: "custom-optional",
          name: "Optional Check",
          description: "Non-required custom check",
          type: "custom",
          command: "pnpm custom-check",
          // The gate fails because the mocked output never matches this.
          expectedOutput: "EXPECTED_PATTERN_THAT_WONT_MATCH",
          required: false, // Non-required gate
          order: 3,
        },
      ],
      strictMode: false,
    });
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      if (cmd.includes("build")) {
        callback(null, { stdout: MOCK_OUTPUTS.buildSuccess.output, stderr: "" });
      } else if (cmd.includes("lint")) {
        callback(null, { stdout: MOCK_OUTPUTS.lintSuccess.output, stderr: "" });
      } else if (cmd.includes("custom-check")) {
        callback(null, { stdout: "OUTPUT_THAT_DOESNT_MATCH", stderr: "" });
      } else {
        callback(null, { stdout: "Success", stderr: "" });
      }
    });
    const result = await orchestrator.validateCompletion(claim, customConfig);
    // Accepted overall, but allGatesPassed reflects the optional failure.
    expect(result.verdict).toBe("accepted");
    expect(result.allGatesPassed).toBe(false);
    expect(result.requiredGatesFailed).toHaveLength(0);
  });
});
/**
 * Continuation Flow: rejected work may be retried until the configured
 * attempt budget is exhausted, and every continuation is recorded.
 */
describe("Continuation Flow", () => {
  it("should allow retry after fixing failures", async () => {
    const claim = createClaim();
    const config = createConfig();
    // Round one: the build gate fails, so a continuation is allowed.
    mockGate("build", false);
    const firstRound = await orchestrator.validateCompletion(claim, config);
    expect(firstRound.verdict).toBe("rejected");
    expect(orchestrator.shouldContinue(firstRound, 1, config)).toBe(true);
    // Round two: the "fix" lands and every gate goes green.
    mockAllGatesPass();
    const secondRound = await orchestrator.validateCompletion(claim, config);
    expect(secondRound.verdict).toBe("accepted");
    expect(secondRound.allGatesPassed).toBe(true);
  });
  it("should escalate after max continuation attempts", async () => {
    const claim = createClaim();
    const config = createConfig({ maxContinuations: 3 });
    mockGate("build", false);
    const outcome = await orchestrator.validateCompletion(claim, config);
    expect(outcome.verdict).toBe("rejected");
    // At attempt 3 of 3 the budget is spent: no further continuations.
    expect(orchestrator.shouldContinue(outcome, 3, config)).toBe(false);
  });
  it("should track attempt count correctly", () => {
    const claim = createClaim();
    const config = createConfig();
    // NOTE(review): spying on the very method we invoke makes this largely a
    // self-check — it verifies call bookkeeping, not recordContinuation's effect.
    const recordSpy = vi.spyOn(orchestrator, "recordContinuation");
    const validation = {
      claim,
      gateResults: [],
      allGatesPassed: false,
      requiredGatesFailed: ["build"],
      verdict: "rejected" as const,
    };
    for (const attempt of [1, 2, 3]) {
      orchestrator.recordContinuation("task-1", attempt, validation);
    }
    expect(recordSpy).toHaveBeenCalledTimes(3);
    for (const attempt of [1, 2, 3]) {
      expect(recordSpy).toHaveBeenNthCalledWith(attempt, "task-1", attempt, validation);
    }
  });
});
/**
 * Escalation Flow: the rejection handler persists rejections, escalates to
 * manual review after repeated failures, and sends notifications on
 * critical failures.
 */
describe("Escalation Flow", () => {
  it("should escalate to manual review after 3 rejections", async () => {
    const context: RejectionContext = {
      taskId: "task-1",
      workspaceId: "workspace-1",
      agentId: "agent-1",
      attemptCount: 3,
      failures: [
        {
          gateName: "build",
          failureType: "build-error",
          message: "Compilation error",
          attempts: 3,
        },
      ],
      originalTask: "Implement feature X",
      startedAt: new Date("2026-01-31T10:00:00Z"),
      rejectedAt: new Date("2026-01-31T12:00:00Z"),
    };
    const result = await rejection.handleRejection(context);
    expect(result.handled).toBe(true);
    expect(result.escalated).toBe(true);
    expect(result.manualReviewRequired).toBe(true);
    expect(result.taskState).toBe("blocked");
    // The rejection must be persisted via Prisma.
    expect(mockPrisma.taskRejection?.create).toHaveBeenCalled();
  });
  it("should notify on critical failures", async () => {
    // A single critical-security failure escalates immediately, without
    // waiting for the attempt budget to run out.
    const context: RejectionContext = {
      taskId: "task-1",
      workspaceId: "workspace-1",
      agentId: "agent-1",
      attemptCount: 1,
      failures: [
        {
          gateName: "security",
          failureType: "critical-security",
          message: "Security vulnerability detected",
          attempts: 1,
        },
      ],
      originalTask: "Implement feature X",
      startedAt: new Date("2026-01-31T10:00:00Z"),
      rejectedAt: new Date("2026-01-31T10:30:00Z"),
    };
    const result = await rejection.handleRejection(context);
    expect(result.escalated).toBe(true);
    expect(result.notificationsSent).toEqual(
      expect.arrayContaining([expect.stringContaining("@mosaicstack.dev")])
    );
  });
  it("should log rejection history", async () => {
    const context: RejectionContext = {
      taskId: "task-1",
      workspaceId: "workspace-1",
      agentId: "agent-1",
      attemptCount: 2,
      failures: [
        {
          gateName: "test",
          failureType: "test-failure",
          message: "Tests failed",
          attempts: 2,
        },
      ],
      originalTask: "Implement feature X",
      startedAt: new Date("2026-01-31T10:00:00Z"),
      rejectedAt: new Date("2026-01-31T11:00:00Z"),
    };
    await rejection.handleRejection(context);
    // The persisted record must carry the task id and attempt count.
    expect(mockPrisma.taskRejection?.create).toHaveBeenCalledWith(
      expect.objectContaining({
        data: expect.objectContaining({
          taskId: "task-1",
          attemptCount: 2,
        }),
      })
    );
  });
});
/**
 * Configuration: the orchestrator must honor workspace-specific and custom
 * gate definitions, and fall back to the default pipeline otherwise.
 */
describe("Configuration", () => {
  it("should respect workspace-specific gate configs", async () => {
    const claim = createClaim();
    const customConfig = createConfig({
      gates: [
        {
          id: "custom-build",
          name: "Custom Build",
          description: "Custom build process",
          type: "build",
          command: "npm run custom-build",
          required: true,
          order: 1,
        },
      ],
    });
    // Any command succeeds; only the single custom gate should execute.
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      callback(null, { stdout: "Custom build success", stderr: "" });
    });
    const result = await orchestrator.validateCompletion(claim, customConfig);
    expect(result.verdict).toBe("accepted");
    expect(result.gateResults).toHaveLength(1);
    expect(result.gateResults[0]?.gateId).toBe("custom-build");
  });
  it("should use default gates when no custom config", () => {
    const defaultGates = orchestrator.getDefaultGates("workspace-1");
    // Default pipeline: build, lint, test, coverage — in that order.
    expect(defaultGates).toHaveLength(4);
    expect(defaultGates.map((g) => g.id)).toEqual(["build", "lint", "test", "coverage"]);
  });
  it("should support custom gates", async () => {
    const claim = createClaim();
    const customConfig = createConfig({
      gates: [
        {
          id: "e2e",
          name: "E2E Tests",
          description: "End-to-end tests",
          type: "test",
          command: "pnpm test:e2e",
          required: true,
          order: 1,
        },
        {
          id: "performance",
          name: "Performance Tests",
          description: "Performance benchmarks",
          type: "test",
          command: "pnpm test:perf",
          required: false,
          order: 2,
        },
      ],
    });
    mockAllGatesPass();
    const result = await orchestrator.validateCompletion(claim, customConfig);
    expect(result.verdict).toBe("accepted");
    expect(result.gateResults).toHaveLength(2);
    // Gates execute in their declared order.
    expect(result.gateResults.map((g) => g.gateId)).toEqual(["e2e", "performance"]);
  });
});
/**
 * Performance: validation must finish quickly and without excessive heap
 * growth, even for large change sets.
 */
describe("Performance", () => {
  it("should complete gate validation within timeout", async () => {
    const claim = createClaim();
    const config = createConfig();
    mockAllGatesPass();
    const t0 = Date.now();
    const outcome = await orchestrator.validateCompletion(claim, config);
    const elapsedMs = Date.now() - t0;
    expect(outcome.verdict).toBe("accepted");
    // Entire pipeline (mocked exec) must finish in under 5 seconds.
    expect(elapsedMs).toBeLessThan(5000);
  });
  it("should not exceed memory limits", async () => {
    // A claim carrying 1000 changed files stresses any per-file bookkeeping.
    const bulkClaim = createClaim({ filesChanged: Array(1000).fill("file.ts") });
    const config = createConfig();
    mockAllGatesPass();
    const heapBefore = process.memoryUsage().heapUsed;
    await orchestrator.validateCompletion(bulkClaim, config);
    const heapAfter = process.memoryUsage().heapUsed;
    // Heap growth must stay under 100MB for a single validation run.
    expect(heapAfter - heapBefore).toBeLessThan(100 * 1024 * 1024);
  });
});
/**
 * Complete E2E Flow: exercises the full coordinator lifecycle — rejection,
 * continuation prompting, eventual acceptance, escalation after exhausted
 * attempts, and rejection reporting.
 */
describe("Complete E2E Flow", () => {
  it("should handle full rejection-continuation-acceptance cycle", async () => {
    const claim = createClaim({ filesChanged: ["feature.ts"] });
    const config = createConfig();
    // Attempt 1: Build fails
    mockGate("build", false);
    const result1 = await orchestrator.validateCompletion(claim, config);
    expect(result1.verdict).toBe("rejected");
    expect(result1.requiredGatesFailed).toContain("build");
    orchestrator.recordContinuation("task-1", 1, result1);
    // Generate continuation prompt
    const prompt1 = prompts.generatePrompt({
      taskId: "task-1",
      originalTask: "Implement feature X",
      attemptNumber: 1,
      maxAttempts: 3,
      failures: [
        {
          type: "build-error",
          message: "Compilation failed",
        },
      ],
      filesChanged: claim.filesChanged,
    });
    // The prompt must make clear the task is NOT done and constrain the retry.
    expect(prompt1.systemPrompt).toContain("not completed successfully");
    expect(prompt1.constraints.length).toBeGreaterThan(0);
    // Attempt 2: Build passes, tests fail
    execMock.mockImplementation((cmd: string, opts: unknown, callback: CallableFunction) => {
      if (cmd.includes("build")) {
        callback(null, { stdout: MOCK_OUTPUTS.buildSuccess.output, stderr: "" });
      } else if (cmd.includes("test")) {
        const error = new Error(MOCK_OUTPUTS.testFailure.output);
        Object.assign(error, { code: 1 });
        callback(error);
      } else {
        callback(null, { stdout: "Success", stderr: "" });
      }
    });
    // The retry claim now includes a spec file alongside the source.
    const claim2 = createClaim({ filesChanged: ["feature.ts", "feature.spec.ts"] });
    const result2 = await orchestrator.validateCompletion(claim2, config);
    expect(result2.verdict).toBe("rejected");
    expect(result2.requiredGatesFailed).toContain("test");
    orchestrator.recordContinuation("task-1", 2, result2);
    // Attempt 3: All gates pass
    mockAllGatesPass();
    const claim3 = createClaim({ filesChanged: ["feature.ts", "feature.spec.ts"] });
    const result3 = await orchestrator.validateCompletion(claim3, config);
    expect(result3.verdict).toBe("accepted");
    expect(result3.allGatesPassed).toBe(true);
    expect(result3.requiredGatesFailed).toHaveLength(0);
  });
  it("should handle rejection and escalation after max attempts", async () => {
    const claim = createClaim();
    const config = createConfig({ maxContinuations: 3 });
    // All attempts fail
    mockGate("build", false);
    // Attempt 1
    const result1 = await orchestrator.validateCompletion(claim, config);
    expect(result1.verdict).toBe("rejected");
    orchestrator.recordContinuation("task-1", 1, result1);
    expect(orchestrator.shouldContinue(result1, 1, config)).toBe(true);
    // Attempt 2
    const result2 = await orchestrator.validateCompletion(claim, config);
    expect(result2.verdict).toBe("rejected");
    orchestrator.recordContinuation("task-1", 2, result2);
    expect(orchestrator.shouldContinue(result2, 2, config)).toBe(true);
    // Attempt 3 — budget exhausted, continuation must stop here.
    const result3 = await orchestrator.validateCompletion(claim, config);
    expect(result3.verdict).toBe("rejected");
    orchestrator.recordContinuation("task-1", 3, result3);
    expect(orchestrator.shouldContinue(result3, 3, config)).toBe(false);
    // Escalate after 3 attempts
    const context: RejectionContext = {
      taskId: "task-1",
      workspaceId: "workspace-1",
      agentId: "agent-1",
      attemptCount: 3,
      failures: [
        {
          gateName: "build",
          failureType: "build-error",
          message: "Compilation error",
          attempts: 3,
        },
      ],
      originalTask: "Implement feature X",
      startedAt: new Date("2026-01-31T10:00:00Z"),
      rejectedAt: new Date("2026-01-31T12:00:00Z"),
    };
    const escalationResult = await rejection.handleRejection(context);
    expect(escalationResult.escalated).toBe(true);
    expect(escalationResult.manualReviewRequired).toBe(true);
    expect(escalationResult.taskState).toBe("blocked");
  });
  it("should generate comprehensive rejection report", () => {
    const context: RejectionContext = {
      taskId: "task-1",
      workspaceId: "workspace-1",
      agentId: "agent-1",
      attemptCount: 3,
      failures: [
        {
          gateName: "build",
          failureType: "build-error",
          message: "TypeScript compilation failed",
          attempts: 3,
        },
        {
          gateName: "test",
          failureType: "test-failure",
          message: "5 tests failed",
          attempts: 2,
        },
      ],
      originalTask: "Implement feature X with comprehensive tests",
      startedAt: new Date("2026-01-31T10:00:00Z"),
      rejectedAt: new Date("2026-01-31T12:30:00Z"),
    };
    // The report must surface every identifier and failure message.
    const report = rejection.generateRejectionReport(context);
    expect(report).toContain("Task Rejection Report");
    expect(report).toContain("task-1");
    expect(report).toContain("workspace-1");
    expect(report).toContain("agent-1");
    expect(report).toContain("3");
    expect(report).toContain("TypeScript compilation failed");
    expect(report).toContain("5 tests failed");
    expect(report).toContain("Implement feature X");
  });
});
});

View File

@@ -0,0 +1,6 @@
/**
 * Test fixtures for integration testing
 *
 * Barrel module: re-exports the mock agent outputs and mock gate configs so
 * tests can import everything from a single "./test-fixtures" path.
 */
export * from "./mock-agent-outputs";
export * from "./mock-gate-configs";

View File

@@ -0,0 +1,162 @@
/**
 * Mock agent outputs for integration testing
 * Simulates various gate execution results
 */
export interface MockAgentOutput {
  /** Raw command output text (stdout-style report). */
  output: string;
  /** Process exit code; 0 means the command succeeded. */
  exitCode: number;
}
// `satisfies` validates every entry against MockAgentOutput while preserving
// the literal key set, so `keyof typeof MOCK_OUTPUTS` lookups in the tests
// stay precise. (An `as`/annotation would either skip checking or widen keys.)
export const MOCK_OUTPUTS = {
  buildSuccess: {
    output: `
✓ Build completed successfully
Time: 3.2s
Artifacts: dist/main.js
`,
    exitCode: 0,
  },
  buildFailure: {
    output: `
✗ Build failed
src/feature.ts:15:7 - error TS2304: Cannot find name 'foo'.
src/feature.ts:28:12 - error TS2339: Property 'bar' does not exist on type 'FeatureService'.
Found 2 errors in 1 file.
`,
    exitCode: 1,
  },
  lintSuccess: {
    output: `
✓ ESLint check passed
0 problems (0 errors, 0 warnings)
15 files checked
`,
    exitCode: 0,
  },
  lintFailure: {
    output: `
✗ ESLint check failed
src/feature.ts
15:7 error 'foo' is not defined no-undef
28:12 error Missing return type @typescript-eslint/explicit-function-return-type
12 errors and 5 warnings found
`,
    exitCode: 1,
  },
  testSuccess: {
    output: `
PASS src/feature.spec.ts
FeatureService
✓ should create feature (15ms)
✓ should update feature (12ms)
✓ should delete feature (8ms)
Test Suites: 1 passed, 1 total
Tests: 50 passed, 50 total
Snapshots: 0 total
Time: 4.521 s
`,
    exitCode: 0,
  },
  testFailure: {
    output: `
FAIL src/feature.spec.ts
FeatureService
✓ should create feature (15ms)
✗ should update feature (12ms)
✗ should delete feature (8ms)
● FeatureService should update feature
expect(received).toBe(expected)
Expected: "updated"
Received: "created"
Test Suites: 1 failed, 1 total
Tests: 45 passed, 5 failed, 50 total
Snapshots: 0 total
Time: 4.521 s
`,
    exitCode: 1,
  },
  coveragePass: {
    output: `
--------------------|---------|----------|---------|---------|-------------------
File | % Stmts | % Branch | % Funcs | % Lines | Uncovered Line #s
--------------------|---------|----------|---------|---------|-------------------
All files | 87.45 | 85.23 | 90.12 | 86.78 |
feature.service.ts | 92.31 | 88.89 | 95.00 | 91.67 | 45-48
feature.module.ts | 82.14 | 81.25 | 85.71 | 81.82 | 12,34
--------------------|---------|----------|---------|---------|-------------------
`,
    exitCode: 0,
  },
  coverageFail: {
    // NOTE(review): exit code is 0 here — presumably a coverage shortfall is
    // detected by matching the report text rather than the exit status; confirm
    // against the orchestrator's expectedOutput handling.
    output: `
--------------------|---------|----------|---------|---------|-------------------
File | % Stmts | % Branch | % Funcs | % Lines | Uncovered Line #s
--------------------|---------|----------|---------|---------|-------------------
All files | 72.15 | 68.42 | 75.23 | 70.89 |
feature.service.ts | 65.38 | 60.00 | 70.00 | 64.29 | 15-28,45-62
feature.module.ts | 78.57 | 75.00 | 80.00 | 77.27 | 12,34,56
--------------------|---------|----------|---------|---------|-------------------
`,
    exitCode: 0,
  },
  securityPass: {
    output: `
✓ Security audit passed
0 vulnerabilities found
Scanned 1,245 packages
`,
    exitCode: 0,
  },
  securityFailure: {
    output: `
✗ Security audit failed
found 3 high severity vulnerabilities
lodash <4.17.21
Severity: high
Prototype Pollution - https://github.com/advisories/GHSA-xxxxx
Run npm audit fix to fix them
`,
    exitCode: 1,
  },
  typeCheckSuccess: {
    output: `
✓ Type check passed
No type errors found
Checked 45 files
`,
    exitCode: 0,
  },
  typeCheckFailure: {
    output: `
✗ Type check failed
src/feature.ts:15:7 - error TS2322: Type 'string' is not assignable to type 'number'.
src/feature.ts:28:12 - error TS2345: Argument of type 'undefined' is not assignable to parameter of type 'string'.
Found 2 errors in 1 file.
`,
    exitCode: 1,
  },
} satisfies Record<string, MockAgentOutput>;
/**
 * Canned `filesChanged` lists for building CompletionClaim fixtures,
 * ordered from smallest to largest change set.
 */
export const MOCK_FILE_CHANGES = {
  // Single source file with no accompanying tests.
  minimal: ["src/feature.ts"],
  // Source plus its spec file.
  withTests: ["src/feature.ts", "src/feature.spec.ts"],
  // Source, spec, and documentation.
  withDocs: ["src/feature.ts", "src/feature.spec.ts", "README.md"],
  // Broad multi-file change touching module/controller/DTO layers.
  multiFile: [
    "src/feature.ts",
    "src/feature.spec.ts",
    "src/feature.module.ts",
    "src/feature.controller.ts",
    "src/feature.dto.ts",
  ],
};

View File

@@ -0,0 +1,173 @@
/**
 * Mock gate configurations for integration testing
 */
export interface QualityGateConfig {
  /** Unique config identifier. */
  id: string;
  /** Workspace this configuration belongs to. */
  workspaceId: string;
  name: string;
  description: string;
  /** Whether the configuration is currently in effect. */
  isActive: boolean;
  /** Whether this is the workspace's fallback configuration. */
  isDefault: boolean;
  /** Per-gate settings keyed by gate id (shape intentionally loose). */
  gates: Record<string, unknown>;
  createdAt: Date;
  updatedAt: Date;
}
// Each entry uses `satisfies` instead of the previous `as QualityGateConfig`
// assertions: `as` silently suppressed missing/extra-field errors, while
// `satisfies` fully type-checks every literal and still keeps its precise type.
export const MOCK_GATE_CONFIGS = {
  // Standard pipeline: build/lint/test required, coverage optional at 85%.
  default: {
    id: "config-default",
    workspaceId: "workspace-1",
    name: "Default Quality Gates",
    description: "Standard quality gates for all tasks",
    isActive: true,
    isDefault: true,
    gates: {
      build: {
        enabled: true,
        required: true,
        command: "pnpm build",
        timeout: 300000,
      },
      lint: {
        enabled: true,
        required: true,
        command: "pnpm lint",
        timeout: 120000,
      },
      test: {
        enabled: true,
        required: true,
        command: "pnpm test",
        timeout: 300000,
      },
      coverage: {
        enabled: true,
        required: false,
        command: "pnpm test:coverage",
        timeout: 300000,
        threshold: 85,
      },
    },
    createdAt: new Date("2026-01-01T00:00:00Z"),
    updatedAt: new Date("2026-01-01T00:00:00Z"),
  } satisfies QualityGateConfig,
  // Everything required (incl. typecheck and security audit), 90% coverage.
  strict: {
    id: "config-strict",
    workspaceId: "workspace-1",
    name: "Strict Quality Gates",
    description: "Strict quality gates for critical features",
    isActive: true,
    isDefault: false,
    gates: {
      build: {
        enabled: true,
        required: true,
        command: "pnpm build",
        timeout: 300000,
      },
      lint: {
        enabled: true,
        required: true,
        command: "pnpm lint",
        timeout: 120000,
      },
      test: {
        enabled: true,
        required: true,
        command: "pnpm test",
        timeout: 300000,
      },
      coverage: {
        enabled: true,
        required: true,
        command: "pnpm test:coverage",
        timeout: 300000,
        threshold: 90,
      },
      typecheck: {
        enabled: true,
        required: true,
        command: "pnpm typecheck",
        timeout: 180000,
      },
      security: {
        enabled: true,
        required: true,
        command: "pnpm audit",
        timeout: 120000,
      },
    },
    createdAt: new Date("2026-01-01T00:00:00Z"),
    updatedAt: new Date("2026-01-01T00:00:00Z"),
  } satisfies QualityGateConfig,
  // Only the build is required; lint optional, tests disabled entirely.
  minimal: {
    id: "config-minimal",
    workspaceId: "workspace-1",
    name: "Minimal Quality Gates",
    description: "Minimal quality gates for rapid iteration",
    isActive: true,
    isDefault: false,
    gates: {
      build: {
        enabled: true,
        required: true,
        command: "pnpm build",
        timeout: 300000,
      },
      lint: {
        enabled: true,
        required: false,
        command: "pnpm lint",
        timeout: 120000,
      },
      test: {
        enabled: false,
        required: false,
        command: "pnpm test",
        timeout: 300000,
      },
    },
    createdAt: new Date("2026-01-01T00:00:00Z"),
    updatedAt: new Date("2026-01-01T00:00:00Z"),
  } satisfies QualityGateConfig,
  // Build plus non-standard gates (e2e/integration/perf) with long timeouts.
  customGates: {
    id: "config-custom",
    workspaceId: "workspace-1",
    name: "Custom Quality Gates",
    description: "Custom quality gates with non-standard checks",
    isActive: true,
    isDefault: false,
    gates: {
      build: {
        enabled: true,
        required: true,
        command: "pnpm build",
        timeout: 300000,
      },
      "custom-e2e": {
        enabled: true,
        required: true,
        command: "pnpm test:e2e",
        timeout: 600000,
      },
      "custom-integration": {
        enabled: true,
        required: false,
        command: "pnpm test:integration",
        timeout: 480000,
      },
      "custom-performance": {
        enabled: true,
        required: false,
        command: "pnpm test:perf",
        timeout: 300000,
      },
    },
    createdAt: new Date("2026-01-01T00:00:00Z"),
    updatedAt: new Date("2026-01-01T00:00:00Z"),
  } satisfies QualityGateConfig,
};