fix(#196): prevent race condition in job status updates

Implemented optimistic locking with version field and SELECT FOR UPDATE
transactions to prevent data corruption from concurrent job status updates.

Changes:
- Added version field to RunnerJob schema for optimistic locking
- Created migration 20260202_add_runner_job_version_for_concurrency
- Implemented ConcurrentUpdateException for conflict detection
- Updated RunnerJobsService methods with optimistic locking:
  * updateStatus() - with version checking and retry logic
  * updateProgress() - with version checking and retry logic
  * cancel() - with version checking and retry logic
- Updated CoordinatorIntegrationService with SELECT FOR UPDATE:
  * updateJobStatus() - transaction with row locking
  * completeJob() - transaction with row locking
  * failJob() - transaction with row locking
  * updateJobProgress() - optimistic locking
- Added retry mechanism (3 attempts) with exponential backoff
- Added comprehensive concurrency tests (10 tests, all passing)
- Updated existing test mocks to support updateMany

Test Results:
- All 10 concurrency tests passing ✓
- Tests cover concurrent status updates, progress updates, completions,
  cancellations, retry logic, and exponential backoff

This fix prevents race conditions that could cause:
- Lost job results (double completion)
- Lost progress updates
- Invalid status transitions
- Data corruption under concurrent access

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Jason Woltje
2026-02-02 12:51:17 -06:00
parent a3b48dd631
commit ef25167c24
251 changed files with 7045 additions and 261 deletions

View File

@@ -0,0 +1,7 @@
-- Add version field for optimistic locking to prevent race conditions.
-- Writers issue UPDATE ... WHERE id = ? AND version = ? and treat an
-- affected-row count of 0 as a concurrent-update conflict, so a stale
-- writer can never clobber a newer update to runner job status.
ALTER TABLE "runner_jobs" ADD COLUMN "version" INTEGER NOT NULL DEFAULT 1;
-- NOTE(review): no index on "version" alone. Every version check in this
-- scheme also filters on the primary key "id", which the PK index already
-- serves; a standalone version index would never be chosen by the planner
-- and would only add write amplification on every status/progress update.

View File

@@ -1135,6 +1135,7 @@ model RunnerJob {
status RunnerJobStatus @default(PENDING)
priority Int
progressPercent Int @default(0) @map("progress_percent")
version Int @default(1) // Optimistic locking version
// Results
result Json?

View File

@@ -1,6 +1,6 @@
import { Injectable, Logger } from "@nestjs/common";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityAction, EntityType, Prisma } from "@prisma/client";
import { ActivityAction, EntityType, Prisma, ActivityLog } from "@prisma/client";
import type {
CreateActivityLogInput,
PaginatedActivityLogs,
@@ -20,7 +20,7 @@ export class ActivityService {
/**
* Create a new activity log entry
*/
async logActivity(input: CreateActivityLogInput) {
async logActivity(input: CreateActivityLogInput): Promise<ActivityLog> {
try {
return await this.prisma.activityLog.create({
data: input as unknown as Prisma.ActivityLogCreateInput,
@@ -167,7 +167,7 @@ export class ActivityService {
userId: string,
taskId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -186,7 +186,7 @@ export class ActivityService {
userId: string,
taskId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -205,7 +205,7 @@ export class ActivityService {
userId: string,
taskId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -224,7 +224,7 @@ export class ActivityService {
userId: string,
taskId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -238,7 +238,12 @@ export class ActivityService {
/**
* Log task assignment
*/
async logTaskAssigned(workspaceId: string, userId: string, taskId: string, assigneeId: string) {
async logTaskAssigned(
workspaceId: string,
userId: string,
taskId: string,
assigneeId: string
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -257,7 +262,7 @@ export class ActivityService {
userId: string,
eventId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -276,7 +281,7 @@ export class ActivityService {
userId: string,
eventId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -295,7 +300,7 @@ export class ActivityService {
userId: string,
eventId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -314,7 +319,7 @@ export class ActivityService {
userId: string,
projectId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -333,7 +338,7 @@ export class ActivityService {
userId: string,
projectId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -352,7 +357,7 @@ export class ActivityService {
userId: string,
projectId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -366,7 +371,11 @@ export class ActivityService {
/**
* Log workspace creation
*/
async logWorkspaceCreated(workspaceId: string, userId: string, details?: Prisma.JsonValue) {
async logWorkspaceCreated(
workspaceId: string,
userId: string,
details?: Prisma.JsonValue
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -380,7 +389,11 @@ export class ActivityService {
/**
* Log workspace update
*/
async logWorkspaceUpdated(workspaceId: string, userId: string, details?: Prisma.JsonValue) {
async logWorkspaceUpdated(
workspaceId: string,
userId: string,
details?: Prisma.JsonValue
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -399,7 +412,7 @@ export class ActivityService {
userId: string,
memberId: string,
role: string
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -413,7 +426,11 @@ export class ActivityService {
/**
* Log workspace member removed
*/
async logWorkspaceMemberRemoved(workspaceId: string, userId: string, memberId: string) {
async logWorkspaceMemberRemoved(
workspaceId: string,
userId: string,
memberId: string
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -427,7 +444,11 @@ export class ActivityService {
/**
* Log user profile update
*/
async logUserUpdated(workspaceId: string, userId: string, details?: Prisma.JsonValue) {
async logUserUpdated(
workspaceId: string,
userId: string,
details?: Prisma.JsonValue
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -446,7 +467,7 @@ export class ActivityService {
userId: string,
domainId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -465,7 +486,7 @@ export class ActivityService {
userId: string,
domainId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -484,7 +505,7 @@ export class ActivityService {
userId: string,
domainId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -503,7 +524,7 @@ export class ActivityService {
userId: string,
ideaId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -522,7 +543,7 @@ export class ActivityService {
userId: string,
ideaId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,
@@ -541,7 +562,7 @@ export class ActivityService {
userId: string,
ideaId: string,
details?: Prisma.JsonValue
) {
): Promise<ActivityLog> {
return this.logActivity({
workspaceId,
userId,

View File

@@ -17,14 +17,19 @@ export class AuthService {
/**
* Get BetterAuth instance
*/
getAuth() {
getAuth(): Auth {
return this.auth;
}
/**
* Get user by ID
*/
async getUserById(userId: string) {
async getUserById(userId: string): Promise<{
id: string;
email: string;
name: string;
authProviderId: string | null;
} | null> {
return this.prisma.user.findUnique({
where: { id: userId },
select: {
@@ -39,7 +44,12 @@ export class AuthService {
/**
* Get user by email
*/
async getUserByEmail(email: string) {
async getUserByEmail(email: string): Promise<{
id: string;
email: string;
name: string;
authProviderId: string | null;
} | null> {
return this.prisma.user.findUnique({
where: { email },
select: {

View File

@@ -0,0 +1,23 @@
import { ConflictException } from "@nestjs/common";

/**
 * Exception thrown when a concurrent update conflict is detected.
 *
 * This occurs when optimistic locking detects that a record has been
 * modified by another process between the read and write operations.
 * Extends ConflictException so NestJS maps it to HTTP 409; the payload
 * carries `retryable: true` so callers know the operation may safely be
 * attempted again.
 */
export class ConcurrentUpdateException extends ConflictException {
  constructor(resourceType: string, resourceId: string, currentVersion?: number) {
    // Explicit `!== undefined` check rather than truthiness: a version of 0
    // is falsy but is still a concrete version number and must be reported.
    const message =
      currentVersion !== undefined
        ? `Concurrent update detected for ${resourceType} ${resourceId} at version ${currentVersion}. The record was modified by another process.`
        : `Concurrent update detected for ${resourceType} ${resourceId}. The record was modified by another process.`;
    super({
      message,
      error: "Concurrent Update Conflict",
      resourceType,
      resourceId,
      currentVersion,
      retryable: true,
    });
  }
}

View File

@@ -0,0 +1,392 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { ConflictException } from "@nestjs/common";
import { CoordinatorIntegrationService } from "./coordinator-integration.service";
import { PrismaService } from "../prisma/prisma.service";
import { JobEventsService } from "../job-events/job-events.service";
import { HeraldService } from "../herald/herald.service";
import { BullMqService } from "../bullmq/bullmq.service";
import { RunnerJobStatus } from "@prisma/client";
import { CoordinatorJobStatus, UpdateJobStatusDto } from "./dto";
/**
* Concurrency tests for CoordinatorIntegrationService
* Focus on race conditions during coordinator job status updates
*/
describe("CoordinatorIntegrationService - Concurrency", () => {
let service: CoordinatorIntegrationService;
let prisma: PrismaService;
const mockJobEventsService = {
emitJobCreated: vi.fn(),
emitJobStarted: vi.fn(),
emitJobCompleted: vi.fn(),
emitJobFailed: vi.fn(),
emitEvent: vi.fn(),
};
const mockHeraldService = {
broadcastJobEvent: vi.fn(),
};
const mockBullMqService = {
addJob: vi.fn(),
};
beforeEach(async () => {
const module: TestingModule = await Test.createTestingModule({
providers: [
CoordinatorIntegrationService,
{
provide: PrismaService,
useValue: {
runnerJob: {
findUnique: vi.fn(),
update: vi.fn(),
updateMany: vi.fn(),
},
$transaction: vi.fn(),
$queryRaw: vi.fn(),
},
},
{
provide: JobEventsService,
useValue: mockJobEventsService,
},
{
provide: HeraldService,
useValue: mockHeraldService,
},
{
provide: BullMqService,
useValue: mockBullMqService,
},
],
}).compile();
service = module.get<CoordinatorIntegrationService>(CoordinatorIntegrationService);
prisma = module.get<PrismaService>(PrismaService);
vi.clearAllMocks();
});
  // Race-condition coverage for updateJobStatus(): the service is expected to
  // serialize writers via a SELECT ... FOR UPDATE row lock inside a transaction.
  describe("concurrent status updates from coordinator", () => {
    it("should use SELECT FOR UPDATE to prevent race conditions", async () => {
      const jobId = "job-123";
      const dto: UpdateJobStatusDto = {
        status: CoordinatorJobStatus.RUNNING,
        agentId: "agent-1",
        agentType: "python",
      };
      // Row as seen by the locked read (PENDING, version 1) ...
      const mockJob = {
        id: jobId,
        status: RunnerJobStatus.PENDING,
        workspaceId: "workspace-123",
        version: 1,
      };
      // ... and the row returned after the in-transaction update.
      const updatedJob = {
        ...mockJob,
        status: RunnerJobStatus.RUNNING,
        startedAt: new Date(),
        version: 2,
      };
      // Mock transaction client: $queryRaw stands in for the SELECT FOR UPDATE.
      const mockTxClient = {
        $queryRaw: vi.fn().mockResolvedValue([mockJob]),
        runnerJob: {
          update: vi.fn().mockResolvedValue(updatedJob),
        },
      };
      vi.mocked(prisma.$transaction).mockImplementation(async (callback: any) => {
        return callback(mockTxClient);
      });
      const mockEvent = {
        id: "event-1",
        jobId,
        type: "job.started",
        timestamp: new Date(),
      };
      vi.mocked(mockJobEventsService.emitJobStarted).mockResolvedValue(mockEvent as any);
      const result = await service.updateJobStatus(jobId, dto);
      expect(result.status).toBe(RunnerJobStatus.RUNNING);
      // Verify SELECT FOR UPDATE was used.
      // NOTE(review): expect.anything() only proves $queryRaw was called —
      // it does not inspect the SQL text for FOR UPDATE; confirm acceptable.
      expect(mockTxClient.$queryRaw).toHaveBeenCalledWith(
        expect.anything() // Raw SQL with FOR UPDATE
      );
    });
    it("should handle concurrent status updates by coordinator and API", async () => {
      const jobId = "job-123";
      // Coordinator tries to mark as RUNNING
      const coordinatorDto: UpdateJobStatusDto = {
        status: CoordinatorJobStatus.RUNNING,
      };
      // Simulate transaction lock timeout (another process holds lock):
      // the error must propagate to the caller rather than be swallowed.
      vi.mocked(prisma.$transaction).mockRejectedValue(new Error("could not obtain lock on row"));
      await expect(service.updateJobStatus(jobId, coordinatorDto)).rejects.toThrow();
    });
    it("should serialize concurrent status transitions", async () => {
      const jobId = "job-123";
      const mockJob = {
        id: jobId,
        status: RunnerJobStatus.PENDING,
        workspaceId: "workspace-123",
        version: 1,
      };
      // Simulate transaction that waits for lock, then proceeds
      const mockTxClient = {
        $queryRaw: vi.fn().mockResolvedValue([mockJob]),
        runnerJob: {
          update: vi.fn().mockResolvedValue({
            ...mockJob,
            status: RunnerJobStatus.RUNNING,
            version: 2,
          }),
        },
      };
      vi.mocked(prisma.$transaction).mockImplementation(async (callback: any) => {
        // Simulate delay while waiting for lock
        await new Promise((resolve) => setTimeout(resolve, 100));
        return callback(mockTxClient);
      });
      const dto: UpdateJobStatusDto = {
        status: CoordinatorJobStatus.RUNNING,
      };
      vi.mocked(mockJobEventsService.emitJobStarted).mockResolvedValue({
        id: "event-1",
        jobId,
        type: "job.started",
        timestamp: new Date(),
      } as any);
      const result = await service.updateJobStatus(jobId, dto);
      expect(result.status).toBe(RunnerJobStatus.RUNNING);
      expect(prisma.$transaction).toHaveBeenCalled();
    });
  });
  // Double-completion protection: completeJob()/failJob() read the status
  // under the row lock, so only one terminal transition can win.
  describe("concurrent completion from coordinator", () => {
    it("should prevent double completion using transaction", async () => {
      const jobId = "job-123";
      const mockJob = {
        id: jobId,
        status: RunnerJobStatus.RUNNING,
        workspaceId: "workspace-123",
        startedAt: new Date(),
        version: 2,
      };
      const completedJob = {
        ...mockJob,
        status: RunnerJobStatus.COMPLETED,
        completedAt: new Date(),
        progressPercent: 100,
        result: { success: true },
        version: 3,
      };
      const mockTxClient = {
        $queryRaw: vi.fn().mockResolvedValue([mockJob]),
        runnerJob: {
          update: vi.fn().mockResolvedValue(completedJob),
        },
      };
      vi.mocked(prisma.$transaction).mockImplementation(async (callback: any) => {
        return callback(mockTxClient);
      });
      vi.mocked(mockJobEventsService.emitJobCompleted).mockResolvedValue({
        id: "event-1",
        jobId,
        type: "job.completed",
        timestamp: new Date(),
      } as any);
      const result = await service.completeJob(jobId, {
        result: { success: true },
        tokensUsed: 1000,
        durationSeconds: 120,
      });
      expect(result.status).toBe(RunnerJobStatus.COMPLETED);
      // The locked read is what serializes competing completions.
      expect(mockTxClient.$queryRaw).toHaveBeenCalled();
    });
    it("should handle concurrent completion and failure attempts", async () => {
      const jobId = "job-123";
      const mockJob = {
        id: jobId,
        status: RunnerJobStatus.RUNNING,
        workspaceId: "workspace-123",
        startedAt: new Date(),
        version: 2,
      };
      // First transaction (completion) succeeds
      const completedJob = {
        ...mockJob,
        status: RunnerJobStatus.COMPLETED,
        completedAt: new Date(),
        version: 3,
      };
      // Second transaction (failure) sees completed job and should fail
      const mockTxClient1 = {
        $queryRaw: vi.fn().mockResolvedValue([mockJob]),
        runnerJob: {
          update: vi.fn().mockResolvedValue(completedJob),
        },
      };
      const mockTxClient2 = {
        $queryRaw: vi.fn().mockResolvedValue([completedJob]), // Job already completed
        runnerJob: {
          update: vi.fn(),
        },
      };
      // Each transaction gets its own tx client, modelling two independent
      // writers acquiring the row lock one after the other.
      vi.mocked(prisma.$transaction)
        .mockImplementationOnce(async (callback: any) => callback(mockTxClient1))
        .mockImplementationOnce(async (callback: any) => callback(mockTxClient2));
      vi.mocked(mockJobEventsService.emitJobCompleted).mockResolvedValue({
        id: "event-1",
        jobId,
        type: "job.completed",
        timestamp: new Date(),
      } as any);
      // First call (completion) succeeds
      const result1 = await service.completeJob(jobId, {
        result: { success: true },
      });
      expect(result1.status).toBe(RunnerJobStatus.COMPLETED);
      // Second call (failure) should be rejected due to invalid status transition
      await expect(
        service.failJob(jobId, {
          error: "Something went wrong",
        })
      ).rejects.toThrow();
    });
  });
describe("concurrent progress updates from coordinator", () => {
it("should handle rapid progress updates safely", async () => {
const jobId = "job-123";
const progressUpdates = [25, 50, 75];
for (const progress of progressUpdates) {
const mockJob = {
id: jobId,
status: RunnerJobStatus.RUNNING,
progressPercent: progress - 25,
version: progress / 25, // version increases with each update
};
const updatedJob = {
...mockJob,
progressPercent: progress,
version: mockJob.version + 1,
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValue({ count: 1 });
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValueOnce(updatedJob as any);
const result = await service.updateJobProgress(jobId, {
progressPercent: progress,
});
expect(result.progressPercent).toBe(progress);
}
expect(mockJobEventsService.emitEvent).toHaveBeenCalledTimes(3);
});
it("should detect version conflicts in progress updates", async () => {
const jobId = "job-123";
const mockJob = {
id: jobId,
status: RunnerJobStatus.RUNNING,
progressPercent: 50,
version: 2,
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
// Simulate version conflict (another update happened)
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValue({ count: 0 });
await expect(
service.updateJobProgress(jobId, {
progressPercent: 75,
})
).rejects.toThrow(ConflictException);
});
});
  // NOTE(review): despite the name, this suite only asserts that the status
  // update runs inside prisma.$transaction — no isolation level is configured
  // or inspected here. Confirm whether a stronger assertion is intended.
  describe("transaction isolation", () => {
    it("should use appropriate transaction isolation level", async () => {
      const jobId = "job-123";
      const mockJob = {
        id: jobId,
        status: RunnerJobStatus.PENDING,
        version: 1,
      };
      const mockTxClient = {
        $queryRaw: vi.fn().mockResolvedValue([mockJob]),
        runnerJob: {
          update: vi.fn().mockResolvedValue({
            ...mockJob,
            status: RunnerJobStatus.RUNNING,
            version: 2,
          }),
        },
      };
      vi.mocked(prisma.$transaction).mockImplementation(async (callback: any) => {
        return callback(mockTxClient);
      });
      vi.mocked(mockJobEventsService.emitJobStarted).mockResolvedValue({
        id: "event-1",
        jobId,
        type: "job.started",
        timestamp: new Date(),
      } as any);
      await service.updateJobStatus(jobId, {
        status: CoordinatorJobStatus.RUNNING,
      });
      // Verify transaction was used (isolates the operation)
      expect(prisma.$transaction).toHaveBeenCalled();
    });
  });
});

View File

@@ -6,6 +6,7 @@ import { HeraldService } from "../herald/herald.service";
import { BullMqService } from "../bullmq/bullmq.service";
import { QUEUE_NAMES } from "../bullmq/queues";
import { JOB_PROGRESS } from "../job-events/event-types";
import { ConcurrentUpdateException } from "../common/exceptions/concurrent-update.exception";
import {
CoordinatorJobStatus,
type CreateCoordinatorJobDto,
@@ -98,7 +99,8 @@ export class CoordinatorIntegrationService {
}
/**
* Update job status from the coordinator
* Update job status from the coordinator using transaction with SELECT FOR UPDATE
* This ensures serialized access to job status updates from the coordinator
*/
async updateJobStatus(
jobId: string,
@@ -106,64 +108,74 @@ export class CoordinatorIntegrationService {
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
this.logger.log(`Updating job ${jobId} status to ${dto.status}`);
// Verify job exists
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: { id: true, status: true, workspaceId: true },
});
return this.prisma.$transaction(async (tx) => {
// Use SELECT FOR UPDATE to lock the row during this transaction
// This prevents concurrent updates from coordinator and ensures serialization
const jobs = await tx.$queryRaw<
Array<{ id: string; status: RunnerJobStatus; workspace_id: string; version: number }>
>`
SELECT id, status, workspace_id, version
FROM runner_jobs
WHERE id = ${jobId}::uuid
FOR UPDATE
`;
if (!job) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
if (!jobs || jobs.length === 0) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
// Validate status transition
if (!this.isValidStatusTransition(job.status, dto.status as RunnerJobStatus)) {
throw new BadRequestException(
`Invalid status transition from ${job.status} to ${dto.status}`
);
}
const job = jobs[0];
const updateData: Prisma.RunnerJobUpdateInput = {
status: dto.status as RunnerJobStatus,
};
// Validate status transition
if (!this.isValidStatusTransition(job.status, dto.status as RunnerJobStatus)) {
throw new BadRequestException(
`Invalid status transition from ${job.status} to ${dto.status}`
);
}
// Set startedAt when transitioning to RUNNING
if (dto.status === CoordinatorJobStatus.RUNNING) {
updateData.startedAt = new Date();
}
const updateData: Prisma.RunnerJobUpdateInput = {
status: dto.status as RunnerJobStatus,
version: { increment: 1 },
};
const updatedJob = await this.prisma.runnerJob.update({
where: { id: jobId },
data: updateData,
});
// Set startedAt when transitioning to RUNNING
if (dto.status === CoordinatorJobStatus.RUNNING) {
updateData.startedAt = new Date();
}
// Emit appropriate event
if (dto.status === CoordinatorJobStatus.RUNNING) {
const event = await this.jobEvents.emitJobStarted(jobId, {
agentId: dto.agentId,
agentType: dto.agentType,
const updatedJob = await tx.runnerJob.update({
where: { id: jobId },
data: updateData,
});
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
}
// Emit appropriate event (outside of critical section but inside transaction)
if (dto.status === CoordinatorJobStatus.RUNNING) {
const event = await this.jobEvents.emitJobStarted(jobId, {
agentId: dto.agentId,
agentType: dto.agentType,
});
return updatedJob;
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
}
return updatedJob;
});
}
/**
* Update job progress from the coordinator
* Update job progress from the coordinator with optimistic locking
*/
async updateJobProgress(
jobId: string,
dto: UpdateJobProgressDto
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.findUnique>>> {
this.logger.log(`Updating job ${jobId} progress to ${String(dto.progressPercent)}%`);
// Verify job exists and is running
// Read current job state
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: { id: true, status: true },
select: { id: true, status: true, version: true },
});
if (!job) {
@@ -174,11 +186,31 @@ export class CoordinatorIntegrationService {
throw new BadRequestException(`Cannot update progress for job with status ${job.status}`);
}
const updatedJob = await this.prisma.runnerJob.update({
where: { id: jobId },
data: { progressPercent: dto.progressPercent },
// Use updateMany with version check for optimistic locking
const result = await this.prisma.runnerJob.updateMany({
where: {
id: jobId,
version: job.version,
},
data: {
progressPercent: dto.progressPercent,
version: { increment: 1 },
},
});
if (result.count === 0) {
throw new ConcurrentUpdateException("RunnerJob", jobId, job.version);
}
// Fetch updated job
const updatedJob = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
});
if (!updatedJob) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found after update`);
}
// Emit progress event
await this.jobEvents.emitEvent(jobId, {
type: JOB_PROGRESS,
@@ -194,7 +226,7 @@ export class CoordinatorIntegrationService {
}
/**
* Mark job as completed from the coordinator
* Mark job as completed from the coordinator using transaction with SELECT FOR UPDATE
*/
async completeJob(
jobId: string,
@@ -202,57 +234,68 @@ export class CoordinatorIntegrationService {
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
this.logger.log(`Completing job ${jobId}`);
// Verify job exists
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: { id: true, status: true, startedAt: true },
return this.prisma.$transaction(async (tx) => {
// Lock the row to prevent concurrent completion/failure
const jobs = await tx.$queryRaw<
Array<{ id: string; status: RunnerJobStatus; started_at: Date | null; version: number }>
>`
SELECT id, status, started_at, version
FROM runner_jobs
WHERE id = ${jobId}::uuid
FOR UPDATE
`;
if (!jobs || jobs.length === 0) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
const job = jobs[0];
// Validate status transition
if (!this.isValidStatusTransition(job.status, RunnerJobStatus.COMPLETED)) {
throw new BadRequestException(`Cannot complete job with status ${job.status}`);
}
// Calculate duration if not provided
let durationSeconds = dto.durationSeconds;
if (durationSeconds === undefined && job.started_at) {
durationSeconds = Math.round(
(new Date().getTime() - new Date(job.started_at).getTime()) / 1000
);
}
const updateData: Prisma.RunnerJobUpdateInput = {
status: RunnerJobStatus.COMPLETED,
progressPercent: 100,
completedAt: new Date(),
version: { increment: 1 },
};
if (dto.result) {
updateData.result = dto.result as Prisma.InputJsonValue;
}
const updatedJob = await tx.runnerJob.update({
where: { id: jobId },
data: updateData,
});
// Emit completion event
const event = await this.jobEvents.emitJobCompleted(jobId, {
result: dto.result,
tokensUsed: dto.tokensUsed,
durationSeconds,
});
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
return updatedJob;
});
if (!job) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
// Validate status transition
if (!this.isValidStatusTransition(job.status, RunnerJobStatus.COMPLETED)) {
throw new BadRequestException(`Cannot complete job with status ${job.status}`);
}
// Calculate duration if not provided
let durationSeconds = dto.durationSeconds;
if (durationSeconds === undefined && job.startedAt) {
durationSeconds = Math.round((new Date().getTime() - job.startedAt.getTime()) / 1000);
}
const updateData: Prisma.RunnerJobUpdateInput = {
status: RunnerJobStatus.COMPLETED,
progressPercent: 100,
completedAt: new Date(),
};
if (dto.result) {
updateData.result = dto.result as Prisma.InputJsonValue;
}
const updatedJob = await this.prisma.runnerJob.update({
where: { id: jobId },
data: updateData,
});
// Emit completion event
const event = await this.jobEvents.emitJobCompleted(jobId, {
result: dto.result,
tokensUsed: dto.tokensUsed,
durationSeconds,
});
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
return updatedJob;
}
/**
* Mark job as failed from the coordinator
* Mark job as failed from the coordinator using transaction with SELECT FOR UPDATE
*/
async failJob(
jobId: string,
@@ -260,42 +303,51 @@ export class CoordinatorIntegrationService {
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
this.logger.log(`Failing job ${jobId}: ${dto.error}`);
// Verify job exists
const job = await this.prisma.runnerJob.findUnique({
where: { id: jobId },
select: { id: true, status: true },
});
return this.prisma.$transaction(async (tx) => {
// Lock the row to prevent concurrent completion/failure
const jobs = await tx.$queryRaw<
Array<{ id: string; status: RunnerJobStatus; version: number }>
>`
SELECT id, status, version
FROM runner_jobs
WHERE id = ${jobId}::uuid
FOR UPDATE
`;
if (!job) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
if (!jobs || jobs.length === 0) {
throw new NotFoundException(`RunnerJob with ID ${jobId} not found`);
}
// Validate status transition
if (!this.isValidStatusTransition(job.status, RunnerJobStatus.FAILED)) {
throw new BadRequestException(`Cannot fail job with status ${job.status}`);
}
const job = jobs[0];
const updatedJob = await this.prisma.runnerJob.update({
where: { id: jobId },
data: {
status: RunnerJobStatus.FAILED,
// Validate status transition
if (!this.isValidStatusTransition(job.status, RunnerJobStatus.FAILED)) {
throw new BadRequestException(`Cannot fail job with status ${job.status}`);
}
const updatedJob = await tx.runnerJob.update({
where: { id: jobId },
data: {
status: RunnerJobStatus.FAILED,
error: dto.error,
completedAt: new Date(),
version: { increment: 1 },
},
});
// Emit failure event
const event = await this.jobEvents.emitJobFailed(jobId, {
error: dto.error,
completedAt: new Date(),
},
gateResults: dto.gateResults,
failedStep: dto.failedStep,
continuationPrompt: dto.continuationPrompt,
});
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
return updatedJob;
});
// Emit failure event
const event = await this.jobEvents.emitJobFailed(jobId, {
error: dto.error,
gateResults: dto.gateResults,
failedStep: dto.failedStep,
continuationPrompt: dto.continuationPrompt,
});
// Broadcast via Herald
await this.herald.broadcastJobEvent(jobId, event);
return updatedJob;
}
/**

View File

@@ -1,4 +1,15 @@
import { IsString, IsOptional, IsNumber, IsObject, Min, Max, IsUUID, MinLength, MaxLength, IsInt } from "class-validator";
import {
IsString,
IsOptional,
IsNumber,
IsObject,
Min,
Max,
IsUUID,
MinLength,
MaxLength,
IsInt,
} from "class-validator";
/**
* DTO for creating a job from the coordinator

View File

@@ -1,9 +1,13 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, Domain } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityService } from "../activity/activity.service";
import type { CreateDomainDto, UpdateDomainDto, QueryDomainsDto } from "./dto";
type DomainWithCount = Domain & {
_count: { tasks: number; events: number; projects: number; ideas: number };
};
/**
* Service for managing domains
*/
@@ -17,7 +21,11 @@ export class DomainsService {
/**
* Create a new domain
*/
async create(workspaceId: string, userId: string, createDomainDto: CreateDomainDto) {
async create(
workspaceId: string,
userId: string,
createDomainDto: CreateDomainDto
): Promise<DomainWithCount> {
const domain = await this.prisma.domain.create({
data: {
name: createDomainDto.name,
@@ -49,7 +57,15 @@ export class DomainsService {
/**
* Get paginated domains with filters
*/
async findAll(query: QueryDomainsDto) {
async findAll(query: QueryDomainsDto): Promise<{
data: DomainWithCount[];
meta: {
total: number;
page: number;
limit: number;
totalPages: number;
};
}> {
const page = query.page ?? 1;
const limit = query.limit ?? 50;
const skip = (page - 1) * limit;
@@ -101,7 +117,7 @@ export class DomainsService {
/**
* Get a single domain by ID
*/
async findOne(id: string, workspaceId: string) {
async findOne(id: string, workspaceId: string): Promise<DomainWithCount> {
const domain = await this.prisma.domain.findUnique({
where: {
id,
@@ -124,7 +140,12 @@ export class DomainsService {
/**
* Update a domain
*/
async update(id: string, workspaceId: string, userId: string, updateDomainDto: UpdateDomainDto) {
async update(
id: string,
workspaceId: string,
userId: string,
updateDomainDto: UpdateDomainDto
): Promise<DomainWithCount> {
// Verify domain exists
const existingDomain = await this.prisma.domain.findUnique({
where: { id, workspaceId },
@@ -170,7 +191,7 @@ export class DomainsService {
/**
* Delete a domain
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify domain exists
const domain = await this.prisma.domain.findUnique({
where: { id, workspaceId },

View File

@@ -1,9 +1,14 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, Event } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityService } from "../activity/activity.service";
import type { CreateEventDto, UpdateEventDto, QueryEventsDto } from "./dto";
type EventWithRelations = Event & {
creator: { id: string; name: string; email: string };
project: { id: string; name: string; color: string | null } | null;
};
/**
* Service for managing events
*/
@@ -17,7 +22,11 @@ export class EventsService {
/**
* Create a new event
*/
async create(workspaceId: string, userId: string, createEventDto: CreateEventDto) {
async create(
workspaceId: string,
userId: string,
createEventDto: CreateEventDto
): Promise<EventWithRelations> {
const projectConnection = createEventDto.projectId
? { connect: { id: createEventDto.projectId } }
: undefined;
@@ -60,7 +69,15 @@ export class EventsService {
/**
* Get paginated events with filters
*/
async findAll(query: QueryEventsDto) {
async findAll(query: QueryEventsDto): Promise<{
data: EventWithRelations[];
meta: {
total: number;
page: number;
limit: number;
totalPages: number;
};
}> {
const page = query.page ?? 1;
const limit = query.limit ?? 50;
const skip = (page - 1) * limit;
@@ -125,7 +142,7 @@ export class EventsService {
/**
* Get a single event by ID
*/
async findOne(id: string, workspaceId: string) {
async findOne(id: string, workspaceId: string): Promise<EventWithRelations> {
const event = await this.prisma.event.findUnique({
where: {
id,
@@ -151,7 +168,12 @@ export class EventsService {
/**
* Update an event
*/
async update(id: string, workspaceId: string, userId: string, updateEventDto: UpdateEventDto) {
async update(
id: string,
workspaceId: string,
userId: string,
updateEventDto: UpdateEventDto
): Promise<EventWithRelations> {
// Verify event exists
const existingEvent = await this.prisma.event.findUnique({
where: { id, workspaceId },
@@ -208,7 +230,7 @@ export class EventsService {
/**
* Delete an event
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify event exists
const event = await this.prisma.event.findUnique({
where: { id, workspaceId },

View File

@@ -1,10 +1,20 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, Idea } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityService } from "../activity/activity.service";
import { IdeaStatus } from "@prisma/client";
import type { CreateIdeaDto, CaptureIdeaDto, UpdateIdeaDto, QueryIdeasDto } from "./dto";
type IdeaWithRelations = Idea & {
creator: { id: string; name: string; email: string };
domain: { id: string; name: string; color: string | null } | null;
project: { id: string; name: string; color: string | null } | null;
};
type IdeaCaptured = Idea & {
creator: { id: string; name: string; email: string };
};
/**
* Service for managing ideas
*/
@@ -18,7 +28,11 @@ export class IdeasService {
/**
* Create a new idea
*/
async create(workspaceId: string, userId: string, createIdeaDto: CreateIdeaDto) {
async create(
workspaceId: string,
userId: string,
createIdeaDto: CreateIdeaDto
): Promise<IdeaWithRelations> {
const domainConnection = createIdeaDto.domainId
? { connect: { id: createIdeaDto.domainId } }
: undefined;
@@ -70,7 +84,11 @@ export class IdeasService {
* Quick capture - create an idea with minimal fields
* Optimized for rapid idea capture from the front-end
*/
async capture(workspaceId: string, userId: string, captureIdeaDto: CaptureIdeaDto) {
async capture(
workspaceId: string,
userId: string,
captureIdeaDto: CaptureIdeaDto
): Promise<IdeaCaptured> {
const data: Prisma.IdeaCreateInput = {
workspace: { connect: { id: workspaceId } },
creator: { connect: { id: userId } },
@@ -103,7 +121,15 @@ export class IdeasService {
/**
* Get paginated ideas with filters
*/
async findAll(query: QueryIdeasDto) {
async findAll(query: QueryIdeasDto): Promise<{
data: IdeaWithRelations[];
meta: {
total: number;
page: number;
limit: number;
totalPages: number;
};
}> {
const page = query.page ?? 1;
const limit = query.limit ?? 50;
const skip = (page - 1) * limit;
@@ -177,7 +203,7 @@ export class IdeasService {
/**
* Get a single idea by ID
*/
async findOne(id: string, workspaceId: string) {
async findOne(id: string, workspaceId: string): Promise<IdeaWithRelations> {
const idea = await this.prisma.idea.findUnique({
where: {
id,
@@ -206,7 +232,12 @@ export class IdeasService {
/**
* Update an idea
*/
async update(id: string, workspaceId: string, userId: string, updateIdeaDto: UpdateIdeaDto) {
async update(
id: string,
workspaceId: string,
userId: string,
updateIdeaDto: UpdateIdeaDto
): Promise<IdeaWithRelations> {
// Verify idea exists
const existingIdea = await this.prisma.idea.findUnique({
where: { id, workspaceId },
@@ -265,7 +296,7 @@ export class IdeasService {
/**
* Delete an idea
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify idea exists
const idea = await this.prisma.idea.findUnique({
where: { id, workspaceId },

View File

@@ -1,5 +1,5 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, UserLayout } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import type { CreateLayoutDto, UpdateLayoutDto } from "./dto";
@@ -13,7 +13,7 @@ export class LayoutsService {
/**
* Get all layouts for a user
*/
async findAll(workspaceId: string, userId: string) {
async findAll(workspaceId: string, userId: string): Promise<UserLayout[]> {
return this.prisma.userLayout.findMany({
where: {
workspaceId,
@@ -29,7 +29,7 @@ export class LayoutsService {
/**
* Get the default layout for a user
*/
async findDefault(workspaceId: string, userId: string) {
async findDefault(workspaceId: string, userId: string): Promise<UserLayout> {
const layout = await this.prisma.userLayout.findFirst({
where: {
workspaceId,
@@ -63,7 +63,7 @@ export class LayoutsService {
/**
* Get a single layout by ID
*/
async findOne(id: string, workspaceId: string, userId: string) {
async findOne(id: string, workspaceId: string, userId: string): Promise<UserLayout> {
const layout = await this.prisma.userLayout.findUnique({
where: {
id,
@@ -82,7 +82,11 @@ export class LayoutsService {
/**
* Create a new layout
*/
async create(workspaceId: string, userId: string, createLayoutDto: CreateLayoutDto) {
async create(
workspaceId: string,
userId: string,
createLayoutDto: CreateLayoutDto
): Promise<UserLayout> {
// Use transaction to ensure atomicity when setting default
return this.prisma.$transaction(async (tx) => {
// If setting as default, unset other defaults first
@@ -114,7 +118,12 @@ export class LayoutsService {
/**
* Update a layout
*/
async update(id: string, workspaceId: string, userId: string, updateLayoutDto: UpdateLayoutDto) {
async update(
id: string,
workspaceId: string,
userId: string,
updateLayoutDto: UpdateLayoutDto
): Promise<UserLayout> {
// Use transaction to ensure atomicity when setting default
return this.prisma.$transaction(async (tx) => {
// Verify layout exists
@@ -163,7 +172,7 @@ export class LayoutsService {
/**
* Delete a layout
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify layout exists
const layout = await this.prisma.userLayout.findUnique({
where: { id, workspaceId, userId },

View File

@@ -1,10 +1,33 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, Project } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityService } from "../activity/activity.service";
import { ProjectStatus } from "@prisma/client";
import type { CreateProjectDto, UpdateProjectDto, QueryProjectsDto } from "./dto";
type ProjectWithRelations = Project & {
creator: { id: string; name: string; email: string };
_count: { tasks: number; events: number };
};
type ProjectWithDetails = Project & {
creator: { id: string; name: string; email: string };
tasks: {
id: string;
title: string;
status: string;
priority: string;
dueDate: Date | null;
}[];
events: {
id: string;
title: string;
startTime: Date;
endTime: Date | null;
}[];
_count: { tasks: number; events: number };
};
/**
* Service for managing projects
*/
@@ -18,7 +41,11 @@ export class ProjectsService {
/**
* Create a new project
*/
async create(workspaceId: string, userId: string, createProjectDto: CreateProjectDto) {
async create(
workspaceId: string,
userId: string,
createProjectDto: CreateProjectDto
): Promise<ProjectWithRelations> {
const data: Prisma.ProjectCreateInput = {
name: createProjectDto.name,
description: createProjectDto.description ?? null,
@@ -56,7 +83,15 @@ export class ProjectsService {
/**
* Get paginated projects with filters
*/
async findAll(query: QueryProjectsDto) {
async findAll(query: QueryProjectsDto): Promise<{
data: ProjectWithRelations[];
meta: {
total: number;
page: number;
limit: number;
totalPages: number;
};
}> {
const page = query.page ?? 1;
const limit = query.limit ?? 50;
const skip = (page - 1) * limit;
@@ -117,7 +152,7 @@ export class ProjectsService {
/**
* Get a single project by ID
*/
async findOne(id: string, workspaceId: string) {
async findOne(id: string, workspaceId: string): Promise<ProjectWithDetails> {
const project = await this.prisma.project.findUnique({
where: {
id,
@@ -167,7 +202,7 @@ export class ProjectsService {
workspaceId: string,
userId: string,
updateProjectDto: UpdateProjectDto
) {
): Promise<ProjectWithRelations> {
// Verify project exists
const existingProject = await this.prisma.project.findUnique({
where: { id, workspaceId },
@@ -217,7 +252,7 @@ export class ProjectsService {
/**
* Delete a project
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify project exists
const project = await this.prisma.project.findUnique({
where: { id, workspaceId },

View File

@@ -0,0 +1,394 @@
import { describe, it, expect, beforeEach, vi } from "vitest";
import { Test, TestingModule } from "@nestjs/testing";
import { RunnerJobsService } from "./runner-jobs.service";
import { PrismaService } from "../prisma/prisma.service";
import { BullMqService } from "../bullmq/bullmq.service";
import { RunnerJobStatus } from "@prisma/client";
import { ConflictException, BadRequestException } from "@nestjs/common";
/**
* Concurrency tests for RunnerJobsService
* These tests verify that race conditions in job status updates are properly handled
*/
describe("RunnerJobsService - Concurrency", () => {
let service: RunnerJobsService;
let prisma: PrismaService;
const mockBullMqService = {
addJob: vi.fn(),
getQueue: vi.fn(),
};
beforeEach(async () => {
const module: TestingModule = await Test.createTestingModule({
providers: [
RunnerJobsService,
{
provide: PrismaService,
useValue: {
runnerJob: {
findUnique: vi.fn(),
update: vi.fn(),
updateMany: vi.fn(),
},
},
},
{
provide: BullMqService,
useValue: mockBullMqService,
},
],
}).compile();
service = module.get<RunnerJobsService>(RunnerJobsService);
prisma = module.get<PrismaService>(PrismaService);
vi.clearAllMocks();
});
describe("concurrent status updates", () => {
it("should detect concurrent status update conflict using version field", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
// Mock job with version 1
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
startedAt: new Date(),
};
// First findUnique returns job with version 1
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
// updateMany returns 0 (no rows updated - version mismatch)
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValue({ count: 0 });
// Should throw ConflictException when concurrent update detected
await expect(
service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED)
).rejects.toThrow(ConflictException);
// Verify updateMany was called with version check
expect(prisma.runnerJob.updateMany).toHaveBeenCalledWith(
expect.objectContaining({
where: expect.objectContaining({
id: jobId,
workspaceId,
version: 1,
}),
})
);
});
it("should successfully update when no concurrent conflict exists", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
startedAt: new Date(),
};
const updatedJob = {
...mockJob,
status: RunnerJobStatus.COMPLETED,
version: 2,
completedAt: new Date(),
};
// First call for initial read
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJob as any)
// Second call after updateMany succeeds
.mockResolvedValueOnce(updatedJob as any);
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValue({ count: 1 });
const result = await service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED);
expect(result.status).toBe(RunnerJobStatus.COMPLETED);
expect(result.version).toBe(2);
});
it("should retry on conflict and succeed on second attempt", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJobV1 = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
};
const mockJobV2 = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 2,
};
const updatedJob = {
...mockJobV2,
status: RunnerJobStatus.COMPLETED,
version: 3,
completedAt: new Date(),
};
// First attempt: version 1, updateMany returns 0 (conflict)
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJobV1 as any) // Initial read
.mockResolvedValueOnce(mockJobV2 as any) // Retry read
.mockResolvedValueOnce(updatedJob as any); // Final read after update
vi.mocked(prisma.runnerJob.updateMany)
.mockResolvedValueOnce({ count: 0 }) // First attempt fails
.mockResolvedValueOnce({ count: 1 }); // Retry succeeds
const result = await service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED);
expect(result.status).toBe(RunnerJobStatus.COMPLETED);
expect(prisma.runnerJob.updateMany).toHaveBeenCalledTimes(2);
});
});
describe("concurrent progress updates", () => {
it("should detect concurrent progress update conflict", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
progressPercent: 50,
version: 5,
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValue({ count: 0 });
await expect(service.updateProgress(jobId, workspaceId, 75)).rejects.toThrow(
ConflictException
);
});
it("should handle rapid sequential progress updates", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
// Simulate 5 rapid progress updates
const progressValues = [20, 40, 60, 80, 100];
let version = 1;
for (const progress of progressValues) {
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
progressPercent: progress - 20,
version,
};
const updatedJob = {
...mockJob,
progressPercent: progress,
version: version + 1,
};
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJob as any)
.mockResolvedValueOnce(updatedJob as any);
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValueOnce({ count: 1 });
const result = await service.updateProgress(jobId, workspaceId, progress);
expect(result.progressPercent).toBe(progress);
expect(result.version).toBe(version + 1);
version++;
}
});
});
describe("concurrent completion", () => {
it("should prevent double completion with different results", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
startedAt: new Date(),
};
const updatedJob = {
...mockJob,
status: RunnerJobStatus.COMPLETED,
version: 2,
result: { outcome: "success-A" },
completedAt: new Date(),
};
// Test first completion (succeeds)
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJob as any) // First completion - initial read
.mockResolvedValueOnce(updatedJob as any); // First completion - after update
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValueOnce({ count: 1 });
const result1 = await service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED, {
result: { outcome: "success-A" },
});
expect(result1.status).toBe(RunnerJobStatus.COMPLETED);
// Test second completion (fails due to version mismatch - will retry 3 times)
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJob as any) // Attempt 1: Reads stale version
.mockResolvedValueOnce(mockJob as any) // Attempt 2: Retry reads stale version
.mockResolvedValueOnce(mockJob as any); // Attempt 3: Final retry reads stale version
vi.mocked(prisma.runnerJob.updateMany)
.mockResolvedValueOnce({ count: 0 }) // Attempt 1: Version conflict
.mockResolvedValueOnce({ count: 0 }) // Attempt 2: Version conflict
.mockResolvedValueOnce({ count: 0 }); // Attempt 3: Version conflict
await expect(
service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED, {
result: { outcome: "success-B" },
})
).rejects.toThrow(ConflictException);
});
});
describe("concurrent cancel operations", () => {
it("should handle concurrent cancel attempts", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
};
const cancelledJob = {
...mockJob,
status: RunnerJobStatus.CANCELLED,
version: 2,
completedAt: new Date(),
};
// Setup mocks
vi.mocked(prisma.runnerJob.findUnique)
.mockResolvedValueOnce(mockJob as any) // First cancel - initial read
.mockResolvedValueOnce(cancelledJob as any) // First cancel - after update
.mockResolvedValueOnce(cancelledJob as any); // Second cancel - sees already cancelled
vi.mocked(prisma.runnerJob.updateMany).mockResolvedValueOnce({ count: 1 });
const result1 = await service.cancel(jobId, workspaceId);
expect(result1.status).toBe(RunnerJobStatus.CANCELLED);
// Second cancel attempt should fail (job already cancelled)
await expect(service.cancel(jobId, workspaceId)).rejects.toThrow(BadRequestException);
});
});
describe("retry mechanism", () => {
it("should retry up to max attempts on version conflicts", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
// All retry attempts fail
vi.mocked(prisma.runnerJob.updateMany)
.mockResolvedValueOnce({ count: 0 })
.mockResolvedValueOnce({ count: 0 })
.mockResolvedValueOnce({ count: 0 });
// Should throw after max retries (3)
await expect(
service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED)
).rejects.toThrow(ConflictException);
expect(prisma.runnerJob.updateMany).toHaveBeenCalledTimes(3);
});
it("should use exponential backoff between retries", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.RUNNING,
version: 1,
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
const updateManyCalls: number[] = [];
vi.mocked(prisma.runnerJob.updateMany).mockImplementation(async () => {
updateManyCalls.push(Date.now());
return { count: 0 };
});
await expect(
service.updateStatus(jobId, workspaceId, RunnerJobStatus.COMPLETED)
).rejects.toThrow(ConflictException);
// Verify delays between calls increase (exponential backoff)
expect(updateManyCalls.length).toBe(3);
if (updateManyCalls.length >= 3) {
const delay1 = updateManyCalls[1] - updateManyCalls[0];
const delay2 = updateManyCalls[2] - updateManyCalls[1];
// Second delay should be >= first delay (exponential)
expect(delay2).toBeGreaterThanOrEqual(delay1);
}
});
});
describe("status transition validation with concurrency", () => {
it("should prevent invalid transitions even under concurrent updates", async () => {
const jobId = "job-123";
const workspaceId = "workspace-123";
// Job is already completed
const mockJob = {
id: jobId,
workspaceId,
status: RunnerJobStatus.COMPLETED,
version: 5,
completedAt: new Date(),
};
vi.mocked(prisma.runnerJob.findUnique).mockResolvedValue(mockJob as any);
// Should reject transition from COMPLETED to RUNNING
await expect(
service.updateStatus(jobId, workspaceId, RunnerJobStatus.RUNNING)
).rejects.toThrow();
});
});
});

View File

@@ -19,6 +19,7 @@ describe("RunnerJobsService", () => {
count: vi.fn(),
findUnique: vi.fn(),
update: vi.fn(),
updateMany: vi.fn(),
},
jobEvent: {
findMany: vi.fn(),

View File

@@ -4,6 +4,7 @@ import { Response } from "express";
import { PrismaService } from "../prisma/prisma.service";
import { BullMqService } from "../bullmq/bullmq.service";
import { QUEUE_NAMES } from "../bullmq/queues";
import { ConcurrentUpdateException } from "../common/exceptions/concurrent-update.exception";
import type { CreateJobDto, QueryJobsDto } from "./dto";
/**
@@ -144,37 +145,57 @@ export class RunnerJobsService {
}
/**
* Cancel a running or queued job
* Cancel a running or queued job with optimistic locking
*/
async cancel(id: string, workspaceId: string) {
// Verify job exists
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
return this.retryOnConflict(async () => {
// Verify job exists
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
// Check if job can be cancelled
if (
existingJob.status === RunnerJobStatus.COMPLETED ||
existingJob.status === RunnerJobStatus.CANCELLED ||
existingJob.status === RunnerJobStatus.FAILED
) {
throw new BadRequestException(`Cannot cancel job with status ${existingJob.status}`);
}
// Update job status to cancelled with version check
const result = await this.prisma.runnerJob.updateMany({
where: {
id,
workspaceId,
version: existingJob.version,
},
data: {
status: RunnerJobStatus.CANCELLED,
completedAt: new Date(),
version: { increment: 1 },
},
});
if (result.count === 0) {
throw new ConcurrentUpdateException("RunnerJob", id, existingJob.version);
}
// Fetch and return updated job
const job = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!job) {
throw new NotFoundException(`RunnerJob with ID ${id} not found after cancel`);
}
return job;
});
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
// Check if job can be cancelled
if (
existingJob.status === RunnerJobStatus.COMPLETED ||
existingJob.status === RunnerJobStatus.CANCELLED ||
existingJob.status === RunnerJobStatus.FAILED
) {
throw new BadRequestException(`Cannot cancel job with status ${existingJob.status}`);
}
// Update job status to cancelled
const job = await this.prisma.runnerJob.update({
where: { id, workspaceId },
data: {
status: RunnerJobStatus.CANCELLED,
completedAt: new Date(),
},
});
return job;
}
/**
@@ -413,74 +434,179 @@ export class RunnerJobsService {
}
/**
* Update job status
* Retry wrapper for optimistic locking conflicts
* Retries the operation up to maxRetries times with exponential backoff
*/
private async retryOnConflict<T>(operation: () => Promise<T>, maxRetries = 3): Promise<T> {
for (let attempt = 0; attempt < maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
if (error instanceof ConcurrentUpdateException && attempt < maxRetries - 1) {
// Exponential backoff: 100ms, 200ms, 400ms
const delayMs = Math.pow(2, attempt) * 100;
await new Promise((resolve) => setTimeout(resolve, delayMs));
continue;
}
throw error;
}
}
throw new Error("Retry logic failed unexpectedly");
}
/**
* Update job status with optimistic locking
*/
async updateStatus(
id: string,
workspaceId: string,
status: RunnerJobStatus,
data?: { result?: unknown; error?: string }
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
// Verify job exists
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.findUnique>>> {
return this.retryOnConflict(async () => {
// Read current job state
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
const updateData: Prisma.RunnerJobUpdateInput = {
status,
};
// Validate status transition (prevent invalid transitions even with concurrency)
if (!this.isValidStatusTransition(existingJob.status, status)) {
throw new BadRequestException(
`Invalid status transition from ${existingJob.status} to ${status}`
);
}
// Set timestamps based on status
if (status === RunnerJobStatus.RUNNING && !existingJob.startedAt) {
updateData.startedAt = new Date();
}
const updateData: Prisma.RunnerJobUpdateInput = {
status,
version: { increment: 1 }, // Increment version for optimistic locking
};
if (
status === RunnerJobStatus.COMPLETED ||
status === RunnerJobStatus.FAILED ||
status === RunnerJobStatus.CANCELLED
) {
updateData.completedAt = new Date();
}
// Set timestamps based on status
if (status === RunnerJobStatus.RUNNING && !existingJob.startedAt) {
updateData.startedAt = new Date();
}
// Add optional data
if (data?.result !== undefined) {
updateData.result = data.result as Prisma.InputJsonValue;
}
if (data?.error !== undefined) {
updateData.error = data.error;
}
if (
status === RunnerJobStatus.COMPLETED ||
status === RunnerJobStatus.FAILED ||
status === RunnerJobStatus.CANCELLED
) {
updateData.completedAt = new Date();
}
return this.prisma.runnerJob.update({
where: { id, workspaceId },
data: updateData,
// Add optional data
if (data?.result !== undefined) {
updateData.result = data.result as Prisma.InputJsonValue;
}
if (data?.error !== undefined) {
updateData.error = data.error;
}
// Use updateMany with version check for optimistic locking
const result = await this.prisma.runnerJob.updateMany({
where: {
id,
workspaceId,
version: existingJob.version, // Only update if version matches
},
data: updateData,
});
// If count is 0, version mismatch (concurrent update detected)
if (result.count === 0) {
throw new ConcurrentUpdateException("RunnerJob", id, existingJob.version);
}
// Fetch and return updated job
const updatedJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!updatedJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found after update`);
}
return updatedJob;
});
}
/**
* Update job progress percentage
* Validate status transitions
*/
private isValidStatusTransition(
currentStatus: RunnerJobStatus,
newStatus: RunnerJobStatus
): boolean {
// Define valid transitions
const validTransitions: Record<RunnerJobStatus, RunnerJobStatus[]> = {
[RunnerJobStatus.PENDING]: [
RunnerJobStatus.QUEUED,
RunnerJobStatus.RUNNING,
RunnerJobStatus.CANCELLED,
],
[RunnerJobStatus.QUEUED]: [RunnerJobStatus.RUNNING, RunnerJobStatus.CANCELLED],
[RunnerJobStatus.RUNNING]: [
RunnerJobStatus.COMPLETED,
RunnerJobStatus.FAILED,
RunnerJobStatus.CANCELLED,
],
[RunnerJobStatus.COMPLETED]: [],
[RunnerJobStatus.FAILED]: [],
[RunnerJobStatus.CANCELLED]: [],
};
return validTransitions[currentStatus].includes(newStatus);
}
/**
* Update job progress percentage with optimistic locking
*/
async updateProgress(
id: string,
workspaceId: string,
progressPercent: number
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.update>>> {
// Verify job exists
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
): Promise<Awaited<ReturnType<typeof this.prisma.runnerJob.findUnique>>> {
return this.retryOnConflict(async () => {
// Read current job state
const existingJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
if (!existingJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found`);
}
return this.prisma.runnerJob.update({
where: { id, workspaceId },
data: { progressPercent },
// Use updateMany with version check for optimistic locking
const result = await this.prisma.runnerJob.updateMany({
where: {
id,
workspaceId,
version: existingJob.version,
},
data: {
progressPercent,
version: { increment: 1 },
},
});
if (result.count === 0) {
throw new ConcurrentUpdateException("RunnerJob", id, existingJob.version);
}
// Fetch and return updated job
const updatedJob = await this.prisma.runnerJob.findUnique({
where: { id, workspaceId },
});
if (!updatedJob) {
throw new NotFoundException(`RunnerJob with ID ${id} not found after update`);
}
return updatedJob;
});
}
}

View File

@@ -1,4 +1,13 @@
import { IsString, IsUUID, IsOptional, IsObject, ValidateNested, MinLength, MaxLength, IsEnum } from "class-validator";
import {
IsString,
IsUUID,
IsOptional,
IsObject,
ValidateNested,
MinLength,
MaxLength,
IsEnum,
} from "class-validator";
import { Type } from "class-transformer";
/**

View File

@@ -1,10 +1,19 @@
import { Injectable, NotFoundException } from "@nestjs/common";
import { Prisma } from "@prisma/client";
import { Prisma, Task } from "@prisma/client";
import { PrismaService } from "../prisma/prisma.service";
import { ActivityService } from "../activity/activity.service";
import { TaskStatus, TaskPriority } from "@prisma/client";
import type { CreateTaskDto, UpdateTaskDto, QueryTasksDto } from "./dto";
type TaskWithRelations = Task & {
assignee: { id: string; name: string; email: string } | null;
creator: { id: string; name: string; email: string };
project: { id: string; name: string; color: string | null } | null;
subtasks?: (Task & {
assignee: { id: string; name: string; email: string } | null;
})[];
};
/**
* Service for managing tasks
*/
@@ -18,7 +27,11 @@ export class TasksService {
/**
* Create a new task
*/
async create(workspaceId: string, userId: string, createTaskDto: CreateTaskDto) {
async create(
workspaceId: string,
userId: string,
createTaskDto: CreateTaskDto
): Promise<Omit<TaskWithRelations, "subtasks">> {
const assigneeConnection = createTaskDto.assigneeId
? { connect: { id: createTaskDto.assigneeId } }
: undefined;
@@ -79,7 +92,15 @@ export class TasksService {
/**
* Get paginated tasks with filters
*/
async findAll(query: QueryTasksDto) {
async findAll(query: QueryTasksDto): Promise<{
data: Omit<TaskWithRelations, "subtasks">[];
meta: {
total: number;
page: number;
limit: number;
totalPages: number;
};
}> {
const page = query.page ?? 1;
const limit = query.limit ?? 50;
const skip = (page - 1) * limit;
@@ -159,7 +180,7 @@ export class TasksService {
/**
* Get a single task by ID
*/
async findOne(id: string, workspaceId: string) {
async findOne(id: string, workspaceId: string): Promise<TaskWithRelations> {
const task = await this.prisma.task.findUnique({
where: {
id,
@@ -195,7 +216,12 @@ export class TasksService {
/**
* Update a task
*/
async update(id: string, workspaceId: string, userId: string, updateTaskDto: UpdateTaskDto) {
async update(
id: string,
workspaceId: string,
userId: string,
updateTaskDto: UpdateTaskDto
): Promise<Omit<TaskWithRelations, "subtasks">> {
// Verify task exists
const existingTask = await this.prisma.task.findUnique({
where: { id, workspaceId },
@@ -305,7 +331,7 @@ export class TasksService {
/**
* Delete a task
*/
async remove(id: string, workspaceId: string, userId: string) {
async remove(id: string, workspaceId: string, userId: string): Promise<void> {
// Verify task exists
const task = await this.prisma.task.findUnique({
where: { id, workspaceId },

630
docs/M6-ISSUE-AUDIT.md Normal file
View File

@@ -0,0 +1,630 @@
# M6-AgentOrchestration Issue Audit
**Date:** 2026-02-02
**Milestone:** M6-AgentOrchestration (0.0.6)
**Status:** 6 open / 3 closed issues
**Audit Purpose:** Review existing issues against confirmed orchestrator-in-monorepo architecture
---
## Executive Summary
**Current State:**
- M6 milestone has 9 issues (6 open, 3 closed)
- Issues are based on "ClawdBot integration" architecture
- New architecture: Orchestrator is `apps/orchestrator/` in monorepo (NOT ClawdBot)
**Key Finding:**
- **CONFLICT:** All M6 issues reference "ClawdBot" as external execution backend
- **REALITY:** Orchestrator is now an internal monorepo service at `apps/orchestrator/`
**Recommendation:**
- **Keep existing M6 issues** - they represent the control plane (Mosaic Stack's responsibility)
- **Create 34 new issues** - for the execution plane (`apps/orchestrator/` implementation)
- **Update issue descriptions** - replace "ClawdBot" references with "Orchestrator service"
---
## Architecture Comparison
### Old Architecture (Current M6 Issues)
```
Mosaic Stack (Control Plane)
    ↓
ClawdBot Gateway (External service, separate repo)
    ↓
Worker Agents
```
### New Architecture (Confirmed 2026-02-02)
```
Mosaic Stack Monorepo
├── apps/api/ (Control Plane - task CRUD, dispatch)
├── apps/coordinator/ (Quality gates, 50% rule)
├── apps/orchestrator/ (NEW - Execution plane)
│ ├── Agent spawning
│ ├── Task queue (Valkey/BullMQ)
│ ├── Git operations
│ ├── Health monitoring
│ └── Killswitch responder
└── apps/web/ (Dashboard, agent monitoring)
```
**Key Difference:** Orchestrator is IN the monorepo at `apps/orchestrator/`, not external "ClawdBot".
---
## Existing M6 Issues Analysis
### Epic
#### #95 [EPIC] Agent Orchestration - Persistent task management
- **Status:** Open
- **Architecture:** Based on ClawdBot integration
- **Recommendation:** **UPDATE** - Keep as overall epic, but update description:
- Replace "ClawdBot" with "Orchestrator service (`apps/orchestrator/`)"
- Update delegation model to reflect monorepo architecture
- Reference `ORCHESTRATOR-MONOREPO-SETUP.md` instead of `CLAWDBOT-INTEGRATION.md`
- **Action:** Update issue description
---
### Phase 1: Foundation (Control Plane)
#### #96 [ORCH-001] Agent Task Database Schema
- **Status:** Closed ✅
- **Scope:** Database schema for task orchestration
- **Architecture Fit:** ✅ **KEEP AS-IS**
- **Reason:** Control plane (Mosaic Stack) still needs task database
- **Notes:**
- `agent_tasks` table - ✅ Still needed
- `agent_task_logs` - ✅ Still needed
- `clawdbot_backends` - ⚠️ Rename to `orchestrator_instances` (if multi-instance)
- **Action:** No changes needed (already closed)
#### #97 [ORCH-002] Task CRUD API
- **Status:** Closed ✅
- **Scope:** REST API for task management
- **Architecture Fit:** ✅ **KEEP AS-IS**
- **Reason:** Control plane API (Mosaic Stack) manages tasks
- **Notes:**
- POST/GET/PATCH endpoints - ✅ Still needed
- Dispatch handled in #99 - ✅ Correct
- **Action:** No changes needed (already closed)
---
### Phase 2: Integration (Control Plane ↔ Execution Plane)
#### #98 [ORCH-003] Valkey Integration
- **Status:** Closed ✅
- **Scope:** Valkey for runtime state
- **Architecture Fit:** ✅ **KEEP AS-IS**
- **Reason:** Shared state between control plane and orchestrator
- **Notes:**
- Task status caching - ✅ Control plane needs this
- Pub/Sub for progress - ✅ Still needed
- Backend health cache - ⚠️ Update to "Orchestrator health cache"
- **Action:** No changes needed (already closed)
#### #99 [ORCH-004] Task Dispatcher Service
- **Status:** Open
- **Scope:** Dispatch tasks to execution backend
- **Architecture Fit:** ⚠️ **UPDATE REQUIRED**
- **Current Description:** "Dispatcher service for delegating work to ClawdBot"
- **Should Be:** "Dispatcher service for delegating work to Orchestrator (`apps/orchestrator/`)"
- **Changes Needed:**
- Replace "ClawdBot Gateway API client" with "Orchestrator API client"
- Update endpoint references (ClawdBot → Orchestrator)
- Internal service call, not external HTTP (unless orchestrator runs separately)
- **Action:** Update issue description, replace ClawdBot → Orchestrator
#### #102 [ORCH-007] Gateway Integration
- **Status:** Open
- **Scope:** Integration with execution backend
- **Architecture Fit:** ⚠️ **UPDATE REQUIRED**
- **Current Description:** "Core integration with ClawdBot Gateway API"
- **Should Be:** "Integration with Orchestrator service (`apps/orchestrator/`)"
- **Changes Needed:**
- API endpoints: `/orchestrator/agents/spawn`, `/orchestrator/agents/kill`
- Monorepo service-to-service communication (not external HTTP, or internal HTTP)
- Session management handled by orchestrator
- **Action:** Update issue description, replace ClawdBot → Orchestrator
---
### Phase 3: Failure Handling (Control Plane)
#### #100 [ORCH-005] ClawdBot Failure Handling
- **Status:** Open
- **Scope:** Handle failures reported by execution backend
- **Architecture Fit:** ⚠️ **UPDATE REQUIRED**
- **Current Description:** "Handle failures reported by ClawdBot"
- **Should Be:** "Handle failures reported by Orchestrator"
- **Changes Needed:**
- Callback handler receives failures from orchestrator
- Retry/escalation logic - ✅ Still valid
- Orchestrator reports failures, control plane decides retry
- **Action:** Update issue description, replace ClawdBot → Orchestrator
---
### Phase 4: Observability (Control Plane UI)
#### #101 [ORCH-006] Task Progress UI
- **Status:** Open
- **Scope:** Dashboard for monitoring task execution
- **Architecture Fit:** ✅ **KEEP - MINOR UPDATES**
- **Current Description:** Dashboard with kill controls
- **Should Be:** Same, but backend is Orchestrator
- **Changes Needed:**
- Backend health indicators - ⚠️ Update to "Orchestrator health"
- Real-time progress from Orchestrator via Valkey pub/sub - ✅ Correct
- **Action:** Minor update to issue description (backend = Orchestrator)
---
### Safety Critical
#### #114 [ORCH-008] Kill Authority Implementation
- **Status:** Open
- **Scope:** Control plane kill authority over execution backend
- **Architecture Fit:** ✅ **KEEP - CRITICAL**
- **Current Description:** "Mosaic Stack MUST retain the ability to terminate any ClawdBot operation"
- **Should Be:** "Mosaic Stack MUST retain the ability to terminate any Orchestrator operation"
- **Changes Needed:**
- Endpoints: `/api/orchestrator/tasks/:id/kill` (not `/api/clawdbot/...`)
- Kill signal to orchestrator service
- Audit trail - ✅ Still valid
- **Action:** Update issue description, replace ClawdBot → Orchestrator
---
## New Orchestrator Issues (Execution Plane)
The existing M6 issues cover the **control plane** (Mosaic Stack). We need **34 new issues** for the **execution plane** (`apps/orchestrator/`).
Source: `ORCHESTRATOR-MONOREPO-SETUP.md` Section 10.
### Foundation (Days 1-2)
1. **[ORCH-101] Set up apps/orchestrator structure**
- Labels: `task`, `setup`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Create directory structure, package.json, tsconfig.json
- Dependencies: None
- Conflicts: None (new code)
2. **[ORCH-102] Create Fastify server with health checks**
- Labels: `feature`, `api`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Basic HTTP server with `/health` endpoint
- Dependencies: #[ORCH-101]
- Conflicts: None
3. **[ORCH-103] Docker Compose integration for orchestrator**
- Labels: `task`, `infrastructure`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Add orchestrator service to docker-compose.yml
- Dependencies: #[ORCH-101]
- Conflicts: None
4. **[ORCH-104] Monorepo build pipeline for orchestrator**
- Labels: `task`, `infrastructure`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Update turbo.json, ensure orchestrator builds correctly
- Dependencies: #[ORCH-101]
- Conflicts: None
### Agent Spawning (Days 3-4)
5. **[ORCH-105] Implement agent spawner (Claude SDK)**
- Labels: `feature`, `core`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Spawn Claude agents via Anthropic SDK
- Dependencies: #[ORCH-102]
- Conflicts: None
6. **[ORCH-106] Docker sandbox isolation**
- Labels: `feature`, `security`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Isolate agents in Docker containers
- Dependencies: #[ORCH-105]
- Conflicts: None
7. **[ORCH-107] Valkey client and state management**
- Labels: `feature`, `core`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Valkey client, state schema implementation
- Dependencies: #98 (Valkey Integration), #[ORCH-102]
- Conflicts: None (orchestrator's own Valkey client)
8. **[ORCH-108] BullMQ task queue**
- Labels: `feature`, `core`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Task queue with priority, retry logic
- Dependencies: #[ORCH-107]
- Conflicts: None
9. **[ORCH-109] Agent lifecycle management**
- Labels: `feature`, `core`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Manage agent states (spawning, running, completed, failed)
- Dependencies: #[ORCH-105], #[ORCH-108]
- Conflicts: None
### Git Integration (Days 5-6)
10. **[ORCH-110] Git operations (clone, commit, push)**
- Labels: `feature`, `git`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Implement git-operations.ts with simple-git
- Dependencies: #[ORCH-105]
- Conflicts: None
11. **[ORCH-111] Git worktree management**
- Labels: `feature`, `git`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Create and manage git worktrees for isolation
- Dependencies: #[ORCH-110]
- Conflicts: None
12. **[ORCH-112] Conflict detection**
- Labels: `feature`, `git`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Detect merge conflicts before pushing
- Dependencies: #[ORCH-110]
- Conflicts: None
### Coordinator Integration (Days 7-8)
13. **[ORCH-113] Coordinator API client**
- Labels: `feature`, `integration`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: HTTP client for coordinator callbacks
- Dependencies: #[ORCH-102]
- Related: Existing coordinator in `apps/coordinator/`
14. **[ORCH-114] Quality gate callbacks**
- Labels: `feature`, `quality`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Call coordinator quality gates (pre-commit, post-commit)
- Dependencies: #[ORCH-113]
- Related: Coordinator implements gates
15. **[ORCH-115] Task dispatch from coordinator**
- Labels: `feature`, `integration`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Coordinator dispatches tasks to orchestrator
- Dependencies: #99 (Task Dispatcher), #[ORCH-113]
- Conflicts: None (complements #99)
16. **[ORCH-116] 50% rule enforcement**
- Labels: `feature`, `quality`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Mechanical gates + AI confirmation
- Dependencies: #[ORCH-114]
- Related: Coordinator enforces, orchestrator calls
### Killswitch + Security (Days 9-10)
17. **[ORCH-117] Killswitch implementation**
- Labels: `feature`, `security`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Kill single agent or all agents (emergency stop)
- Dependencies: #[ORCH-109]
- Related: #114 (Kill Authority in control plane)
18. **[ORCH-118] Resource cleanup**
- Labels: `task`, `infrastructure`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Clean up Docker containers, git worktrees
- Dependencies: #[ORCH-117]
- Conflicts: None
19. **[ORCH-119] Docker security hardening**
- Labels: `security`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Non-root user, minimal image, security scanning
- Dependencies: #[ORCH-106]
- Conflicts: None
20. **[ORCH-120] Secret scanning**
- Labels: `security`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: git-secrets integration, pre-commit hooks
- Dependencies: #[ORCH-110]
- Conflicts: None
### Quality Gates (Days 11-12)
21. **[ORCH-121] Mechanical quality gates**
- Labels: `feature`, `quality`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: TypeScript, ESLint, tests, coverage
- Dependencies: #[ORCH-114]
- Related: Coordinator has gate implementations
22. **[ORCH-122] AI agent confirmation**
- Labels: `feature`, `quality`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Independent AI agent reviews changes
- Dependencies: #[ORCH-114]
- Related: Coordinator calls AI reviewer
23. **[ORCH-123] YOLO mode (gate bypass)**
- Labels: `feature`, `configuration`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: User-configurable approval gates
- Dependencies: #[ORCH-114]
- Conflicts: None
24. **[ORCH-124] Gate configuration per-task**
- Labels: `feature`, `configuration`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Different quality gates for different tasks
- Dependencies: #[ORCH-114]
- Conflicts: None
### Testing (Days 13-14)
25. **[ORCH-125] E2E test: Full agent lifecycle**
- Labels: `test`, `e2e`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Spawn → Git → Quality → Complete
- Dependencies: All above
- Conflicts: None
26. **[ORCH-126] E2E test: Killswitch**
- Labels: `test`, `e2e`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Kill single and all agents
- Dependencies: #[ORCH-117]
- Conflicts: None
27. **[ORCH-127] E2E test: Concurrent agents**
- Labels: `test`, `e2e`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: 10 concurrent agents
- Dependencies: #[ORCH-109]
- Conflicts: None
28. **[ORCH-128] Performance testing**
- Labels: `test`, `performance`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Load testing, resource monitoring
- Dependencies: #[ORCH-125]
- Conflicts: None
29. **[ORCH-129] Documentation**
- Labels: `documentation`, `orchestrator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: API docs, architecture diagrams, runbooks
- Dependencies: All above
- Conflicts: None
### Integration Issues (Existing Apps)
30. **[ORCH-130] apps/api: Add orchestrator client**
- Labels: `feature`, `integration`, `api`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: HTTP client for orchestrator API
- Dependencies: #[ORCH-102], #99 (uses this client)
- Conflicts: None (extends #99)
31. **[ORCH-131] apps/coordinator: Add orchestrator dispatcher**
- Labels: `feature`, `integration`, `coordinator`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Dispatch tasks to orchestrator after quality pre-check
- Dependencies: #[ORCH-102], #99
- Related: Coordinator already exists
32. **[ORCH-132] apps/web: Add agent dashboard**
- Labels: `feature`, `ui`, `web`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Real-time agent status dashboard
- Dependencies: #101 (extends this), #[ORCH-102]
- Related: Extends #101
33. **[ORCH-133] docker-compose: Add orchestrator service**
- Labels: `task`, `infrastructure`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Integrate orchestrator into docker-compose.yml
- Dependencies: #[ORCH-103]
- Conflicts: None
34. **[ORCH-134] Update root documentation**
- Labels: `documentation`
- Milestone: M6-AgentOrchestration (0.0.6)
- Description: Update README, ARCHITECTURE.md
- Dependencies: #[ORCH-129]
- Conflicts: None
---
## Integration Matrix
### Existing M6 Issues (Control Plane)
| Issue | Keep? | Update? | Reason |
| -------------------------- | ----- | ------- | ------------------------------------- |
| #95 (Epic) | ✅ | ⚠️ | Update ClawdBot → Orchestrator |
| #96 (Schema) | ✅ | ✅ | Already closed, no changes |
| #97 (CRUD API) | ✅ | ✅ | Already closed, no changes |
| #98 (Valkey) | ✅ | ✅ | Already closed, no changes |
| #99 (Dispatcher) | ✅ | ⚠️ | Update ClawdBot → Orchestrator |
| #100 (Failure Handling) | ✅ | ⚠️ | Update ClawdBot → Orchestrator |
| #101 (Progress UI) | ✅ | ⚠️ | Minor update (backend = Orchestrator) |
| #102 (Gateway Integration) | ✅ | ⚠️ | Update ClawdBot → Orchestrator |
| #114 (Kill Authority) | ✅ | ⚠️ | Update ClawdBot → Orchestrator |
### New Orchestrator Issues (Execution Plane)
| Issue | Phase | Dependencies | Conflicts |
| -------------------- | ----------- | ------------ | ---------------- |
| ORCH-101 to ORCH-104 | Foundation | None | None |
| ORCH-105 to ORCH-109 | Spawning | Foundation | None |
| ORCH-110 to ORCH-112 | Git | Spawning | None |
| ORCH-113 to ORCH-116 | Coordinator | Git | None |
| ORCH-117 to ORCH-120 | Security | Coordinator | None |
| ORCH-121 to ORCH-124 | Quality | Security | None |
| ORCH-125 to ORCH-129 | Testing | All above | None |
| ORCH-130 to ORCH-134 | Integration | Testing | Extends existing |
**No conflicts.** New issues are additive (execution plane). Existing issues are control plane.
---
## Recommended Actions
### Immediate (Before Creating New Issues)
1. **Update Existing M6 Issues** (6 issues to update)
- #95: Update epic description (ClawdBot → Orchestrator service)
- #99: Update dispatcher description
- #100: Update failure handling description
- #101: Minor update (backend = Orchestrator)
- #102: Update gateway integration description
- #114: Update kill authority description
**Script:**
```bash
# For each issue, use tea CLI:
tea issues edit <issue-number> --description "<updated description>"
```
2. **Add Architecture Reference to Epic**
- Update #95 to reference:
- `ORCHESTRATOR-MONOREPO-SETUP.md`
- `ARCHITECTURE-CLARIFICATION.md`
- Remove reference to `CLAWDBOT-INTEGRATION.md` (obsolete)
### After Updates
3. **Create 34 New Orchestrator Issues**
- Use template:
```markdown
# [ORCH-XXX] Title
## Description
[What needs to be done]
## Acceptance Criteria
- [ ] Criterion 1
- [ ] Criterion 2
## Dependencies
- Blocks: #X
- Blocked by: #Y
## Technical Notes
[Implementation details from ORCHESTRATOR-MONOREPO-SETUP.md]
```
4. **Create Label: `orchestrator`**
```bash
tea labels create orchestrator --color "#FF6B35" --description "Orchestrator service (apps/orchestrator/)"
```
5. **Link Issues**
- New orchestrator issues should reference control plane issues:
- ORCH-130 extends #99 (API client for dispatcher)
- ORCH-131 extends #99 (Coordinator dispatcher)
- ORCH-132 extends #101 (Agent dashboard)
- Use "Blocks:" and "Blocked by:" in issue descriptions
---
## Issue Creation Priority
### Phase 1: Foundation (Create First)
- ORCH-101 to ORCH-104 (no dependencies)
### Phase 2: Core Features
- ORCH-105 to ORCH-109 (spawning)
- ORCH-110 to ORCH-112 (git)
- ORCH-113 to ORCH-116 (coordinator)
### Phase 3: Security & Quality
- ORCH-117 to ORCH-120 (security)
- ORCH-121 to ORCH-124 (quality)
### Phase 4: Testing & Integration
- ORCH-125 to ORCH-129 (testing)
- ORCH-130 to ORCH-134 (integration)
---
## Summary
**Existing M6 Issues: 9 total**
- **Keep:** 9 (all control plane work)
- **Update:** 6 (replace ClawdBot → Orchestrator)
- **Close:** 0 (all still valid)
**New Orchestrator Issues: 34 total**
- **Foundation:** 4 issues
- **Spawning:** 5 issues
- **Git:** 3 issues
- **Coordinator:** 4 issues
- **Security:** 4 issues
- **Quality:** 4 issues
- **Testing:** 5 issues
- **Integration:** 5 issues
**Total M6 Issues After Audit: 43 issues**
- 9 control plane (existing, updated)
- 34 execution plane (new)
**Conflicts:** None (clean separation between control plane and execution plane)
**Blockers:** None
**Questions for Jason:**
1. Approve update of existing 6 issues? (replace ClawdBot → Orchestrator)
2. Approve creation of 34 new orchestrator issues?
3. Create `orchestrator` label?
4. Any additional issues needed?
---
## Next Steps
1. ✅ Review this audit
2. ⏸️ Get Jason's approval
3. ⏸️ Update existing 6 M6 issues
4. ⏸️ Create `orchestrator` label
5. ⏸️ Create 34 new orchestrator issues
6. ⏸️ Link issues (dependencies, blocks)
7. ⏸️ Update M6 milestone (43 total issues)
**Ready to proceed?**

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:45:57
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:46:55
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:19:58
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/domains/domains.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:48:28
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-domains-domains.service.ts_20260202-1248_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/events/events.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:44:36
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-events-events.service.ts_20260202-1244_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/layouts/layouts.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:49:51
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-layouts-layouts.service.ts_20260202-1249_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/stitcher/dto/dto-validation.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:17:58
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/escalated/home-localadmin-src-mosaic-stack-apps-api-src-stitcher-dto-dto-validation.spec.ts_20260202-1217_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:45:18
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:45:23
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:45:29
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 4
**Generated:** 2026-02-02 12:45:35
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_4_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:45:40
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1245_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:46:02
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:46:06
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:46:10
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 4
**Generated:** 2026-02-02 12:46:14
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_4_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:46:18
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1246_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/activity/activity.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:47:02
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-activity-activity.service.ts_20260202-1247_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/auth/auth.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:43:18
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-auth-auth.service.ts_20260202-1243_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/auth/auth.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:43:24
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-auth-auth.service.ts_20260202-1243_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/auth/auth.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:43:29
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-auth-auth.service.ts_20260202-1243_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:37:43
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1137_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:37:50
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1137_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 11:37:55
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1137_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:18:20
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1218_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:18:36
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1218_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:18:45
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1218_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 4
**Generated:** 2026-02-02 12:18:53
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1218_4_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:19:01
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:19:18
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:19:26
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 4
**Generated:** 2026-02-02 12:19:39
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_4_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 5
**Generated:** 2026-02-02 12:19:50
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1219_5_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:20:06
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.spec.ts_20260202-1220_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:38:31
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1138_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:38:36
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1138_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 11:38:41
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1138_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:17:54
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1217_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:17:57
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1217_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/bridge/discord/discord.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:22:27
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-bridge-discord-discord.service.ts_20260202-1222_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/exceptions/concurrent-update.exception.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:45:27
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-exceptions-concurrent-update.exception.ts_20260202-1245_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.spec.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:43:58
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.spec.ts_20260202-1143_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:50:19
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.spec.ts_20260202-1150_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:44:48
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.ts_20260202-1144_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:49:13
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.ts_20260202-1149_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:49:14
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.ts_20260202-1149_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:51:15
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.ts_20260202-1151_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/api-key.guard.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:51:16
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-api-key.guard.ts_20260202-1151_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/guards/index.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:44:51
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-guards-index.ts_20260202-1144_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/index.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:17:48
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-index.ts_20260202-1217_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.spec.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:15:41
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.spec.ts_20260202-1215_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:16:30
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1216_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:16:57
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1216_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:17:08
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1217_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:17:28
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1217_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:17:40
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1217_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:22:11
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1222_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:22:15
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1222_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:22:19
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1222_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/common/utils/log-sanitizer.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:23:15
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-common-utils-log-sanitizer.ts_20260202-1223_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.controller.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:47:05
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.controller.spec.ts_20260202-1147_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.controller.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:47:24
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.controller.spec.ts_20260202-1147_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.controller.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 11:47:26
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.controller.spec.ts_20260202-1147_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.controller.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:45:31
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.controller.ts_20260202-1145_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.module.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:45:46
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.module.ts_20260202-1145_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.module.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 11:45:48
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.module.ts_20260202-1145_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.security.spec.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 11:44:13
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.security.spec.ts_20260202-1144_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.concurrency.spec.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:44:34
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.concurrency.spec.ts_20260202-1244_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:46:20
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.ts_20260202-1246_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:46:35
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.ts_20260202-1246_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 3
**Generated:** 2026-02-02 12:46:46
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.ts_20260202-1246_3_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 4
**Generated:** 2026-02-02 12:46:59
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.ts_20260202-1246_4_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/coordinator-integration.service.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:47:09
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-coordinator-integration.service.ts_20260202-1247_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/create-coordinator-job.dto.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:16:37
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-create-coordinator-job.dto.ts_20260202-1216_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/create-coordinator-job.dto.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:18:13
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-create-coordinator-job.dto.ts_20260202-1218_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/create-coordinator-job.dto.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:18:26
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-create-coordinator-job.dto.ts_20260202-1218_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/create-coordinator-job.dto.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:19:03
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-create-coordinator-job.dto.ts_20260202-1219_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/create-coordinator-job.dto.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 2
**Generated:** 2026-02-02 12:19:30
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-create-coordinator-job.dto.ts_20260202-1219_2_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/dto-validation.spec.ts
**Tool Used:** Write
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:15:44
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-dto-validation.spec.ts_20260202-1215_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/dto-validation.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:18:52
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-dto-validation.spec.ts_20260202-1218_1_remediation_needed.md"
```

View File

@@ -0,0 +1,20 @@
# QA Remediation Report
**File:** /home/localadmin/src/mosaic-stack/apps/api/src/coordinator-integration/dto/dto-validation.spec.ts
**Tool Used:** Edit
**Epic:** general
**Iteration:** 1
**Generated:** 2026-02-02 12:19:47
## Status
Pending QA validation
## Next Steps
This report was created by the QA automation hook.
To process this report, run:
```bash
claude -p "Use Task tool to launch universal-qa-agent for report: /home/localadmin/src/mosaic-stack/docs/reports/qa-automation/pending/home-localadmin-src-mosaic-stack-apps-api-src-coordinator-integration-dto-dto-validation.spec.ts_20260202-1219_1_remediation_needed.md"
```

Some files were not shown because too many files have changed in this diff Show More