Compare commits

..

2 Commits

Author SHA1 Message Date
aa78df2c2c fix(web): fix kanban add-task tests
Some checks failed
ci/woodpecker/push/ci Pipeline failed
2026-03-01 17:06:04 -06:00
1ac2c33bc2 fix(web): fix kanban add-task tests
Some checks failed
ci/woodpecker/push/ci Pipeline failed
- Always render kanban columns even when no tasks exist, enabling task
  creation from empty state
- Add explicit submit (✓ Add) and Cancel buttons to inline add-task form
- Pass projectId from URL filter through to createTask API call
- Fixes both 'opens add-task form' and 'cancels add-task form' tests
2026-03-01 17:02:47 -06:00
3 changed files with 23 additions and 121 deletions

View File

@@ -384,18 +384,10 @@ describe("ActivityLoggingInterceptor", () => {
const context = createMockExecutionContext("POST", {}, body, user);
const next = createMockCallHandler(result);
mockActivityService.logActivity.mockResolvedValue({
id: "activity-123",
});
await new Promise<void>((resolve) => {
interceptor.intercept(context, next).subscribe(() => {
// workspaceId is now optional, so logActivity should be called without it
expect(mockActivityService.logActivity).toHaveBeenCalled();
const callArgs = mockActivityService.logActivity.mock.calls[0][0];
expect(callArgs.userId).toBe("user-123");
expect(callArgs.entityId).toBe("task-123");
expect(callArgs.workspaceId).toBeUndefined();
// Should not call logActivity when workspaceId is missing
expect(mockActivityService.logActivity).not.toHaveBeenCalled();
resolve();
});
});
@@ -420,18 +412,10 @@ describe("ActivityLoggingInterceptor", () => {
const context = createMockExecutionContext("POST", {}, body, user);
const next = createMockCallHandler(result);
mockActivityService.logActivity.mockResolvedValue({
id: "activity-123",
});
await new Promise<void>((resolve) => {
interceptor.intercept(context, next).subscribe(() => {
// workspaceId is now optional, so logActivity should be called without it
expect(mockActivityService.logActivity).toHaveBeenCalled();
const callArgs = mockActivityService.logActivity.mock.calls[0][0];
expect(callArgs.userId).toBe("user-123");
expect(callArgs.entityId).toBe("task-123");
expect(callArgs.workspaceId).toBeUndefined();
// Should not call logActivity when workspaceId is missing
expect(mockActivityService.logActivity).not.toHaveBeenCalled();
resolve();
});
});

View File

@@ -140,8 +140,8 @@ describe("KanbanPage add task flow", (): void => {
const titleInput = screen.getByPlaceholderText("Task title...");
await user.type(titleInput, createdTask.title);
// Click the Add button
await user.click(screen.getByRole("button", { name: /✓ Add/i }));
// Press Enter to submit
await user.keyboard("{Enter}");
await waitFor((): void => {
expect(mockCreateTask).toHaveBeenCalledWith(

View File

@@ -1,6 +1,6 @@
/**
* Chat API client
* Handles LLM chat interactions via /api/chat/stream (streaming) and /api/llm/chat (fallback)
* Handles LLM chat interactions via /api/llm/chat
*/
import { apiPost, fetchCsrfToken, getCsrfToken } from "./client";
@@ -33,28 +33,9 @@ export interface ChatResponse {
}
/**
* Parsed SSE data chunk from OpenAI-compatible stream
* Parsed SSE data chunk from the LLM stream
*/
interface OpenAiSseChunk {
id?: string;
object?: string;
created?: number;
model?: string;
choices?: {
index: number;
delta?: {
role?: string;
content?: string;
};
finish_reason?: string | null;
}[];
error?: string;
}
/**
* Parsed SSE data chunk from legacy /api/llm/chat stream
*/
interface LegacySseChunk {
interface SseChunk {
error?: string;
message?: {
role: string;
@@ -65,17 +46,7 @@ interface LegacySseChunk {
}
/**
* Parsed SSE data chunk with simple token format
*/
interface SimpleTokenChunk {
token?: string;
done?: boolean;
error?: string;
}
/**
* Send a chat message to the LLM (non-streaming fallback)
* Uses /api/llm/chat endpoint which supports both streaming and non-streaming
* Send a chat message to the LLM
*/
export async function sendChatMessage(request: ChatRequest): Promise<ChatResponse> {
return apiPost<ChatResponse>("/api/llm/chat", request);
@@ -95,20 +66,11 @@ async function ensureCsrfTokenForStream(): Promise<string> {
/**
* Stream a chat message from the LLM using SSE over fetch.
*
* Uses /api/chat/stream endpoint which proxies to OpenClaw.
* The backend responds with Server-Sent Events in one of these formats:
*
* OpenAI-compatible format:
* data: {"choices":[{"delta":{"content":"token"}}],...}\n\n
* data: [DONE]\n\n
*
* Legacy format (from /api/llm/chat):
* data: {"message":{"content":"token"},...}\n\n
* data: [DONE]\n\n
*
* Simple token format:
* data: {"token":"..."}\n\n
* data: {"done":true}\n\n
* The backend accepts stream: true in the request body and responds with
* Server-Sent Events:
* data: {"message":{"content":"token"},...}\n\n for each token
* data: [DONE]\n\n when the stream is complete
* data: {"error":"message"}\n\n on error
*
* @param request - Chat request (stream field will be forced to true)
* @param onChunk - Called with each token string as it arrives
@@ -127,14 +89,14 @@ export function streamChatMessage(
try {
const csrfToken = await ensureCsrfTokenForStream();
const response = await fetch(`${API_BASE_URL}/api/chat/stream`, {
const response = await fetch(`${API_BASE_URL}/api/llm/chat`, {
method: "POST",
headers: {
"Content-Type": "application/json",
"X-CSRF-Token": csrfToken,
},
credentials: "include",
body: JSON.stringify({ messages: request.messages, stream: true }),
body: JSON.stringify({ ...request, stream: true }),
signal: signal ?? null,
});
@@ -170,25 +132,6 @@ export function streamChatMessage(
const trimmed = part.trim();
if (!trimmed) continue;
// Handle event: error format
const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
try {
const errorData = JSON.parse(dataMatch[1].trim()) as {
error?: string;
};
throw new Error(errorData.error ?? "Stream error occurred");
} catch (parseErr) {
if (parseErr instanceof SyntaxError) {
throw new Error("Stream error occurred");
}
throw parseErr;
}
}
// Standard SSE format: data: {...}
for (const line of trimmed.split("\n")) {
if (!line.startsWith("data: ")) continue;
@@ -200,39 +143,14 @@ export function streamChatMessage(
}
try {
const parsed: unknown = JSON.parse(data);
const parsed = JSON.parse(data) as SseChunk;
// Handle OpenAI format (from /api/chat/stream via OpenClaw)
const openAiChunk = parsed as OpenAiSseChunk;
if (openAiChunk.choices?.[0]?.delta?.content) {
onChunk(openAiChunk.choices[0].delta.content);
continue;
if (parsed.error) {
throw new Error(parsed.error);
}
// Handle legacy format (from /api/llm/chat)
const legacyChunk = parsed as LegacySseChunk;
if (legacyChunk.message?.content) {
onChunk(legacyChunk.message.content);
continue;
}
// Handle simple token format
const simpleChunk = parsed as SimpleTokenChunk;
if (simpleChunk.token) {
onChunk(simpleChunk.token);
continue;
}
// Handle done flag in simple format
if (simpleChunk.done === true) {
onComplete();
return;
}
// Handle error in any format
const error = openAiChunk.error ?? legacyChunk.error ?? simpleChunk.error;
if (error) {
throw new Error(error);
if (parsed.message?.content) {
onChunk(parsed.message.content);
}
} catch (parseErr) {
if (parseErr instanceof SyntaxError) {
@@ -244,7 +162,7 @@ export function streamChatMessage(
}
}
// Natural end of stream without [DONE] or done flag
// Natural end of stream without [DONE]
onComplete();
} catch (err: unknown) {
if (err instanceof DOMException && err.name === "AbortError") {