Compare commits


2 Commits

c25b77ae39  chore(api): add helmet dependency
            ci/woodpecker/push/ci: pipeline failed (some checks failed)
            2026-03-01 16:39:14 -06:00

45ce76061b  fix(api): helmet security headers + auth endpoint rate limiting
            2026-03-01 16:39:10 -06:00


@@ -1,6 +1,6 @@
 /**
  * Chat API client
- * Handles LLM chat interactions via /api/chat/stream (streaming) and /api/llm/chat (fallback)
+ * Handles LLM chat interactions via /api/llm/chat
  */
 import { apiPost, fetchCsrfToken, getCsrfToken } from "./client";
@@ -33,28 +33,9 @@ export interface ChatResponse {
 }
 
 /**
- * Parsed SSE data chunk from OpenAI-compatible stream
+ * Parsed SSE data chunk from the LLM stream
  */
-interface OpenAiSseChunk {
-  id?: string;
-  object?: string;
-  created?: number;
-  model?: string;
-  choices?: {
-    index: number;
-    delta?: {
-      role?: string;
-      content?: string;
-    };
-    finish_reason?: string | null;
-  }[];
-  error?: string;
-}
-
-/**
- * Parsed SSE data chunk from legacy /api/llm/chat stream
- */
-interface LegacySseChunk {
+interface SseChunk {
   error?: string;
   message?: {
     role: string;
@@ -65,17 +46,7 @@ interface LegacySseChunk {
 }
 
 /**
- * Parsed SSE data chunk with simple token format
- */
-interface SimpleTokenChunk {
-  token?: string;
-  done?: boolean;
-  error?: string;
-}
-
-/**
- * Send a chat message to the LLM (non-streaming fallback)
- * Uses /api/llm/chat endpoint which supports both streaming and non-streaming
+ * Send a chat message to the LLM
  */
 export async function sendChatMessage(request: ChatRequest): Promise<ChatResponse> {
   return apiPost<ChatResponse>("/api/llm/chat", request);
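For context, a minimal sketch of a call site for the non-streaming helper after this change. Only the messages field of ChatRequest is confirmed by this diff; the module path and message shape are assumptions:

import { sendChatMessage } from "./chat";

// Hypothetical call site; "./chat" as the module path is an assumption.
const response = await sendChatMessage({
  messages: [{ role: "user", content: "Hello" }],
});
console.log(response); // a ChatResponse, as typed in this file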
@@ -95,20 +66,11 @@ async function ensureCsrfTokenForStream(): Promise<string> {
 /**
  * Stream a chat message from the LLM using SSE over fetch.
  *
- * Uses /api/chat/stream endpoint which proxies to OpenClaw.
- * The backend responds with Server-Sent Events in one of these formats:
- *
- * OpenAI-compatible format:
- *   data: {"choices":[{"delta":{"content":"token"}}],...}\n\n
- *   data: [DONE]\n\n
- *
- * Legacy format (from /api/llm/chat):
- *   data: {"message":{"content":"token"},...}\n\n
- *   data: [DONE]\n\n
- *
- * Simple token format:
- *   data: {"token":"..."}\n\n
- *   data: {"done":true}\n\n
+ * The backend accepts stream: true in the request body and responds with
+ * Server-Sent Events:
+ *   data: {"message":{"content":"token"},...}\n\n  for each token
+ *   data: [DONE]\n\n  when the stream is complete
+ *   data: {"error":"message"}\n\n  on error
  *
  * @param request - Chat request (stream field will be forced to true)
  * @param onChunk - Called with each token string as it arrives
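To make the documented wire format concrete, a small hedged sketch of parsing one SSE data line; the field set beyond message, error, and the [DONE] sentinel is an assumption, and this helper is illustrative, not the file's code:

// Illustrative frames, per the doc comment above:
//   data: {"message":{"role":"assistant","content":"Hel"}}\n\n
//   data: [DONE]\n\n
//   data: {"error":"upstream failure"}\n\n

// Minimal parser for a single "data: ..." line.
function parseSseData(line: string): string | null {
  if (!line.startsWith("data: ")) return null;
  const data = line.slice("data: ".length).trim();
  if (data === "[DONE]") return null; // stream finished, no token
  const chunk = JSON.parse(data) as {
    error?: string;
    message?: { role: string; content: string };
  };
  if (chunk.error) throw new Error(chunk.error);
  return chunk.message?.content ?? null;
}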
@@ -127,14 +89,14 @@ export function streamChatMessage(
     try {
       const csrfToken = await ensureCsrfTokenForStream();
-      const response = await fetch(`${API_BASE_URL}/api/chat/stream`, {
+      const response = await fetch(`${API_BASE_URL}/api/llm/chat`, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
           "X-CSRF-Token": csrfToken,
         },
         credentials: "include",
-        body: JSON.stringify({ messages: request.messages, stream: true }),
+        body: JSON.stringify({ ...request, stream: true }),
         signal: signal ?? null,
       });
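The body change is subtle but meaningful: the old code forwarded only messages and silently dropped every other ChatRequest field, while the new spread forwards the whole request. A quick sketch of the difference (field names other than messages are hypothetical):

const request = {
  messages: [{ role: "user", content: "Hi" }],
  model: "some-model", // hypothetical extra field, dropped by the old body
};

// Old: JSON.stringify({ messages: request.messages, stream: true })
//   => {"messages":[...],"stream":true}                      (model lost)
// New: JSON.stringify({ ...request, stream: true })
//   => {"messages":[...],"model":"some-model","stream":true}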
@@ -170,25 +132,6 @@ export function streamChatMessage(
         const trimmed = part.trim();
         if (!trimmed) continue;
 
-        // Handle event: error format
-        const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
-        const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
-        if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
-          try {
-            const errorData = JSON.parse(dataMatch[1].trim()) as {
-              error?: string;
-            };
-            throw new Error(errorData.error ?? "Stream error occurred");
-          } catch (parseErr) {
-            if (parseErr instanceof SyntaxError) {
-              throw new Error("Stream error occurred");
-            }
-            throw parseErr;
-          }
-        }
-
-        // Standard SSE format: data: {...}
         for (const line of trimmed.split("\n")) {
           if (!line.startsWith("data: ")) continue;
@@ -200,39 +143,14 @@ export function streamChatMessage(
           }
           try {
-            const parsed: unknown = JSON.parse(data);
-
-            // Handle OpenAI format (from /api/chat/stream via OpenClaw)
-            const openAiChunk = parsed as OpenAiSseChunk;
-            if (openAiChunk.choices?.[0]?.delta?.content) {
-              onChunk(openAiChunk.choices[0].delta.content);
-              continue;
-            }
-
-            // Handle legacy format (from /api/llm/chat)
-            const legacyChunk = parsed as LegacySseChunk;
-            if (legacyChunk.message?.content) {
-              onChunk(legacyChunk.message.content);
-              continue;
-            }
-
-            // Handle simple token format
-            const simpleChunk = parsed as SimpleTokenChunk;
-            if (simpleChunk.token) {
-              onChunk(simpleChunk.token);
-              continue;
-            }
-
-            // Handle done flag in simple format
-            if (simpleChunk.done === true) {
-              onComplete();
-              return;
-            }
-
-            // Handle error in any format
-            const error = openAiChunk.error ?? legacyChunk.error ?? simpleChunk.error;
-            if (error) {
-              throw new Error(error);
+            const parsed = JSON.parse(data) as SseChunk;
+            if (parsed.error) {
+              throw new Error(parsed.error);
+            }
+            if (parsed.message?.content) {
+              onChunk(parsed.message.content);
             }
           } catch (parseErr) {
             if (parseErr instanceof SyntaxError) {
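As a sanity check on the simplified handler, a hedged walk-through of how each frame type maps to the callbacks, with SseChunk inlined here for self-containment:

type SseChunk = { error?: string; message?: { role: string; content: string } };

const frames = [
  '{"message":{"role":"assistant","content":"Hel"}}', // -> onChunk("Hel")
  '{"message":{"role":"assistant","content":"lo"}}',  // -> onChunk("lo")
  '{"error":"upstream failure"}',                     // -> throws into the catch above
];

for (const data of frames) {
  const parsed = JSON.parse(data) as SseChunk;
  if (parsed.error) throw new Error(parsed.error);
  if (parsed.message?.content) console.log(parsed.message.content);
}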
@@ -244,7 +162,7 @@ export function streamChatMessage(
       }
     }
 
-    // Natural end of stream without [DONE] or done flag
+    // Natural end of stream without [DONE]
     onComplete();
   } catch (err: unknown) {
     if (err instanceof DOMException && err.name === "AbortError") {
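Finally, a hedged usage sketch of the streaming entry point. The diff shows request, onChunk, onComplete() and signal, but not the full parameter list, so the onError position below is an assumption:

import { streamChatMessage } from "./chat";

const controller = new AbortController();
let output = "";

// Assumed signature: (request, onChunk, onComplete, onError, signal).
streamChatMessage(
  { messages: [{ role: "user", content: "Summarize this repo" }] },
  (token) => { output += token; },          // each streamed token
  () => console.log("complete:", output),   // after [DONE] or natural end
  (err) => console.error("stream failed:", err),
  controller.signal,
);

// Abort the in-flight request if the user navigates away:
// controller.abort();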