Compare commits

...

2 Commits

5a759541e4  feat: wire chat to /api/chat/stream SSE endpoint
            Checks failed: ci/woodpecker/push/ci (Pipeline failed)
            Date: 2026-03-01 16:53:58 -06:00

6eb91c9eba  fix(api): security hardening — helmet + auth rate limiting (#641)
            Co-authored-by: Jason Woltje <jason@diversecanvas.com>
            Co-committed-by: Jason Woltje <jason@diversecanvas.com>
            Checks failed: ci/woodpecker/push/ci (Pipeline failed)
            Date: 2026-03-01 22:43:10 +00:00
5 changed files with 119 additions and 18 deletions


@@ -62,6 +62,7 @@
     "discord.js": "^14.25.1",
     "dockerode": "^4.0.9",
     "gray-matter": "^4.0.3",
+    "helmet": "^8.1.0",
     "highlight.js": "^11.11.1",
     "ioredis": "^5.9.2",
     "jose": "^6.1.3",


@@ -106,7 +106,7 @@ export class AuthController {
   // @SkipCsrf avoids double-protection conflicts.
   // See: https://www.better-auth.com/docs/reference/security
   @SkipCsrf()
-  @Throttle({ strict: { limit: 10, ttl: 60000 } })
+  @Throttle({ default: { ttl: 60_000, limit: 5 } })
   async handleAuth(@Req() req: ExpressRequest, @Res() res: ExpressResponse): Promise<void> {
     // Extract client IP for logging
     const clientIp = this.getClientIp(req);
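
Note on the @Throttle change: in @nestjs/throttler v5+, the decorator's object key must name a throttler registered with ThrottlerModule, which suggests the old strict key may never have matched a configured throttler; switching to default (and tightening from 10 to 5 requests per 60 s) binds the override to the standard configuration. A minimal sketch of the module side, assuming a single named throttler and a global guard (names and values are illustrative, not from this diff):

// Hypothetical module wiring; illustrative only, not from this repository.
import { Module } from "@nestjs/common";
import { APP_GUARD } from "@nestjs/core";
import { ThrottlerGuard, ThrottlerModule } from "@nestjs/throttler";

@Module({
  imports: [
    // One throttler named "default"; @Throttle({ default: ... }) on a route
    // overrides these numbers for that route only.
    ThrottlerModule.forRoot([{ name: "default", ttl: 60_000, limit: 100 }]),
  ],
  providers: [
    // Enforce limits on every route unless skipped or overridden.
    { provide: APP_GUARD, useClass: ThrottlerGuard },
  ],
})
export class AppModule {}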


@@ -1,6 +1,7 @@
 import { NestFactory } from "@nestjs/core";
 import { RequestMethod, ValidationPipe } from "@nestjs/common";
 import cookieParser from "cookie-parser";
+import helmet from "helmet";
 import { AppModule } from "./app.module";
 import { getTrustedOrigins } from "./auth/auth.config";
 import { GlobalExceptionFilter } from "./filters/global-exception.filter";
@@ -33,6 +34,14 @@ async function bootstrap() {
   // Enable cookie parser for session handling
   app.use(cookieParser());
+  // Enable helmet security headers
+  app.use(
+    helmet({
+      contentSecurityPolicy: false, // Let Next.js handle CSP
+      crossOriginEmbedderPolicy: false,
+    })
+  );
   // Enable global validation pipe with transformation
   app.useGlobalPipes(
     new ValidationPipe({
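
With CSP left to Next.js, helmet 8 still applies its other defaults (for example X-Content-Type-Options: nosniff and Cross-Origin-Opener-Policy: same-origin). A quick smoke check, assuming a local instance on port 3000 and a health endpoint (both invented for illustration):

// Illustrative only: the port and endpoint are assumptions, not from this diff.
const res = await fetch("http://localhost:3000/api/health");
console.log(res.headers.get("x-content-type-options"));     // expected: "nosniff"
console.log(res.headers.get("cross-origin-opener-policy")); // expected: "same-origin"
console.log(res.headers.get("content-security-policy"));    // expected: null (disabled above)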


@@ -1,6 +1,6 @@
 /**
  * Chat API client
- * Handles LLM chat interactions via /api/llm/chat
+ * Handles LLM chat interactions via /api/chat/stream (streaming) and /api/llm/chat (fallback)
  */
 import { apiPost, fetchCsrfToken, getCsrfToken } from "./client";
@@ -33,9 +33,28 @@ export interface ChatResponse {
 }
 /**
- * Parsed SSE data chunk from the LLM stream
+ * Parsed SSE data chunk from OpenAI-compatible stream
  */
-interface SseChunk {
+interface OpenAiSseChunk {
+  id?: string;
+  object?: string;
+  created?: number;
+  model?: string;
+  choices?: {
+    index: number;
+    delta?: {
+      role?: string;
+      content?: string;
+    };
+    finish_reason?: string | null;
+  }[];
+  error?: string;
+}
+/**
+ * Parsed SSE data chunk from legacy /api/llm/chat stream
+ */
+interface LegacySseChunk {
   error?: string;
   message?: {
     role: string;
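
For orientation, an OpenAI-style chunk as it might arrive in a data: frame (all values invented):

const example: OpenAiSseChunk = {
  id: "chatcmpl-abc123",            // invented id
  object: "chat.completion.chunk",
  created: 1767310000,
  model: "example-model",           // invented model name
  choices: [{ index: 0, delta: { role: "assistant", content: "Hel" }, finish_reason: null }],
};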
@@ -46,7 +65,17 @@ interface SseChunk {
 }
 /**
- * Send a chat message to the LLM
+ * Parsed SSE data chunk with simple token format
+ */
+interface SimpleTokenChunk {
+  token?: string;
+  done?: boolean;
+  error?: string;
+}
+/**
+ * Send a chat message to the LLM (non-streaming fallback)
  * Uses /api/llm/chat endpoint which supports both streaming and non-streaming
  */
 export async function sendChatMessage(request: ChatRequest): Promise<ChatResponse> {
   return apiPost<ChatResponse>("/api/llm/chat", request);
@@ -66,11 +95,20 @@ async function ensureCsrfTokenForStream(): Promise<string> {
 /**
  * Stream a chat message from the LLM using SSE over fetch.
  *
- * The backend accepts stream: true in the request body and responds with
- * Server-Sent Events:
- *   data: {"message":{"content":"token"},...}\n\n for each token
- *   data: [DONE]\n\n when the stream is complete
- *   data: {"error":"message"}\n\n on error
+ * Uses /api/chat/stream endpoint which proxies to OpenClaw.
+ * The backend responds with Server-Sent Events in one of these formats:
+ *
+ * OpenAI-compatible format:
+ *   data: {"choices":[{"delta":{"content":"token"}}],...}\n\n
+ *   data: [DONE]\n\n
+ *
+ * Legacy format (from /api/llm/chat):
+ *   data: {"message":{"content":"token"},...}\n\n
+ *   data: [DONE]\n\n
+ *
+ * Simple token format:
+ *   data: {"token":"..."}\n\n
+ *   data: {"done":true}\n\n
+ *
  * @param request - Chat request (stream field will be forced to true)
  * @param onChunk - Called with each token string as it arrives
@@ -89,14 +127,14 @@ export function streamChatMessage(
   try {
     const csrfToken = await ensureCsrfTokenForStream();
-    const response = await fetch(`${API_BASE_URL}/api/llm/chat`, {
+    const response = await fetch(`${API_BASE_URL}/api/chat/stream`, {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
         "X-CSRF-Token": csrfToken,
       },
       credentials: "include",
-      body: JSON.stringify({ ...request, stream: true }),
+      body: JSON.stringify({ messages: request.messages, stream: true }),
       signal: signal ?? null,
     });
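
A call-site sketch, assuming the remaining parameters follow the order implied by the callbacks used in the body (onChunk, onComplete, onError) plus the optional AbortSignal; the full signature is not shown in this diff:

// Hypothetical usage; the parameter order and message shape are assumptions.
let text = "";
const controller = new AbortController();
streamChatMessage(
  { messages: [{ role: "user", content: "Hello!" }] },
  (token) => { text += token; },                  // onChunk: append each token
  () => console.log("complete:", text),           // onComplete
  (err) => console.error("stream failed:", err),  // onError
  controller.signal,                              // optional cancellation
);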
@@ -132,6 +170,25 @@
       const trimmed = part.trim();
       if (!trimmed) continue;
+      // Handle event: error format
+      const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
+      const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
+      if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
+        try {
+          const errorData = JSON.parse(dataMatch[1].trim()) as {
+            error?: string;
+          };
+          throw new Error(errorData.error ?? "Stream error occurred");
+        } catch (parseErr) {
+          if (parseErr instanceof SyntaxError) {
+            throw new Error("Stream error occurred");
+          }
+          throw parseErr;
+        }
+      }
       // Standard SSE format: data: {...}
       for (const line of trimmed.split("\n")) {
         if (!line.startsWith("data: ")) continue;
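
For reference, a frame the event: error branch above would match (error text invented):

  event: error
  data: {"error":"upstream model unavailable"}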
@@ -143,14 +200,39 @@ export function streamChatMessage(
         }
         try {
-          const parsed = JSON.parse(data) as SseChunk;
+          const parsed: unknown = JSON.parse(data);
-          if (parsed.error) {
-            throw new Error(parsed.error);
+          // Handle OpenAI format (from /api/chat/stream via OpenClaw)
+          const openAiChunk = parsed as OpenAiSseChunk;
+          if (openAiChunk.choices?.[0]?.delta?.content) {
+            onChunk(openAiChunk.choices[0].delta.content);
+            continue;
           }
-          if (parsed.message?.content) {
-            onChunk(parsed.message.content);
+          // Handle legacy format (from /api/llm/chat)
+          const legacyChunk = parsed as LegacySseChunk;
+          if (legacyChunk.message?.content) {
+            onChunk(legacyChunk.message.content);
+            continue;
           }
+          // Handle simple token format
+          const simpleChunk = parsed as SimpleTokenChunk;
+          if (simpleChunk.token) {
+            onChunk(simpleChunk.token);
+            continue;
+          }
+          // Handle done flag in simple format
+          if (simpleChunk.done === true) {
+            onComplete();
+            return;
+          }
+          // Handle error in any format
+          const error = openAiChunk.error ?? legacyChunk.error ?? simpleChunk.error;
+          if (error) {
+            throw new Error(error);
+          }
         } catch (parseErr) {
           if (parseErr instanceof SyntaxError) {
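
Design note: the handler probes one parsed value through three casts in order (OpenAI, then legacy, then simple token), so a chunk matching none of the shapes and carrying no error field is silently skipped. A stricter variant could narrow with a type guard, e.g. (sketch only, not in the diff):

// Hypothetical narrowing helper; the diff itself relies on plain casts.
function isOpenAiChunk(value: unknown): value is OpenAiSseChunk {
  return (
    typeof value === "object" &&
    value !== null &&
    Array.isArray((value as { choices?: unknown }).choices)
  );
}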
@@ -162,7 +244,7 @@ export function streamChatMessage(
       }
     }
-    // Natural end of stream without [DONE]
+    // Natural end of stream without [DONE] or done flag
     onComplete();
   } catch (err: unknown) {
     if (err instanceof DOMException && err.name === "AbortError") {

pnpm-lock.yaml (generated)

@@ -180,6 +180,9 @@ importers:
       gray-matter:
         specifier: ^4.0.3
         version: 4.0.3
+      helmet:
+        specifier: ^8.1.0
+        version: 8.1.0
       highlight.js:
         specifier: ^11.11.1
         version: 11.11.1
@@ -5210,6 +5213,10 @@ packages:
     resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==}
     engines: {node: '>= 0.4'}
+  helmet@8.1.0:
+    resolution: {integrity: sha512-jOiHyAZsmnr8LqoPGmCjYAaiuWwjAPLgY8ZX2XrmHawt99/u1y6RgrZMTeoPfpUbV96HOalYgz1qzkRbw54Pmg==}
+    engines: {node: '>=18.0.0'}
   highlight.js@11.11.1:
     resolution: {integrity: sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==}
     engines: {node: '>=12.0.0'}
@@ -12815,6 +12822,8 @@ snapshots:
     dependencies:
       function-bind: 1.1.2
+  helmet@8.1.0: {}
   highlight.js@11.11.1: {}
   html-encoding-sniffer@4.0.0: