From c45cec3bba5933a39a8fcb051f3ffe1a4aebdd2d Mon Sep 17 00:00:00 2001
From: Jason Woltje
Date: Tue, 3 Mar 2026 11:16:23 -0600
Subject: [PATCH 1/4] feat(chat): add guest chat mode for unauthenticated users

- Add POST /api/chat/guest endpoint (no auth required)
- Add proxyGuestChat() method using configurable LLM endpoint
- Add streamGuestChat() function to frontend chat API
- Modify useChat to fall back to guest mode on auth errors (403/401)
- Remove !user check from ChatInput disabled prop
- Configure guest LLM via env vars: GUEST_LLM_URL, GUEST_LLM_API_KEY, GUEST_LLM_MODEL
- Default guest LLM: http://10.1.1.42:11434/v1 (Ollama) with llama3.2 model
---
.../src/chat-proxy/chat-proxy.controller.ts | 62 +++++++-
apps/api/src/chat-proxy/chat-proxy.service.ts | 68 ++++++++-
apps/web/src/components/chat/Chat.tsx | 2 +-
apps/web/src/hooks/useChat.ts | 68 ++++++++-
apps/web/src/lib/api/chat.ts | 135 ++++++++++++++++++
5 files changed, 326 insertions(+), 9 deletions(-)

diff --git a/apps/api/src/chat-proxy/chat-proxy.controller.ts b/apps/api/src/chat-proxy/chat-proxy.controller.ts
index 527580e..2513643 100644
--- a/apps/api/src/chat-proxy/chat-proxy.controller.ts
+++ b/apps/api/src/chat-proxy/chat-proxy.controller.ts
@@ -6,7 +6,6 @@ import {
Post,
Req,
Res,
- UnauthorizedException,
UseGuards,
} from "@nestjs/common";
import type { Response } from "express";
@@ -16,16 +15,72 @@ import { ChatStreamDto } from "./chat-proxy.dto";
import { ChatProxyService } from "./chat-proxy.service";
@Controller("chat")
-@UseGuards(AuthGuard)
export class ChatProxyController {
private readonly logger = new Logger(ChatProxyController.name);
constructor(private readonly chatProxyService: ChatProxyService) {}
+ // POST /api/chat/guest
+ // Guest chat endpoint - no authentication required
+ // Uses a shared LLM configuration for unauthenticated users
+ @Post("guest")
+ async guestChat(
+ @Body() body: ChatStreamDto,
+ @Req() req: MaybeAuthenticatedRequest,
+ @Res() res: Response
+ ): Promise<void> {
+ 
const abortController = new AbortController();
+ req.once("close", () => {
+ abortController.abort();
+ });
+
+ res.setHeader("Content-Type", "text/event-stream");
+ res.setHeader("Cache-Control", "no-cache");
+ res.setHeader("Connection", "keep-alive");
+ res.setHeader("X-Accel-Buffering", "no");
+
+ try {
+ const upstreamResponse = await this.chatProxyService.proxyGuestChat(
+ body.messages,
+ abortController.signal
+ );
+
+ const upstreamContentType = upstreamResponse.headers.get("content-type");
+ if (upstreamContentType) {
+ res.setHeader("Content-Type", upstreamContentType);
+ }
+
+ if (!upstreamResponse.body) {
+ throw new Error("LLM response did not include a stream body");
+ }
+
+ for await (const chunk of upstreamResponse.body as unknown as AsyncIterable<Uint8Array>) {
+ if (res.writableEnded || res.destroyed) {
+ break;
+ }
+
+ res.write(Buffer.from(chunk));
+ }
+ } catch (error: unknown) {
+ this.logStreamError(error);
+
+ if (!res.writableEnded && !res.destroyed) {
+ res.write("event: error\n");
+ res.write(`data: ${JSON.stringify({ error: this.toSafeClientMessage(error) })}\n\n`);
+ }
+ } finally {
+ if (!res.writableEnded && !res.destroyed) {
+ res.end();
+ }
+ }
+ }
+
// POST /api/chat/stream
// Request: { messages: Array<{role, content}> }
// Response: SSE stream of chat completion events
+ // Requires authentication - uses user's personal OpenClaw container
@Post("stream")
+ @UseGuards(AuthGuard)
async streamChat(
@Body() body: ChatStreamDto,
@Req() req: MaybeAuthenticatedRequest,
@Res() res: Response
@@ -33,7 +88,8 @@
): Promise<void> {
const userId = req.user?.id;
if (!userId) {
- throw new UnauthorizedException("No authenticated user found on request");
+ this.logger.warn("streamChat called without user ID after AuthGuard");
+ throw new HttpException("Authentication required", 401);
}
const abortController = new AbortController();
diff --git a/apps/api/src/chat-proxy/chat-proxy.service.ts b/apps/api/src/chat-proxy/chat-proxy.service.ts
index 
13334ca..e23e631 100644
--- a/apps/api/src/chat-proxy/chat-proxy.service.ts
+++ b/apps/api/src/chat-proxy/chat-proxy.service.ts
@@ -4,11 +4,14 @@ import {
Logger,
ServiceUnavailableException,
} from "@nestjs/common";
+import { ConfigService } from "@nestjs/config";
import { ContainerLifecycleService } from "../container-lifecycle/container-lifecycle.service";
import { PrismaService } from "../prisma/prisma.service";
import type { ChatMessage } from "./chat-proxy.dto";
const DEFAULT_OPENCLAW_MODEL = "openclaw:default";
+const DEFAULT_GUEST_LLM_URL = "http://10.1.1.42:11434/v1";
+const DEFAULT_GUEST_LLM_MODEL = "llama3.2";
interface ContainerConnection {
url: string;
@@ -21,7 +24,8 @@ export class ChatProxyService {
constructor(
private readonly prisma: PrismaService,
- private readonly containerLifecycle: ContainerLifecycleService
+ private readonly containerLifecycle: ContainerLifecycleService,
+ private readonly config: ConfigService
) {}
// Get the user's OpenClaw container URL and mark it active.
@@ -79,6 +83,68 @@ export class ChatProxyService {
}
}
+ /**
+ * Proxy guest chat request to configured LLM endpoint.
+ * Uses environment variables for configuration:
+ * - GUEST_LLM_URL: OpenAI-compatible endpoint URL
+ * - GUEST_LLM_API_KEY: API key (optional, for cloud providers)
+ * - GUEST_LLM_MODEL: Model name to use
+ */
+ async proxyGuestChat(
+ messages: ChatMessage[],
+ signal?: AbortSignal
+ ): Promise<Response> {
+ const llmUrl = this.config.get("GUEST_LLM_URL") ?? DEFAULT_GUEST_LLM_URL;
+ const llmApiKey = this.config.get("GUEST_LLM_API_KEY");
+ const llmModel = this.config.get("GUEST_LLM_MODEL") ?? 
DEFAULT_GUEST_LLM_MODEL;
+
+ const headers: Record<string, string> = {
+ "Content-Type": "application/json",
+ };
+
+ if (llmApiKey) {
+ headers["Authorization"] = `Bearer ${llmApiKey}`;
+ }
+
+ const requestInit: RequestInit = {
+ method: "POST",
+ headers,
+ body: JSON.stringify({
+ messages,
+ model: llmModel,
+ stream: true,
+ }),
+ };
+
+ if (signal) {
+ requestInit.signal = signal;
+ }
+
+ try {
+ this.logger.debug(`Guest chat proxying to ${llmUrl} with model ${llmModel}`);
+ const response = await fetch(`${llmUrl}/chat/completions`, requestInit);
+
+ if (!response.ok) {
+ const detail = await this.readResponseText(response);
+ const status = `${String(response.status)} ${response.statusText}`.trim();
+ this.logger.warn(
+ detail ? `Guest LLM returned ${status}: ${detail}` : `Guest LLM returned ${status}`
+ );
+ throw new BadGatewayException(`Guest LLM returned ${status}`);
+ }
+
+ return response;
+ } catch (error: unknown) {
+ if (error instanceof BadGatewayException) {
+ throw error;
+ }
+
+ const message = error instanceof Error ? error.message : String(error);
+ this.logger.warn(`Failed to proxy guest chat request: ${message}`);
+ throw new ServiceUnavailableException("Failed to proxy guest chat to LLM");
+ }
+ }
+
private async getContainerConnection(userId: string): Promise<ContainerConnection> {
const connection = await this.containerLifecycle.ensureRunning(userId);
await this.containerLifecycle.touch(userId);
diff --git a/apps/web/src/components/chat/Chat.tsx b/apps/web/src/components/chat/Chat.tsx
index a06a538..8462989 100644
--- a/apps/web/src/components/chat/Chat.tsx
+++ b/apps/web/src/components/chat/Chat.tsx
@@ -352,7 +352,7 @@ export const Chat = forwardRef(function Chat(
((guestResolve, guestReject) => { + let hasReceivedData = false; + + streamGuestChat( + request, + (chunk: string) => { + if (!hasReceivedData) { + hasReceivedData = true; + setIsLoading(false); + setIsStreaming(true); + setMessages((prev) => { + const updated = [...prev, { ...placeholderMessage }]; + messagesRef.current = updated; + return updated; + }); + } + + setMessages((prev) => { + const updated = prev.map((msg) => + msg.id === assistantMessageId ? { ...msg, content: msg.content + chunk } : msg + ); + messagesRef.current = updated; + return updated; + }); + }, + () => { + streamingSucceeded = true; + setIsStreaming(false); + guestResolve(); + }, + (guestErr: Error) => { + guestReject(guestErr); + }, + controller.signal + ); + }); + } catch (guestErr: unknown) { + // Guest also failed + setMessages((prev) => { + const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId); + messagesRef.current = withoutPlaceholder; + return withoutPlaceholder; + }); + const errorMsg = guestErr instanceof Error ? guestErr.message : "Chat unavailable"; + setError(`Unable to connect to chat: ${errorMsg}`); + setIsLoading(false); + return; + } + } else { + // Streaming failed — fall back to non-streaming + console.warn("Streaming failed, falling back to non-streaming", { + error: err instanceof Error ? err : new Error(String(err)), + }); setMessages((prev) => { const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId); diff --git a/apps/web/src/lib/api/chat.ts b/apps/web/src/lib/api/chat.ts index 73dcc9c..18022c8 100644 --- a/apps/web/src/lib/api/chat.ts +++ b/apps/web/src/lib/api/chat.ts @@ -92,6 +92,141 @@ async function ensureCsrfTokenForStream(): Promise { return fetchCsrfToken(); } +/** + * Stream a guest chat message (no authentication required). + * Uses /api/chat/guest endpoint with shared LLM configuration. 
+ *
+ * @param request - Chat request
+ * @param onChunk - Called with each token string as it arrives
+ * @param onComplete - Called when the stream finishes successfully
+ * @param onError - Called if the stream encounters an error
+ * @param signal - Optional AbortSignal for cancellation
+ */
+export function streamGuestChat(
+ request: ChatRequest,
+ onChunk: (chunk: string) => void,
+ onComplete: () => void,
+ onError: (error: Error) => void,
+ signal?: AbortSignal
+): void {
+ void (async (): Promise<void> => {
+ try {
+ const response = await fetch(`${API_BASE_URL}/api/chat/guest`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ credentials: "include",
+ body: JSON.stringify({ messages: request.messages, stream: true }),
+ signal: signal ?? null,
+ });
+
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => response.statusText);
+ throw new Error(`Guest chat failed: ${errorText}`);
+ }
+
+ if (!response.body) {
+ throw new Error("Response body is not readable");
+ }
+
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder("utf-8");
+ let buffer = "";
+
+ let readerDone = false;
+ while (!readerDone) {
+ const { done, value } = await reader.read();
+ readerDone = done;
+ if (done) {
+ break;
+ }
+
+ buffer += decoder.decode(value, { stream: true });
+
+ // SSE messages are separated by double newlines
+ const parts = buffer.split("\n\n");
+ buffer = parts.pop() ?? "";
+
+ for (const part of parts) {
+ const trimmed = part.trim();
+ if (!trimmed) continue;
+
+ // Handle event: error format
+ const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
+ const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
+
+ if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
+ try {
+ const errorData = JSON.parse(dataMatch[1].trim()) as {
+ error?: string;
+ };
+ throw new Error(errorData.error ?? 
"Stream error occurred"); + } catch (parseErr) { + if (parseErr instanceof SyntaxError) { + throw new Error("Stream error occurred"); + } + throw parseErr; + } + } + + // Standard SSE format: data: {...} + for (const line of trimmed.split("\n")) { + if (!line.startsWith("data: ")) continue; + + const data = line.slice("data: ".length).trim(); + + if (data === "[DONE]") { + onComplete(); + return; + } + + try { + const parsed: unknown = JSON.parse(data); + + // Handle OpenAI format + const openAiChunk = parsed as OpenAiSseChunk; + if (openAiChunk.choices?.[0]?.delta?.content) { + onChunk(openAiChunk.choices[0].delta.content); + continue; + } + + // Handle simple token format + const simpleChunk = parsed as SimpleTokenChunk; + if (simpleChunk.token) { + onChunk(simpleChunk.token); + continue; + } + + if (simpleChunk.done === true) { + onComplete(); + return; + } + + const error = openAiChunk.error ?? simpleChunk.error; + if (error) { + throw new Error(error); + } + } catch (parseErr) { + if (parseErr instanceof SyntaxError) { + continue; + } + throw parseErr; + } + } + } + } + + onComplete(); + } catch (err: unknown) { + if (err instanceof DOMException && err.name === "AbortError") { + return; + } + onError(err instanceof Error ? err : new Error(String(err))); + } + })(); +} + /** * Stream a chat message from the LLM using SSE over fetch. * From 83477165d405b7a223db31aef35a343af4fe8011 Mon Sep 17 00:00:00 2001 From: Jason Woltje Date: Tue, 3 Mar 2026 11:22:18 -0600 Subject: [PATCH 2/4] fix(chat): correct indentation in useChat guest fallback --- apps/web/src/hooks/useChat.ts | 107 +++++++++++++++++----------------- 1 file changed, 54 insertions(+), 53 deletions(-) diff --git a/apps/web/src/hooks/useChat.ts b/apps/web/src/hooks/useChat.ts index 881e9a4..c9fe796 100644 --- a/apps/web/src/hooks/useChat.ts +++ b/apps/web/src/hooks/useChat.ts @@ -343,63 +343,64 @@ export function useChat(options: UseChatOptions = {}): UseChatReturn { error: err instanceof Error ? 
err : new Error(String(err)), }); - setMessages((prev) => { - const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId); - messagesRef.current = withoutPlaceholder; - return withoutPlaceholder; - }); - setIsStreaming(false); - - try { - const response = await sendChatMessage(request); - - const assistantMessage: Message = { - id: `assistant-${Date.now().toString()}`, - role: "assistant", - content: response.message.content, - createdAt: new Date().toISOString(), - model: response.model, - promptTokens: response.promptEvalCount ?? 0, - completionTokens: response.evalCount ?? 0, - totalTokens: (response.promptEvalCount ?? 0) + (response.evalCount ?? 0), - }; - setMessages((prev) => { - const updated = [...prev, assistantMessage]; - messagesRef.current = updated; - return updated; + const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId); + messagesRef.current = withoutPlaceholder; + return withoutPlaceholder; }); + setIsStreaming(false); - streamingSucceeded = true; - } catch (fallbackErr: unknown) { - const errorMsg = - fallbackErr instanceof Error ? fallbackErr.message : "Failed to send message"; - setError("Unable to send message. Please try again."); - onError?.(fallbackErr instanceof Error ? fallbackErr : new Error(errorMsg)); - console.error("Failed to send chat message", { - error: fallbackErr, - errorType: "LLM_ERROR", - conversationId: conversationIdRef.current, - messageLength: content.length, - messagePreview: content.substring(0, 50), - model, - messageCount: messagesRef.current.length, - timestamp: new Date().toISOString(), - }); + try { + const response = await sendChatMessage(request); - const errorMessage: Message = { - id: `error-${String(Date.now())}`, - role: "assistant", - content: "Something went wrong. 
Please try again.", - createdAt: new Date().toISOString(), - }; - setMessages((prev) => { - const updated = [...prev, errorMessage]; - messagesRef.current = updated; - return updated; - }); - setIsLoading(false); - return; + const assistantMessage: Message = { + id: `assistant-${Date.now().toString()}`, + role: "assistant", + content: response.message.content, + createdAt: new Date().toISOString(), + model: response.model, + promptTokens: response.promptEvalCount ?? 0, + completionTokens: response.evalCount ?? 0, + totalTokens: (response.promptEvalCount ?? 0) + (response.evalCount ?? 0), + }; + + setMessages((prev) => { + const updated = [...prev, assistantMessage]; + messagesRef.current = updated; + return updated; + }); + + streamingSucceeded = true; + } catch (fallbackErr: unknown) { + const errorMsg = + fallbackErr instanceof Error ? fallbackErr.message : "Failed to send message"; + setError("Unable to send message. Please try again."); + onError?.(fallbackErr instanceof Error ? fallbackErr : new Error(errorMsg)); + console.error("Failed to send chat message", { + error: fallbackErr, + errorType: "LLM_ERROR", + conversationId: conversationIdRef.current, + messageLength: content.length, + messagePreview: content.substring(0, 50), + model, + messageCount: messagesRef.current.length, + timestamp: new Date().toISOString(), + }); + + const errorMessage: Message = { + id: `error-${String(Date.now())}`, + role: "assistant", + content: "Something went wrong. 
Please try again.",
+ createdAt: new Date().toISOString(),
+ };
+ setMessages((prev) => {
+ const updated = [...prev, errorMessage];
+ messagesRef.current = updated;
+ return updated;
+ });
+ setIsLoading(false);
+ return;
+ }
}
}

From 48d734516a79427f6234afcddc4a19e580d316c5 Mon Sep 17 00:00:00 2001
From: Jason Woltje
Date: Tue, 3 Mar 2026 11:36:15 -0600
Subject: [PATCH 3/4] fix(lint): resolve prettier and dot-notation errors
---
apps/api/src/chat-proxy/chat-proxy.controller.ts | 11 +----------
apps/api/src/chat-proxy/chat-proxy.service.ts | 7 ++-----
2 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/apps/api/src/chat-proxy/chat-proxy.controller.ts b/apps/api/src/chat-proxy/chat-proxy.controller.ts
index 2513643..763e56d 100644
--- a/apps/api/src/chat-proxy/chat-proxy.controller.ts
+++ b/apps/api/src/chat-proxy/chat-proxy.controller.ts
@@ -1,13 +1,4 @@
-import {
- Body,
- Controller,
- HttpException,
- Logger,
- Post,
- Req,
- Res,
- UseGuards,
-} from "@nestjs/common";
+import { Body, Controller, HttpException, Logger, Post, Req, Res, UseGuards } from "@nestjs/common";
import type { Response } from "express";
import { AuthGuard } from "../auth/guards/auth.guard";
import type { MaybeAuthenticatedRequest } from "../auth/types/better-auth-request.interface";
import { ChatStreamDto } from "./chat-proxy.dto";
import { ChatProxyService } from "./chat-proxy.service";
diff --git a/apps/api/src/chat-proxy/chat-proxy.service.ts b/apps/api/src/chat-proxy/chat-proxy.service.ts
index e23e631..731fb58 100644
--- a/apps/api/src/chat-proxy/chat-proxy.service.ts
+++ b/apps/api/src/chat-proxy/chat-proxy.service.ts
@@ -90,10 +90,7 @@ export class ChatProxyService {
* - GUEST_LLM_API_KEY: API key (optional, for cloud providers)
* - GUEST_LLM_MODEL: Model name to use
*/
- async proxyGuestChat(
- messages: ChatMessage[],
- signal?: AbortSignal
- ): Promise<Response> {
+ async proxyGuestChat(messages: ChatMessage[], signal?: AbortSignal): Promise<Response> {
const llmUrl = this.config.get("GUEST_LLM_URL") ?? 
DEFAULT_GUEST_LLM_URL;
const llmApiKey = this.config.get("GUEST_LLM_API_KEY");
const llmModel = this.config.get("GUEST_LLM_MODEL") ?? DEFAULT_GUEST_LLM_MODEL;
@@ -103,7 +100,7 @@ export class ChatProxyService {
};
if (llmApiKey) {
- headers["Authorization"] = `Bearer ${llmApiKey}`;
+ headers.Authorization = `Bearer ${llmApiKey}`;
}
const requestInit: RequestInit = {

From 1a6cf113c859f053a55e583d919c628bb7710ac3 Mon Sep 17 00:00:00 2001
From: Jason Woltje
Date: Tue, 3 Mar 2026 11:46:05 -0600
Subject: [PATCH 4/4] fix(lint): resolve prettier formatting in useChat.ts
---
apps/web/src/hooks/useChat.ts | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/apps/web/src/hooks/useChat.ts b/apps/web/src/hooks/useChat.ts
index c9fe796..b98ab03 100644
--- a/apps/web/src/hooks/useChat.ts
+++ b/apps/web/src/hooks/useChat.ts
@@ -280,13 +280,16 @@ export function useChat(options: UseChatOptions = {}): UseChatReturn {
}
// Streaming failed - check if auth error, try guest mode
- const isAuthError = err instanceof Error &&
- (err.message.includes("403") || err.message.includes("401") ||
- err.message.includes("auth") || err.message.includes("Forbidden"));
+ const isAuthError =
+ err instanceof Error &&
+ (err.message.includes("403") ||
+ err.message.includes("401") ||
+ err.message.includes("auth") ||
+ err.message.includes("Forbidden"));
if (isAuthError) {
console.warn("Auth failed, trying guest chat mode");
-
+
// Try guest chat streaming
try {
await new Promise<void>((guestResolve, guestReject) => {