feat(chat): add guest chat mode for unauthenticated users
Some checks failed
ci/woodpecker/push/ci Pipeline failed
- Add POST /api/chat/guest endpoint (no auth required)
- Add proxyGuestChat() method using configurable LLM endpoint
- Add streamGuestChat() function to frontend chat API
- Modify useChat to fall back to guest mode on auth errors (403/401); see the sketch below
- Remove !user check from ChatInput disabled prop
- Configure guest LLM via env vars: GUEST_LLM_URL, GUEST_LLM_API_KEY, GUEST_LLM_MODEL
- Default guest LLM: http://10.1.1.42:11434/v1 (Ollama) with llama3.2 model
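The frontend side of this change is not shown in the diff below. As a rough orientation, here is a minimal TypeScript sketch of how the useChat fallback could behave; streamGuestChat and the /api/chat/guest and /api/chat/stream routes come from this commit, while the exact signatures, the local ChatMessage type, and the token handling are illustrative assumptions.

// Sketch only: illustrates the 401/403 fallback described above, not the exact frontend code.
type ChatMessage = { role: "system" | "user" | "assistant"; content: string };

// Guest endpoint needs no Authorization header.
async function streamGuestChat(messages: ChatMessage[], signal?: AbortSignal): Promise<Response> {
  return fetch("/api/chat/guest", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ messages }),
    signal,
  });
}

// useChat-style wrapper: try the authenticated stream first, fall back to guest mode on 401/403.
async function streamChat(messages: ChatMessage[], token?: string): Promise<Response> {
  const response = await fetch("/api/chat/stream", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      ...(token ? { Authorization: `Bearer ${token}` } : {}),
    },
    body: JSON.stringify({ messages }),
  });

  if (response.status === 401 || response.status === 403) {
    return streamGuestChat(messages);
  }
  return response;
}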
@@ -6,7 +6,6 @@ import {
  Post,
  Req,
  Res,
  UnauthorizedException,
  UseGuards,
} from "@nestjs/common";
import type { Response } from "express";
@@ -16,16 +15,72 @@ import { ChatStreamDto } from "./chat-proxy.dto";
import { ChatProxyService } from "./chat-proxy.service";

@Controller("chat")
@UseGuards(AuthGuard)
export class ChatProxyController {
  private readonly logger = new Logger(ChatProxyController.name);

  constructor(private readonly chatProxyService: ChatProxyService) {}

  // POST /api/chat/guest
  // Guest chat endpoint - no authentication required
  // Uses a shared LLM configuration for unauthenticated users
  @Post("guest")
  async guestChat(
    @Body() body: ChatStreamDto,
    @Req() req: MaybeAuthenticatedRequest,
    @Res() res: Response
  ): Promise<void> {
    const abortController = new AbortController();
    req.once("close", () => {
      abortController.abort();
    });

    res.setHeader("Content-Type", "text/event-stream");
    res.setHeader("Cache-Control", "no-cache");
    res.setHeader("Connection", "keep-alive");
    res.setHeader("X-Accel-Buffering", "no");

    try {
      const upstreamResponse = await this.chatProxyService.proxyGuestChat(
        body.messages,
        abortController.signal
      );

      const upstreamContentType = upstreamResponse.headers.get("content-type");
      if (upstreamContentType) {
        res.setHeader("Content-Type", upstreamContentType);
      }

      if (!upstreamResponse.body) {
        throw new Error("LLM response did not include a stream body");
      }

      for await (const chunk of upstreamResponse.body as unknown as AsyncIterable<Uint8Array>) {
        if (res.writableEnded || res.destroyed) {
          break;
        }

        res.write(Buffer.from(chunk));
      }
    } catch (error: unknown) {
      this.logStreamError(error);

      if (!res.writableEnded && !res.destroyed) {
        res.write("event: error\n");
        res.write(`data: ${JSON.stringify({ error: this.toSafeClientMessage(error) })}\n\n`);
      }
    } finally {
      if (!res.writableEnded && !res.destroyed) {
        res.end();
      }
    }
  }

  // POST /api/chat/stream
  // Request: { messages: Array<{role, content}> }
  // Response: SSE stream of chat completion events
  // Requires authentication - uses user's personal OpenClaw container
  @Post("stream")
  @UseGuards(AuthGuard)
  async streamChat(
    @Body() body: ChatStreamDto,
    @Req() req: MaybeAuthenticatedRequest,
@@ -33,7 +88,8 @@ export class ChatProxyController {
  ): Promise<void> {
    const userId = req.user?.id;
    if (!userId) {
-      throw new UnauthorizedException("No authenticated user found on request");
+      this.logger.warn("streamChat called without user ID after AuthGuard");
+      throw new HttpException("Authentication required", 401);
    }

    const abortController = new AbortController();
@@ -4,11 +4,14 @@ import {
  Logger,
  ServiceUnavailableException,
} from "@nestjs/common";
import { ConfigService } from "@nestjs/config";
import { ContainerLifecycleService } from "../container-lifecycle/container-lifecycle.service";
import { PrismaService } from "../prisma/prisma.service";
import type { ChatMessage } from "./chat-proxy.dto";

const DEFAULT_OPENCLAW_MODEL = "openclaw:default";
const DEFAULT_GUEST_LLM_URL = "http://10.1.1.42:11434/v1";
const DEFAULT_GUEST_LLM_MODEL = "llama3.2";

interface ContainerConnection {
  url: string;
@@ -21,7 +24,8 @@ export class ChatProxyService {

  constructor(
    private readonly prisma: PrismaService,
-    private readonly containerLifecycle: ContainerLifecycleService
+    private readonly containerLifecycle: ContainerLifecycleService,
+    private readonly config: ConfigService
  ) {}

  // Get the user's OpenClaw container URL and mark it active.
@@ -79,6 +83,68 @@
    }
  }

  /**
   * Proxy guest chat request to configured LLM endpoint.
   * Uses environment variables for configuration:
   * - GUEST_LLM_URL: OpenAI-compatible endpoint URL
   * - GUEST_LLM_API_KEY: API key (optional, for cloud providers)
   * - GUEST_LLM_MODEL: Model name to use
   */
  async proxyGuestChat(
    messages: ChatMessage[],
    signal?: AbortSignal
  ): Promise<Response> {
    const llmUrl = this.config.get<string>("GUEST_LLM_URL") ?? DEFAULT_GUEST_LLM_URL;
    const llmApiKey = this.config.get<string>("GUEST_LLM_API_KEY");
    const llmModel = this.config.get<string>("GUEST_LLM_MODEL") ?? DEFAULT_GUEST_LLM_MODEL;

    const headers: Record<string, string> = {
      "Content-Type": "application/json",
    };

    if (llmApiKey) {
      headers["Authorization"] = `Bearer ${llmApiKey}`;
    }

    const requestInit: RequestInit = {
      method: "POST",
      headers,
      body: JSON.stringify({
        messages,
        model: llmModel,
        stream: true,
      }),
    };

    if (signal) {
      requestInit.signal = signal;
    }

    try {
      this.logger.debug(`Guest chat proxying to ${llmUrl} with model ${llmModel}`);
      const response = await fetch(`${llmUrl}/chat/completions`, requestInit);

      if (!response.ok) {
        const detail = await this.readResponseText(response);
        const status = `${String(response.status)} ${response.statusText}`.trim();
        this.logger.warn(
          detail ? `Guest LLM returned ${status}: ${detail}` : `Guest LLM returned ${status}`
        );
        throw new BadGatewayException(`Guest LLM returned ${status}`);
      }

      return response;
    } catch (error: unknown) {
      if (error instanceof BadGatewayException) {
        throw error;
      }

      const message = error instanceof Error ? error.message : String(error);
      this.logger.warn(`Failed to proxy guest chat request: ${message}`);
      throw new ServiceUnavailableException("Failed to proxy guest chat to LLM");
    }
  }

  private async getContainerConnection(userId: string): Promise<ContainerConnection> {
    const connection = await this.containerLifecycle.ensureRunning(userId);
    await this.containerLifecycle.touch(userId);
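Not part of the commit, but as a quick manual check of the new route: a minimal TypeScript sketch that POSTs to /api/chat/guest and prints the forwarded SSE bytes. The host, port, and message content are placeholders; only the route, the { messages } request body, and the streamed response come from the code above.

// Hypothetical smoke test for POST /api/chat/guest (run under Node 18+).
async function tryGuestChat(): Promise<void> {
  const res = await fetch("http://localhost:3000/api/chat/guest", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      messages: [{ role: "user", content: "Hello from a guest session" }],
    }),
  });

  if (!res.body) {
    throw new Error("No stream body returned");
  }

  // Each chunk is a piece of the upstream SSE stream, forwarded verbatim by the controller.
  const decoder = new TextDecoder();
  const reader = res.body.getReader();
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    process.stdout.write(decoder.decode(value, { stream: true }));
  }
}

tryGuestChat().catch((err) => console.error(err));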