feat(chat): add guest chat mode for unauthenticated users

- Add POST /api/chat/guest endpoint (no auth required)
- Add proxyGuestChat() method using configurable LLM endpoint
- Add streamGuestChat() function to frontend chat API
- Modify useChat to fall back to guest mode on auth errors (403/401)
- Remove !user check from ChatInput disabled prop
- Configure guest LLM via env vars: GUEST_LLM_URL, GUEST_LLM_API_KEY, GUEST_LLM_MODEL
- Default guest LLM: http://10.1.1.42:11434/v1 (Ollama) with llama3.2 model; see the config sketch below
2026-03-03 11:16:23 -06:00
parent b1baa70e00
commit c45cec3bba
5 changed files with 326 additions and 9 deletions
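
The guest LLM settings above reduce to three environment variables with Ollama-friendly defaults. A minimal sketch of how a backend might resolve them — the server-side proxyGuestChat() code is not among the hunks shown below, so the names and shape here are illustrative, not taken from this commit:

// Illustrative sketch only; the actual proxyGuestChat() server code is not part of the hunks below.
const guestLlmConfig = {
  url: process.env.GUEST_LLM_URL ?? "http://10.1.1.42:11434/v1", // default: local Ollama, OpenAI-compatible API
  apiKey: process.env.GUEST_LLM_API_KEY ?? "",                   // Ollama typically accepts an empty key
  model: process.env.GUEST_LLM_MODEL ?? "llama3.2",
};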


@@ -352,7 +352,7 @@ export const Chat = forwardRef<ChatRef, ChatProps>(function Chat(
       <div className="mx-auto max-w-4xl px-4 py-4 lg:px-8">
         <ChatInput
           onSend={handleSendMessage}
-          disabled={isChatLoading || !user}
+          disabled={isChatLoading}
           inputRef={inputRef}
           isStreaming={isStreaming}
           onStopStreaming={abortStream}


@@ -7,6 +7,7 @@ import { useState, useCallback, useRef } from "react";
 import {
   sendChatMessage,
   streamChatMessage,
+  streamGuestChat,
   type ChatMessage as ApiChatMessage,
 } from "@/lib/api/chat";
 import { createConversation, updateConversation, getIdea, type Idea } from "@/lib/api/ideas";
@@ -278,10 +279,69 @@ export function useChat(options: UseChatOptions = {}): UseChatReturn {
         return;
       }
-      // Streaming failed — fall back to non-streaming
-      console.warn("Streaming failed, falling back to non-streaming", {
-        error: err instanceof Error ? err : new Error(String(err)),
-      });
+      // Streaming failed - check if auth error, try guest mode
+      const isAuthError = err instanceof Error &&
+        (err.message.includes("403") || err.message.includes("401") ||
+          err.message.includes("auth") || err.message.includes("Forbidden"));
+      if (isAuthError) {
+        console.warn("Auth failed, trying guest chat mode");
+        // Try guest chat streaming
+        try {
+          await new Promise<void>((guestResolve, guestReject) => {
+            let hasReceivedData = false;
+            streamGuestChat(
+              request,
+              (chunk: string) => {
+                if (!hasReceivedData) {
+                  hasReceivedData = true;
+                  setIsLoading(false);
+                  setIsStreaming(true);
+                  setMessages((prev) => {
+                    const updated = [...prev, { ...placeholderMessage }];
+                    messagesRef.current = updated;
+                    return updated;
+                  });
+                }
+                setMessages((prev) => {
+                  const updated = prev.map((msg) =>
+                    msg.id === assistantMessageId ? { ...msg, content: msg.content + chunk } : msg
+                  );
+                  messagesRef.current = updated;
+                  return updated;
+                });
+              },
+              () => {
+                streamingSucceeded = true;
+                setIsStreaming(false);
+                guestResolve();
+              },
+              (guestErr: Error) => {
+                guestReject(guestErr);
+              },
+              controller.signal
+            );
+          });
+        } catch (guestErr: unknown) {
+          // Guest also failed
+          setMessages((prev) => {
+            const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId);
+            messagesRef.current = withoutPlaceholder;
+            return withoutPlaceholder;
+          });
+          const errorMsg = guestErr instanceof Error ? guestErr.message : "Chat unavailable";
+          setError(`Unable to connect to chat: ${errorMsg}`);
+          setIsLoading(false);
+          return;
+        }
+      } else {
+        // Streaming failed — fall back to non-streaming
+        console.warn("Streaming failed, falling back to non-streaming", {
+          error: err instanceof Error ? err : new Error(String(err)),
+        });
+        setMessages((prev) => {
+          const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId);
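
For reference, the isAuthError check in this hunk is plain substring matching on the error message. A few illustrative inputs — this helper is a sketch mirroring the condition above, not code from the commit:

// Illustrative behavior of the substring heuristic above; not part of this commit.
const looksLikeAuthError = (e: Error) =>
  e.message.includes("403") || e.message.includes("401") ||
  e.message.includes("auth") || e.message.includes("Forbidden");

looksLikeAuthError(new Error("HTTP 403"));        // true  -> retry via guest endpoint
looksLikeAuthError(new Error("Forbidden"));       // true  -> retry via guest endpoint
looksLikeAuthError(new Error("network timeout")); // false -> non-streaming fallback instead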


@@ -92,6 +92,141 @@ async function ensureCsrfTokenForStream(): Promise<string> {
   return fetchCsrfToken();
 }
+/**
+ * Stream a guest chat message (no authentication required).
+ * Uses /api/chat/guest endpoint with shared LLM configuration.
+ *
+ * @param request - Chat request
+ * @param onChunk - Called with each token string as it arrives
+ * @param onComplete - Called when the stream finishes successfully
+ * @param onError - Called if the stream encounters an error
+ * @param signal - Optional AbortSignal for cancellation
+ */
+export function streamGuestChat(
+  request: ChatRequest,
+  onChunk: (chunk: string) => void,
+  onComplete: () => void,
+  onError: (error: Error) => void,
+  signal?: AbortSignal
+): void {
+  void (async (): Promise<void> => {
+    try {
+      const response = await fetch(`${API_BASE_URL}/api/chat/guest`, {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+        },
+        credentials: "include",
+        body: JSON.stringify({ messages: request.messages, stream: true }),
+        signal: signal ?? null,
+      });
+      if (!response.ok) {
+        const errorText = await response.text().catch(() => response.statusText);
+        throw new Error(`Guest chat failed: ${errorText}`);
+      }
+      if (!response.body) {
+        throw new Error("Response body is not readable");
+      }
+      const reader = response.body.getReader();
+      const decoder = new TextDecoder("utf-8");
+      let buffer = "";
+      let readerDone = false;
+      while (!readerDone) {
+        const { done, value } = await reader.read();
+        readerDone = done;
+        if (done) {
+          break;
+        }
+        buffer += decoder.decode(value, { stream: true });
+        // SSE messages are separated by double newlines
+        const parts = buffer.split("\n\n");
+        buffer = parts.pop() ?? "";
+        for (const part of parts) {
+          const trimmed = part.trim();
+          if (!trimmed) continue;
+          // Handle event: error format
+          const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
+          const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
+          if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
+            try {
+              const errorData = JSON.parse(dataMatch[1].trim()) as {
+                error?: string;
+              };
+              throw new Error(errorData.error ?? "Stream error occurred");
+            } catch (parseErr) {
+              if (parseErr instanceof SyntaxError) {
+                throw new Error("Stream error occurred");
+              }
+              throw parseErr;
+            }
+          }
+          // Standard SSE format: data: {...}
+          for (const line of trimmed.split("\n")) {
+            if (!line.startsWith("data: ")) continue;
+            const data = line.slice("data: ".length).trim();
+            if (data === "[DONE]") {
+              onComplete();
+              return;
+            }
+            try {
+              const parsed: unknown = JSON.parse(data);
+              // Handle OpenAI format
+              const openAiChunk = parsed as OpenAiSseChunk;
+              if (openAiChunk.choices?.[0]?.delta?.content) {
+                onChunk(openAiChunk.choices[0].delta.content);
+                continue;
+              }
+              // Handle simple token format
+              const simpleChunk = parsed as SimpleTokenChunk;
+              if (simpleChunk.token) {
+                onChunk(simpleChunk.token);
+                continue;
+              }
+              if (simpleChunk.done === true) {
+                onComplete();
+                return;
+              }
+              const error = openAiChunk.error ?? simpleChunk.error;
+              if (error) {
+                throw new Error(error);
+              }
+            } catch (parseErr) {
+              if (parseErr instanceof SyntaxError) {
+                continue;
+              }
+              throw parseErr;
+            }
+          }
+        }
+      }
+      onComplete();
+    } catch (err: unknown) {
+      if (err instanceof DOMException && err.name === "AbortError") {
+        return;
+      }
+      onError(err instanceof Error ? err : new Error(String(err)));
+    }
+  })();
+}
 /**
  * Stream a chat message from the LLM using SSE over fetch.
  *
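
For orientation, a hypothetical call site for the new streamGuestChat() follows. The role/content message shape is an assumption (the diff only uses request.messages), and the surrounding code is not from this commit:

// Hypothetical usage sketch; message shape and call site are assumptions, not code from this commit.
const controller = new AbortController();
let reply = "";

streamGuestChat(
  { messages: [{ role: "user", content: "Hello" }] }, // assumed ChatRequest shape
  (chunk) => { reply += chunk; },                     // tokens accumulate as they stream in
  () => console.log("assistant:", reply),             // stream finished cleanly
  (err) => console.error("guest chat failed:", err),  // HTTP/SSE/network errors land here
  controller.signal                                   // optional; controller.abort() cancels the stream
);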