feat(chat): add guest chat mode for unauthenticated users
Some checks failed
ci/woodpecker/push/ci Pipeline failed
- Add POST /api/chat/guest endpoint (no auth required)
- Add proxyGuestChat() method using configurable LLM endpoint
- Add streamGuestChat() function to frontend chat API
- Modify useChat to fall back to guest mode on auth errors (403/401)
- Remove !user check from ChatInput disabled prop
- Configure guest LLM via env vars: GUEST_LLM_URL, GUEST_LLM_API_KEY, GUEST_LLM_MODEL
- Default guest LLM: http://10.1.1.42:11434/v1 (Ollama) with llama3.2 model
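Of these, only streamGuestChat() appears in the diff excerpt below. The useChat fallback might look like the following minimal sketch, where streamChat() (the authenticated path) and the string-based 401/403 detection are assumptions rather than code from this commit:

// Hypothetical sketch of the guest-mode fallback; streamChat() is declared
// here only so the sketch type-checks and is not part of this commit.
declare function streamChat(
  request: ChatRequest,
  onChunk: (chunk: string) => void,
  onComplete: () => void,
  onError: (error: Error) => void
): void;

function sendWithGuestFallback(
  request: ChatRequest,
  onChunk: (chunk: string) => void,
  onComplete: () => void,
  onError: (error: Error) => void
): void {
  streamChat(request, onChunk, onComplete, (err) => {
    // On a 401/403 auth error, retry once through the guest endpoint.
    if (/\b(401|403)\b/.test(err.message)) {
      streamGuestChat(request, onChunk, onComplete, onError);
    } else {
      onError(err);
    }
  });
}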
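On the backend, the guest LLM settings come from the environment variables listed above; a minimal sketch of resolving the documented defaults, assuming a Node-style server (the actual proxyGuestChat() implementation is not part of the excerpt below):

// Assumed config shape; only the variable names and defaults below come
// from the commit message. Requires Node-style process.env access.
const guestLlmConfig = {
  url: process.env.GUEST_LLM_URL ?? "http://10.1.1.42:11434/v1", // Ollama default
  apiKey: process.env.GUEST_LLM_API_KEY ?? "",
  model: process.env.GUEST_LLM_MODEL ?? "llama3.2",
};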
@@ -92,6 +92,141 @@ async function ensureCsrfTokenForStream(): Promise<string> {
return fetchCsrfToken();
}

/**
 * Stream a guest chat message (no authentication required).
 * Uses /api/chat/guest endpoint with shared LLM configuration.
 *
 * @param request - Chat request
 * @param onChunk - Called with each token string as it arrives
 * @param onComplete - Called when the stream finishes successfully
 * @param onError - Called if the stream encounters an error
 * @param signal - Optional AbortSignal for cancellation
 */
export function streamGuestChat(
  request: ChatRequest,
  onChunk: (chunk: string) => void,
  onComplete: () => void,
  onError: (error: Error) => void,
  signal?: AbortSignal
): void {
  void (async (): Promise<void> => {
    try {
      const response = await fetch(`${API_BASE_URL}/api/chat/guest`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        credentials: "include",
        body: JSON.stringify({ messages: request.messages, stream: true }),
        signal: signal ?? null,
      });

      if (!response.ok) {
        const errorText = await response.text().catch(() => response.statusText);
        throw new Error(`Guest chat failed: ${errorText}`);
      }

      if (!response.body) {
        throw new Error("Response body is not readable");
      }

      const reader = response.body.getReader();
      const decoder = new TextDecoder("utf-8");
      let buffer = "";

      let readerDone = false;
      while (!readerDone) {
        const { done, value } = await reader.read();
        readerDone = done;
        if (done) {
          break;
        }

        buffer += decoder.decode(value, { stream: true });

        // SSE messages are separated by double newlines
        const parts = buffer.split("\n\n");
        buffer = parts.pop() ?? "";

        for (const part of parts) {
          const trimmed = part.trim();
          if (!trimmed) continue;

          // Handle event: error format
          const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
          const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);

          if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
            try {
              const errorData = JSON.parse(dataMatch[1].trim()) as {
                error?: string;
              };
              throw new Error(errorData.error ?? "Stream error occurred");
            } catch (parseErr) {
              if (parseErr instanceof SyntaxError) {
                throw new Error("Stream error occurred");
              }
              throw parseErr;
            }
          }

          // Standard SSE format: data: {...}
          for (const line of trimmed.split("\n")) {
            if (!line.startsWith("data: ")) continue;

            const data = line.slice("data: ".length).trim();

            if (data === "[DONE]") {
              onComplete();
              return;
            }

            try {
              const parsed: unknown = JSON.parse(data);

              // Handle OpenAI format
              const openAiChunk = parsed as OpenAiSseChunk;
              if (openAiChunk.choices?.[0]?.delta?.content) {
                onChunk(openAiChunk.choices[0].delta.content);
                continue;
              }

              // Handle simple token format
              const simpleChunk = parsed as SimpleTokenChunk;
              if (simpleChunk.token) {
                onChunk(simpleChunk.token);
                continue;
              }

              if (simpleChunk.done === true) {
                onComplete();
                return;
              }

              const error = openAiChunk.error ?? simpleChunk.error;
              if (error) {
                throw new Error(error);
              }
            } catch (parseErr) {
              if (parseErr instanceof SyntaxError) {
                continue;
              }
              throw parseErr;
            }
          }
        }
      }

      onComplete();
    } catch (err: unknown) {
      if (err instanceof DOMException && err.name === "AbortError") {
        return;
      }
      onError(err instanceof Error ? err : new Error(String(err)));
    }
  })();
}
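
// ---------------------------------------------------------------------------
// Illustrative usage of streamGuestChat() above, not part of this commit.
// The message shape is an assumption based on the OpenAI-style payload the
// function posts to /api/chat/guest.
// ---------------------------------------------------------------------------
const controller = new AbortController();
let reply = "";
streamGuestChat(
  { messages: [{ role: "user", content: "Hello!" }] },
  (chunk) => { reply += chunk; },              // accumulate tokens as they arrive
  () => { console.log("assistant:", reply); }, // stream finished cleanly
  (err) => { console.error("guest chat failed:", err); },
  controller.signal                            // controller.abort() cancels mid-stream
);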

/**
 * Stream a chat message from the LLM using SSE over fetch.
 *