Compare commits


1 commit

SHA1        Message                              Date
8e3177fbbd  ci: use localadmin user for deploy   2026-03-02 12:05:31 -06:00
            (ci/woodpecker/push/ci: pipeline was successful)
9 changed files with 111 additions and 497 deletions

View File

@@ -340,8 +340,6 @@ steps:
   # ─── Deploy to Docker Swarm (main only) ─────────────────────
-  # ─── Deploy to Docker Swarm via Portainer (main only) ─────────────────────
   deploy-swarm:
     image: alpine:3
     environment:
@@ -349,30 +347,23 @@ steps:
         from_secret: ssh_private_key
       SSH_KNOWN_HOSTS:
         from_secret: ssh_known_hosts
-      PORTAINER_URL:
-        from_secret: portainer_url
-      PORTAINER_API_KEY:
-        from_secret: portainer_api_key
     commands:
-      - apk add --no-cache curl openssh-client
+      - apk add --no-cache openssh-client
       - |
        set -e
-        echo "🚀 Deploying to Docker Swarm..."
-        # Setup SSH for fallback
+        # Setup SSH
        mkdir -p ~/.ssh
        echo "$SSH_KNOWN_HOSTS" > ~/.ssh/known_hosts
        chmod 600 ~/.ssh/known_hosts
        echo "$SSH_PRIVATE_KEY" > ~/.ssh/id_ed25519
        chmod 600 ~/.ssh/id_ed25519
-        # Force service updates (images are pulled from public registry)
+        # Deploy to swarm
+        echo "🚀 Deploying to Docker Swarm..."
        ssh -o StrictHostKeyChecking=no localadmin@10.1.1.45 \
-          "docker service update --with-registry-auth --force mosaic-stack-api && \
-           docker service update --with-registry-auth --force mosaic-stack-web && \
-           docker service update --with-registry-auth --force mosaic-stack-orchestrator && \
-           docker service update --with-registry-auth --force mosaic-stack-coordinator && \
-           echo '✅ All services updated'"
+          "cd /opt/mosaic-stack && \
+           docker login git.mosaicstack.dev -u \$(echo \$GITEA_USER) -p \$GITEA_TOKEN || true && \
+           docker stack deploy -c docker-compose.yml mosaic"
    when:
      - branch: [main]
        event: [push, manual, tag]

View File

@@ -1,13 +0,0 @@
--- MS21: Add admin, local auth, and invitation fields to users table
--- These columns were added to schema.prisma but never captured in a migration.
-ALTER TABLE "users"
-ADD COLUMN IF NOT EXISTS "deactivated_at" TIMESTAMPTZ,
-ADD COLUMN IF NOT EXISTS "is_local_auth" BOOLEAN NOT NULL DEFAULT false,
-ADD COLUMN IF NOT EXISTS "password_hash" TEXT,
-ADD COLUMN IF NOT EXISTS "invited_by" UUID,
-ADD COLUMN IF NOT EXISTS "invitation_token" TEXT,
-ADD COLUMN IF NOT EXISTS "invited_at" TIMESTAMPTZ;
-
--- CreateIndex
-CREATE UNIQUE INDEX IF NOT EXISTS "users_invitation_token_key" ON "users"("invitation_token");

View File

@@ -1,4 +1,14 @@
-import { Body, Controller, HttpException, Logger, Post, Req, Res, UseGuards } from "@nestjs/common";
+import {
+  Body,
+  Controller,
+  HttpException,
+  Logger,
+  Post,
+  Req,
+  Res,
+  UnauthorizedException,
+  UseGuards,
+} from "@nestjs/common";
 import type { Response } from "express";
 import { AuthGuard } from "../auth/guards/auth.guard";
 import type { MaybeAuthenticatedRequest } from "../auth/types/better-auth-request.interface";
@@ -6,72 +16,16 @@ import { ChatStreamDto } from "./chat-proxy.dto";
 import { ChatProxyService } from "./chat-proxy.service";
 
 @Controller("chat")
+@UseGuards(AuthGuard)
 export class ChatProxyController {
   private readonly logger = new Logger(ChatProxyController.name);
 
   constructor(private readonly chatProxyService: ChatProxyService) {}
 
-  // POST /api/chat/guest
-  // Guest chat endpoint - no authentication required
-  // Uses a shared LLM configuration for unauthenticated users
-  @Post("guest")
-  async guestChat(
-    @Body() body: ChatStreamDto,
-    @Req() req: MaybeAuthenticatedRequest,
-    @Res() res: Response
-  ): Promise<void> {
-    const abortController = new AbortController();
-    req.once("close", () => {
-      abortController.abort();
-    });
-
-    res.setHeader("Content-Type", "text/event-stream");
-    res.setHeader("Cache-Control", "no-cache");
-    res.setHeader("Connection", "keep-alive");
-    res.setHeader("X-Accel-Buffering", "no");
-
-    try {
-      const upstreamResponse = await this.chatProxyService.proxyGuestChat(
-        body.messages,
-        abortController.signal
-      );
-
-      const upstreamContentType = upstreamResponse.headers.get("content-type");
-      if (upstreamContentType) {
-        res.setHeader("Content-Type", upstreamContentType);
-      }
-
-      if (!upstreamResponse.body) {
-        throw new Error("LLM response did not include a stream body");
-      }
-
-      for await (const chunk of upstreamResponse.body as unknown as AsyncIterable<Uint8Array>) {
-        if (res.writableEnded || res.destroyed) {
-          break;
-        }
-        res.write(Buffer.from(chunk));
-      }
-    } catch (error: unknown) {
-      this.logStreamError(error);
-      if (!res.writableEnded && !res.destroyed) {
-        res.write("event: error\n");
-        res.write(`data: ${JSON.stringify({ error: this.toSafeClientMessage(error) })}\n\n`);
-      }
-    } finally {
-      if (!res.writableEnded && !res.destroyed) {
-        res.end();
-      }
-    }
-  }
-
   // POST /api/chat/stream
   // Request: { messages: Array<{role, content}> }
   // Response: SSE stream of chat completion events
-  // Requires authentication - uses user's personal OpenClaw container
   @Post("stream")
-  @UseGuards(AuthGuard)
   async streamChat(
     @Body() body: ChatStreamDto,
     @Req() req: MaybeAuthenticatedRequest,
@@ -79,8 +33,7 @@ export class ChatProxyController {
   ): Promise<void> {
     const userId = req.user?.id;
     if (!userId) {
-      this.logger.warn("streamChat called without user ID after AuthGuard");
-      throw new HttpException("Authentication required", 401);
+      throw new UnauthorizedException("No authenticated user found on request");
     }
 
     const abortController = new AbortController();
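
Note: the class-level @UseGuards(AuthGuard) now covers every route in the controller, which is what makes the per-route guard and the manual HttpException(..., 401) check redundant. A minimal self-contained sketch of the pattern, with an illustrative stand-in guard rather than the repository's AuthGuard:

import {
  CanActivate,
  Controller,
  ExecutionContext,
  Injectable,
  Post,
  Req,
  UnauthorizedException,
  UseGuards,
} from "@nestjs/common";

// Illustrative guard: the real AuthGuard presumably validates a session;
// here we only check that some upstream middleware attached a user.
@Injectable()
class ExampleAuthGuard implements CanActivate {
  canActivate(context: ExecutionContext): boolean {
    const req = context.switchToHttp().getRequest<{ user?: { id: string } }>();
    return req.user !== undefined;
  }
}

@Controller("chat")
@UseGuards(ExampleAuthGuard) // class-level: applies to every handler below
class ExampleChatController {
  @Post("stream") // no per-route @UseGuards needed anymore
  streamChat(@Req() req: { user?: { id: string } }): { userId: string } {
    const userId = req.user?.id;
    if (!userId) {
      // Defensive only; unreachable once the guard has run, as in the diff.
      throw new UnauthorizedException("No authenticated user found on request");
    }
    return { userId };
  }
}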

View File

@@ -4,14 +4,11 @@ import {
   Logger,
   ServiceUnavailableException,
 } from "@nestjs/common";
-import { ConfigService } from "@nestjs/config";
 import { ContainerLifecycleService } from "../container-lifecycle/container-lifecycle.service";
 import { PrismaService } from "../prisma/prisma.service";
 import type { ChatMessage } from "./chat-proxy.dto";
 
 const DEFAULT_OPENCLAW_MODEL = "openclaw:default";
-const DEFAULT_GUEST_LLM_URL = "http://10.1.1.42:11434/v1";
-const DEFAULT_GUEST_LLM_MODEL = "llama3.2";
 
 interface ContainerConnection {
   url: string;
@@ -24,8 +21,7 @@ export class ChatProxyService {
   constructor(
     private readonly prisma: PrismaService,
-    private readonly containerLifecycle: ContainerLifecycleService,
-    private readonly config: ConfigService
+    private readonly containerLifecycle: ContainerLifecycleService
   ) {}
 
   // Get the user's OpenClaw container URL and mark it active.
@@ -83,65 +79,6 @@
     }
   }
 
-  /**
-   * Proxy guest chat request to configured LLM endpoint.
-   * Uses environment variables for configuration:
-   * - GUEST_LLM_URL: OpenAI-compatible endpoint URL
-   * - GUEST_LLM_API_KEY: API key (optional, for cloud providers)
-   * - GUEST_LLM_MODEL: Model name to use
-   */
-  async proxyGuestChat(messages: ChatMessage[], signal?: AbortSignal): Promise<Response> {
-    const llmUrl = this.config.get<string>("GUEST_LLM_URL") ?? DEFAULT_GUEST_LLM_URL;
-    const llmApiKey = this.config.get<string>("GUEST_LLM_API_KEY");
-    const llmModel = this.config.get<string>("GUEST_LLM_MODEL") ?? DEFAULT_GUEST_LLM_MODEL;
-
-    const headers: Record<string, string> = {
-      "Content-Type": "application/json",
-    };
-    if (llmApiKey) {
-      headers.Authorization = `Bearer ${llmApiKey}`;
-    }
-
-    const requestInit: RequestInit = {
-      method: "POST",
-      headers,
-      body: JSON.stringify({
-        messages,
-        model: llmModel,
-        stream: true,
-      }),
-    };
-    if (signal) {
-      requestInit.signal = signal;
-    }
-
-    try {
-      this.logger.debug(`Guest chat proxying to ${llmUrl} with model ${llmModel}`);
-      const response = await fetch(`${llmUrl}/chat/completions`, requestInit);
-
-      if (!response.ok) {
-        const detail = await this.readResponseText(response);
-        const status = `${String(response.status)} ${response.statusText}`.trim();
-        this.logger.warn(
-          detail ? `Guest LLM returned ${status}: ${detail}` : `Guest LLM returned ${status}`
-        );
-        throw new BadGatewayException(`Guest LLM returned ${status}`);
-      }
-
-      return response;
-    } catch (error: unknown) {
-      if (error instanceof BadGatewayException) {
-        throw error;
-      }
-      const message = error instanceof Error ? error.message : String(error);
-      this.logger.warn(`Failed to proxy guest chat request: ${message}`);
-      throw new ServiceUnavailableException("Failed to proxy guest chat to LLM");
-    }
-  }
-
   private async getContainerConnection(userId: string): Promise<ContainerConnection> {
     const connection = await this.containerLifecycle.ensureRunning(userId);
     await this.containerLifecycle.touch(userId);

View File

@@ -601,21 +601,9 @@ class TestCoordinatorIntegration:
         coordinator = Coordinator(queue_manager=queue_manager, poll_interval=0.02)
         task = asyncio.create_task(coordinator.start())
 
-        # Poll for completion with timeout instead of fixed sleep
-        deadline = asyncio.get_event_loop().time() + 5.0  # 5 second timeout
-        while asyncio.get_event_loop().time() < deadline:
-            all_completed = True
-            for i in range(157, 162):
-                item = queue_manager.get_item(i)
-                if item is None or item.status != QueueItemStatus.COMPLETED:
-                    all_completed = False
-                    break
-            if all_completed:
-                break
-            await asyncio.sleep(0.05)
+        await asyncio.sleep(0.5)  # Allow time for processing
 
         await coordinator.stop()
         task.cancel()
         try:
             await task

View File

@@ -352,7 +352,7 @@ export const Chat = forwardRef<ChatRef, ChatProps>(function Chat(
           <div className="mx-auto max-w-4xl px-4 py-4 lg:px-8">
             <ChatInput
               onSend={handleSendMessage}
-              disabled={isChatLoading}
+              disabled={isChatLoading || !user}
               inputRef={inputRef}
               isStreaming={isStreaming}
               onStopStreaming={abortStream}
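
Note: with the guest endpoint removed, the input is now gated on authentication as well as on request state. A minimal sketch of that gating, assuming a user object from the app's auth state (component and prop names here are illustrative):

import type { JSX } from "react";

// Illustrative stand-in for the ChatInput wiring shown in the diff:
// disabled both while a request is in flight and when nobody is signed in.
export function ExampleChatInput(props: {
  isChatLoading: boolean;
  user: { id: string } | null;
}): JSX.Element {
  const disabled = props.isChatLoading || !props.user;
  return (
    <input
      disabled={disabled}
      placeholder={props.user ? "Send a message" : "Sign in to chat"}
    />
  );
}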

View File

@@ -7,7 +7,6 @@ import { useState, useCallback, useRef } from "react";
 import {
   sendChatMessage,
   streamChatMessage,
-  streamGuestChat,
   type ChatMessage as ApiChatMessage,
 } from "@/lib/api/chat";
 import { createConversation, updateConversation, getIdea, type Idea } from "@/lib/api/ideas";
@@ -279,131 +278,68 @@ export function useChat(options: UseChatOptions = {}): UseChatReturn {
         return;
       }
 
-      // Streaming failed - check if auth error, try guest mode
-      const isAuthError =
-        err instanceof Error &&
-        (err.message.includes("403") ||
-          err.message.includes("401") ||
-          err.message.includes("auth") ||
-          err.message.includes("Forbidden"));
-
-      if (isAuthError) {
-        console.warn("Auth failed, trying guest chat mode");
-
-        // Try guest chat streaming
-        try {
-          await new Promise<void>((guestResolve, guestReject) => {
-            let hasReceivedData = false;
-            streamGuestChat(
-              request,
-              (chunk: string) => {
-                if (!hasReceivedData) {
-                  hasReceivedData = true;
-                  setIsLoading(false);
-                  setIsStreaming(true);
-                  setMessages((prev) => {
-                    const updated = [...prev, { ...placeholderMessage }];
-                    messagesRef.current = updated;
-                    return updated;
-                  });
-                }
-                setMessages((prev) => {
-                  const updated = prev.map((msg) =>
-                    msg.id === assistantMessageId ? { ...msg, content: msg.content + chunk } : msg
-                  );
-                  messagesRef.current = updated;
-                  return updated;
-                });
-              },
-              () => {
-                streamingSucceeded = true;
-                setIsStreaming(false);
-                guestResolve();
-              },
-              (guestErr: Error) => {
-                guestReject(guestErr);
-              },
-              controller.signal
-            );
-          });
-        } catch (guestErr: unknown) {
-          // Guest also failed
-          setMessages((prev) => {
-            const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId);
-            messagesRef.current = withoutPlaceholder;
-            return withoutPlaceholder;
-          });
-          const errorMsg = guestErr instanceof Error ? guestErr.message : "Chat unavailable";
-          setError(`Unable to connect to chat: ${errorMsg}`);
-          setIsLoading(false);
-          return;
-        }
-      } else {
-        // Streaming failed — fall back to non-streaming
-        console.warn("Streaming failed, falling back to non-streaming", {
-          error: err instanceof Error ? err : new Error(String(err)),
-        });
-
-        setMessages((prev) => {
-          const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId);
-          messagesRef.current = withoutPlaceholder;
-          return withoutPlaceholder;
-        });
-        setIsStreaming(false);
-
-        try {
-          const response = await sendChatMessage(request);
-
-          const assistantMessage: Message = {
-            id: `assistant-${Date.now().toString()}`,
-            role: "assistant",
-            content: response.message.content,
-            createdAt: new Date().toISOString(),
-            model: response.model,
-            promptTokens: response.promptEvalCount ?? 0,
-            completionTokens: response.evalCount ?? 0,
-            totalTokens: (response.promptEvalCount ?? 0) + (response.evalCount ?? 0),
-          };
-
-          setMessages((prev) => {
-            const updated = [...prev, assistantMessage];
-            messagesRef.current = updated;
-            return updated;
-          });
-
-          streamingSucceeded = true;
-        } catch (fallbackErr: unknown) {
-          const errorMsg =
-            fallbackErr instanceof Error ? fallbackErr.message : "Failed to send message";
-          setError("Unable to send message. Please try again.");
-          onError?.(fallbackErr instanceof Error ? fallbackErr : new Error(errorMsg));
-          console.error("Failed to send chat message", {
-            error: fallbackErr,
-            errorType: "LLM_ERROR",
-            conversationId: conversationIdRef.current,
-            messageLength: content.length,
-            messagePreview: content.substring(0, 50),
-            model,
-            messageCount: messagesRef.current.length,
-            timestamp: new Date().toISOString(),
-          });
-
-          const errorMessage: Message = {
-            id: `error-${String(Date.now())}`,
-            role: "assistant",
-            content: "Something went wrong. Please try again.",
-            createdAt: new Date().toISOString(),
-          };
-          setMessages((prev) => {
-            const updated = [...prev, errorMessage];
-            messagesRef.current = updated;
-            return updated;
-          });
-          setIsLoading(false);
-          return;
-        }
-      }
+      // Streaming failed — fall back to non-streaming
+      console.warn("Streaming failed, falling back to non-streaming", {
+        error: err instanceof Error ? err : new Error(String(err)),
+      });
+
+      setMessages((prev) => {
+        const withoutPlaceholder = prev.filter((m) => m.id !== assistantMessageId);
+        messagesRef.current = withoutPlaceholder;
+        return withoutPlaceholder;
+      });
+      setIsStreaming(false);
+
+      try {
+        const response = await sendChatMessage(request);
+
+        const assistantMessage: Message = {
+          id: `assistant-${Date.now().toString()}`,
+          role: "assistant",
+          content: response.message.content,
+          createdAt: new Date().toISOString(),
+          model: response.model,
+          promptTokens: response.promptEvalCount ?? 0,
+          completionTokens: response.evalCount ?? 0,
+          totalTokens: (response.promptEvalCount ?? 0) + (response.evalCount ?? 0),
+        };
+
+        setMessages((prev) => {
+          const updated = [...prev, assistantMessage];
+          messagesRef.current = updated;
+          return updated;
+        });
+
+        streamingSucceeded = true;
+      } catch (fallbackErr: unknown) {
+        const errorMsg =
+          fallbackErr instanceof Error ? fallbackErr.message : "Failed to send message";
+        setError("Unable to send message. Please try again.");
+        onError?.(fallbackErr instanceof Error ? fallbackErr : new Error(errorMsg));
+        console.error("Failed to send chat message", {
+          error: fallbackErr,
+          errorType: "LLM_ERROR",
+          conversationId: conversationIdRef.current,
+          messageLength: content.length,
+          messagePreview: content.substring(0, 50),
+          model,
+          messageCount: messagesRef.current.length,
+          timestamp: new Date().toISOString(),
+        });
+
+        const errorMessage: Message = {
+          id: `error-${String(Date.now())}`,
+          role: "assistant",
+          content: "Something went wrong. Please try again.",
+          createdAt: new Date().toISOString(),
+        };
+        setMessages((prev) => {
+          const updated = [...prev, errorMessage];
+          messagesRef.current = updated;
+          return updated;
+        });
+        setIsLoading(false);
+        return;
+      }
     }
   }
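
Note: the error path is now a single ladder: stream, and on any failure retry once without streaming (previously an auth-sniffing branch diverted 401/403 errors into guest mode first). A condensed sketch of the surviving flow, with declared stand-ins for the hook's real dependencies:

type Message = { id: string; role: "assistant"; content: string };

// Stand-ins for the real API client functions imported by the hook.
declare function streamChatMessage(
  prompt: string,
  onChunk: (chunk: string) => void
): Promise<void>;
declare function sendChatMessage(prompt: string): Promise<{ content: string }>;

// Try SSE streaming first; on any error drop the streaming placeholder and
// retry once through the plain request/response endpoint.
export async function sendWithFallback(
  prompt: string,
  setMessages: (update: (prev: Message[]) => Message[]) => void
): Promise<void> {
  const placeholderId = "assistant-pending";
  setMessages((prev) => [...prev, { id: placeholderId, role: "assistant", content: "" }]);
  try {
    await streamChatMessage(prompt, (chunk) => {
      setMessages((prev) =>
        prev.map((m) => (m.id === placeholderId ? { ...m, content: m.content + chunk } : m))
      );
    });
  } catch (err) {
    console.warn("Streaming failed, falling back to non-streaming", { error: err });
    setMessages((prev) => prev.filter((m) => m.id !== placeholderId)); // drop placeholder
    const response = await sendChatMessage(prompt); // errors here surface to the caller
    setMessages((prev) => [
      ...prev,
      { id: `assistant-${String(Date.now())}`, role: "assistant", content: response.content },
    ]);
  }
}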

View File

@@ -92,141 +92,6 @@ async function ensureCsrfTokenForStream(): Promise<string> {
   return fetchCsrfToken();
 }
 
-/**
- * Stream a guest chat message (no authentication required).
- * Uses /api/chat/guest endpoint with shared LLM configuration.
- *
- * @param request - Chat request
- * @param onChunk - Called with each token string as it arrives
- * @param onComplete - Called when the stream finishes successfully
- * @param onError - Called if the stream encounters an error
- * @param signal - Optional AbortSignal for cancellation
- */
-export function streamGuestChat(
-  request: ChatRequest,
-  onChunk: (chunk: string) => void,
-  onComplete: () => void,
-  onError: (error: Error) => void,
-  signal?: AbortSignal
-): void {
-  void (async (): Promise<void> => {
-    try {
-      const response = await fetch(`${API_BASE_URL}/api/chat/guest`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
-        },
-        credentials: "include",
-        body: JSON.stringify({ messages: request.messages, stream: true }),
-        signal: signal ?? null,
-      });
-
-      if (!response.ok) {
-        const errorText = await response.text().catch(() => response.statusText);
-        throw new Error(`Guest chat failed: ${errorText}`);
-      }
-
-      if (!response.body) {
-        throw new Error("Response body is not readable");
-      }
-
-      const reader = response.body.getReader();
-      const decoder = new TextDecoder("utf-8");
-      let buffer = "";
-      let readerDone = false;
-
-      while (!readerDone) {
-        const { done, value } = await reader.read();
-        readerDone = done;
-        if (done) {
-          break;
-        }
-
-        buffer += decoder.decode(value, { stream: true });
-
-        // SSE messages are separated by double newlines
-        const parts = buffer.split("\n\n");
-        buffer = parts.pop() ?? "";
-
-        for (const part of parts) {
-          const trimmed = part.trim();
-          if (!trimmed) continue;
-
-          // Handle event: error format
-          const eventMatch = /^event:\s*(\S+)\n/i.exec(trimmed);
-          const dataMatch = /^data:\s*(.+)$/im.exec(trimmed);
-          if (eventMatch?.[1] === "error" && dataMatch?.[1]) {
-            try {
-              const errorData = JSON.parse(dataMatch[1].trim()) as {
-                error?: string;
-              };
-              throw new Error(errorData.error ?? "Stream error occurred");
-            } catch (parseErr) {
-              if (parseErr instanceof SyntaxError) {
-                throw new Error("Stream error occurred");
-              }
-              throw parseErr;
-            }
-          }
-
-          // Standard SSE format: data: {...}
-          for (const line of trimmed.split("\n")) {
-            if (!line.startsWith("data: ")) continue;
-            const data = line.slice("data: ".length).trim();
-            if (data === "[DONE]") {
-              onComplete();
-              return;
-            }
-
-            try {
-              const parsed: unknown = JSON.parse(data);
-
-              // Handle OpenAI format
-              const openAiChunk = parsed as OpenAiSseChunk;
-              if (openAiChunk.choices?.[0]?.delta?.content) {
-                onChunk(openAiChunk.choices[0].delta.content);
-                continue;
-              }
-
-              // Handle simple token format
-              const simpleChunk = parsed as SimpleTokenChunk;
-              if (simpleChunk.token) {
-                onChunk(simpleChunk.token);
-                continue;
-              }
-              if (simpleChunk.done === true) {
-                onComplete();
-                return;
-              }
-
-              const error = openAiChunk.error ?? simpleChunk.error;
-              if (error) {
-                throw new Error(error);
-              }
-            } catch (parseErr) {
-              if (parseErr instanceof SyntaxError) {
-                continue;
-              }
-              throw parseErr;
-            }
-          }
-        }
-      }
-
-      onComplete();
-    } catch (err: unknown) {
-      if (err instanceof DOMException && err.name === "AbortError") {
-        return;
-      }
-      onError(err instanceof Error ? err : new Error(String(err)));
-    }
-  })();
-}
-
 /**
  * Stream a chat message from the LLM using SSE over fetch.
  *
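
Note: the deleted streamGuestChat was, at its core, an SSE frame parser: buffer incoming text, split frames on blank lines, read data: payloads, and stop at the [DONE] sentinel. The retained streamChatMessage presumably parses the same wire format over the authenticated endpoint. A standalone sketch of just the buffering step, not the repository's implementation:

// Feed raw SSE text in as it arrives. Returns the complete `data:` payloads
// found so far plus the unconsumed tail to carry into the next call.
export function drainSseBuffer(buffer: string): {
  events: string[];
  done: boolean;
  rest: string;
} {
  const frames = buffer.split("\n\n"); // SSE frames are terminated by a blank line
  const rest = frames.pop() ?? ""; // the last piece may be an incomplete frame
  const events: string[] = [];
  for (const frame of frames) {
    for (const line of frame.split("\n")) {
      if (!line.startsWith("data: ")) continue;
      const data = line.slice("data: ".length).trim();
      if (data === "[DONE]") return { events, done: true, rest: "" }; // end sentinel
      events.push(data);
    }
  }
  return { events, done: false, rest };
}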

View File

@@ -9,8 +9,6 @@
 #   - OpenBao: Standalone container (see docker-compose.openbao.yml)
 #   - Authentik: External OIDC provider
 #   - Ollama: External AI inference
-#   - PostgreSQL: Provided by the openbrain stack (openbrain_brain-db)
-#     Deploy openbrain stack before this stack.
 #
 # Usage (Portainer):
 #   1. Stacks -> Add Stack -> Upload or paste
@@ -38,75 +36,37 @@
 # Required vars use plain ${VAR} — the app validates at startup.
 #
 # ==============================================
-# DATABASE (openbrain_brain-db — external)
-# ==============================================
-#
-# This stack uses the PostgreSQL instance from the openbrain stack.
-# The openbrain stack must be deployed first and its brain-internal
-# overlay network must exist.
-#
-# Required env vars for DB access:
-#   BRAIN_DB_ADMIN_USER — openbrain superuser (default: openbrain)
-#   BRAIN_DB_ADMIN_PASSWORD — openbrain superuser password
-#     (must match openbrain stack POSTGRES_PASSWORD)
-#   POSTGRES_USER — mosaic application DB user (created by mosaic-db-init)
-#   POSTGRES_PASSWORD — mosaic application DB password
-#   POSTGRES_DB — mosaic application database name (default: mosaic)
-#
-# ==============================================
 
 services:
   # ============================================
-  # DATABASE INIT
+  # CORE INFRASTRUCTURE
   # ============================================
 
   # ======================
-  # Mosaic Database Init
+  # PostgreSQL Database
   # ======================
-  # Creates the mosaic application user and database in the shared
-  # openbrain PostgreSQL instance (openbrain_brain-db).
-  # Runs once and exits. Idempotent — safe to run on every deploy.
-  mosaic-db-init:
-    image: postgres:17-alpine
+  postgres:
+    image: git.mosaicstack.dev/mosaic/stack-postgres:${IMAGE_TAG:-latest}
     environment:
-      PGHOST: openbrain_brain-db
-      PGPORT: 5432
-      PGUSER: ${BRAIN_DB_ADMIN_USER:-openbrain}
-      PGPASSWORD: ${BRAIN_DB_ADMIN_PASSWORD}
-      MOSAIC_USER: ${POSTGRES_USER}
-      MOSAIC_PASSWORD: ${POSTGRES_PASSWORD}
-      MOSAIC_DB: ${POSTGRES_DB:-mosaic}
-    entrypoint: ["sh", "-c"]
-    command:
-      - |
-        until pg_isready -h openbrain_brain-db -p 5432 -U $${PGUSER}; do
-          echo "Waiting for openbrain_brain-db..."
-          sleep 2
-        done
-        echo "Database ready. Creating mosaic user and database..."
-        psql -h openbrain_brain-db -U $${PGUSER} -tc "SELECT 1 FROM pg_roles WHERE rolname='$${MOSAIC_USER}'" | grep -q 1 || \
-          psql -h openbrain_brain-db -U $${PGUSER} -c "CREATE USER $${MOSAIC_USER} WITH PASSWORD '$${MOSAIC_PASSWORD}';"
-        psql -h openbrain_brain-db -U $${PGUSER} -tc "SELECT 1 FROM pg_database WHERE datname='$${MOSAIC_DB}'" | grep -q 1 || \
-          psql -h openbrain_brain-db -U $${PGUSER} -c "CREATE DATABASE $${MOSAIC_DB} OWNER $${MOSAIC_USER} ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' TEMPLATE template0;"
-        echo "Enabling required extensions in $${MOSAIC_DB}..."
-        psql -h openbrain_brain-db -U $${PGUSER} -d $${MOSAIC_DB} -c "CREATE EXTENSION IF NOT EXISTS vector;"
-        psql -h openbrain_brain-db -U $${PGUSER} -d $${MOSAIC_DB} -c "CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";"
-        echo "Mosaic database ready: $${MOSAIC_DB}"
+      POSTGRES_USER: ${POSTGRES_USER}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+      POSTGRES_DB: ${POSTGRES_DB}
+      POSTGRES_SHARED_BUFFERS: ${POSTGRES_SHARED_BUFFERS:-256MB}
+      POSTGRES_EFFECTIVE_CACHE_SIZE: ${POSTGRES_EFFECTIVE_CACHE_SIZE:-1GB}
+      POSTGRES_MAX_CONNECTIONS: ${POSTGRES_MAX_CONNECTIONS:-100}
+    volumes:
+      - postgres_data:/var/lib/postgresql/data
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
+      interval: 10s
+      timeout: 5s
+      retries: 5
+      start_period: 30s
     networks:
-      - openbrain-brain-internal
+      - internal
     deploy:
      restart_policy:
        condition: on-failure
-        delay: 5s
-        max_attempts: 5
-
-  # ============================================
-  # CORE INFRASTRUCTURE
-  # ============================================
 
   # ======================
   # Valkey Cache
@@ -145,7 +105,7 @@ services:
       NODE_ENV: production
       PORT: ${API_PORT:-3001}
       API_HOST: ${API_HOST:-0.0.0.0}
-      DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@openbrain_brain-db:5432/${POSTGRES_DB:-mosaic}
+      DATABASE_URL: postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB}
       VALKEY_URL: redis://valkey:6379
       # Auth (external Authentik)
       OIDC_ENABLED: ${OIDC_ENABLED:-false}
@@ -203,7 +163,6 @@ services:
     networks:
       - internal
       - traefik-public
-      - openbrain-brain-internal
     deploy:
       restart_policy:
         condition: on-failure
@@ -348,36 +307,36 @@ services:
   # ======================
   # Synapse Database Init
   # ======================
-  # Creates the 'synapse' database in the shared openbrain PostgreSQL instance.
+  # Creates the 'synapse' database in the shared PostgreSQL instance.
   # Runs once and exits. Idempotent — safe to run on every deploy.
   synapse-db-init:
     image: postgres:17-alpine
     environment:
-      PGHOST: openbrain_brain-db
+      PGHOST: postgres
       PGPORT: 5432
-      PGUSER: ${BRAIN_DB_ADMIN_USER:-openbrain}
-      PGPASSWORD: ${BRAIN_DB_ADMIN_PASSWORD}
+      PGUSER: ${POSTGRES_USER}
+      PGPASSWORD: ${POSTGRES_PASSWORD}
       SYNAPSE_DB: ${SYNAPSE_POSTGRES_DB}
       SYNAPSE_USER: ${SYNAPSE_POSTGRES_USER}
       SYNAPSE_PASSWORD: ${SYNAPSE_POSTGRES_PASSWORD}
     entrypoint: ["sh", "-c"]
     command:
       - |
-        until pg_isready -h openbrain_brain-db -p 5432 -U $${PGUSER}; do
-          echo "Waiting for openbrain_brain-db..."
+        until pg_isready -h postgres -p 5432 -U $${PGUSER}; do
+          echo "Waiting for PostgreSQL..."
          sleep 2
        done
-        echo "Database ready. Creating Synapse user and database..."
-        psql -h openbrain_brain-db -U $${PGUSER} -tc "SELECT 1 FROM pg_roles WHERE rolname='$${SYNAPSE_USER}'" | grep -q 1 || \
-          psql -h openbrain_brain-db -U $${PGUSER} -c "CREATE USER $${SYNAPSE_USER} WITH PASSWORD '$${SYNAPSE_PASSWORD}';"
-        psql -h openbrain_brain-db -U $${PGUSER} -tc "SELECT 1 FROM pg_database WHERE datname='$${SYNAPSE_DB}'" | grep -q 1 || \
-          psql -h openbrain_brain-db -U $${PGUSER} -c "CREATE DATABASE $${SYNAPSE_DB} OWNER $${SYNAPSE_USER} ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' TEMPLATE template0;"
+        echo "PostgreSQL is ready. Creating Synapse database and user..."
+        psql -h postgres -U $${PGUSER} -tc "SELECT 1 FROM pg_roles WHERE rolname='$${SYNAPSE_USER}'" | grep -q 1 || \
+          psql -h postgres -U $${PGUSER} -c "CREATE USER $${SYNAPSE_USER} WITH PASSWORD '$${SYNAPSE_PASSWORD}';"
+        psql -h postgres -U $${PGUSER} -tc "SELECT 1 FROM pg_database WHERE datname='$${SYNAPSE_DB}'" | grep -q 1 || \
+          psql -h postgres -U $${PGUSER} -c "CREATE DATABASE $${SYNAPSE_DB} OWNER $${SYNAPSE_USER} ENCODING 'UTF8' LC_COLLATE='C' LC_CTYPE='C' TEMPLATE template0;"
         echo "Synapse database ready: $${SYNAPSE_DB}"
     networks:
-      - openbrain-brain-internal
+      - internal
     deploy:
       restart_policy:
         condition: on-failure
@@ -492,6 +451,7 @@ services:
 # Volumes
 # ======================
 volumes:
+  postgres_data:
   valkey_data:
   orchestrator_workspace:
   speaches_models:
@@ -504,6 +464,3 @@
     driver: overlay
   traefik-public:
     external: true
-  openbrain-brain-internal:
-    external: true
-    name: openbrain_brain-internal