All checks were successful
ci/woodpecker/push/infra Pipeline was successful
- Renamed all jarvis-* to mosaic-* (generic for any deployment)
- Config files are .json.template with ${VAR} placeholders
- entrypoint.sh renders templates via envsubst at startup
- Ollama is optional: set OLLAMA_BASE_URL to auto-inject provider
- Model is configurable via OPENCLAW_MODEL env var
- No hardcoded IPs, keys, model names, or user preferences
- Updated README with full env var reference
54 lines
2.3 KiB
Bash
Executable File
#!/bin/sh
# Mosaic Agent Fleet — OpenClaw container entrypoint
#
# Renders /config/openclaw.json.template to /tmp/openclaw.json via envsubst,
# optionally injects an Ollama model provider (when OLLAMA_BASE_URL is set),
# then execs the OpenClaw gateway.
#
# Required env vars:
#   OPENCLAW_GATEWAY_TOKEN  gateway auth token (generate: openssl rand -hex 32)
# Optional env vars:
#   OLLAMA_BASE_URL         base URL of an Ollama server; enables provider injection
#   OLLAMA_MODEL            Ollama model id (default: cogito)

set -eu

TEMPLATE="/config/openclaw.json.template"
CONFIG="/tmp/openclaw.json"

if [ ! -f "$TEMPLATE" ]; then
  echo "ERROR: Config template not found at $TEMPLATE" >&2
  echo "Mount your config volume at /config with a .json.template file" >&2
  exit 1
fi

# Fail fast with a helpful message if the auth token is missing.
: "${OPENCLAW_GATEWAY_TOKEN:?OPENCLAW_GATEWAY_TOKEN is required (generate: openssl rand -hex 32)}"

# Render template with env var substitution
envsubst < "$TEMPLATE" > "$CONFIG"

# If OLLAMA_BASE_URL is set, inject Ollama provider into config.
# Values are handed to the helper interpreter via the environment (not string
# interpolation into program text), so a URL or model name containing quotes
# cannot break — or inject code into — the generated python/node program.
if [ -n "${OLLAMA_BASE_URL:-}" ]; then
  OLLAMA_MODEL="${OLLAMA_MODEL:-cogito}"
  export CONFIG OLLAMA_BASE_URL OLLAMA_MODEL
  # Use python3 if available, fall back to node
  if command -v python3 >/dev/null 2>&1; then
    python3 - <<'PYEOF'
import json, os

cfg_path = os.environ["CONFIG"]
base_url = os.environ["OLLAMA_BASE_URL"]
model = os.environ["OLLAMA_MODEL"]

with open(cfg_path) as f:
    cfg = json.load(f)

cfg.setdefault("models", {})["mode"] = "merge"
cfg["models"].setdefault("providers", {})["ollama"] = {
    "baseUrl": base_url + "/v1",
    "api": "openai-completions",
    "models": [{
        "id": model,
        "name": model + " (Local)",
        "reasoning": False,
        "input": ["text"],
        "cost": {"input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0},
        "contextWindow": 128000,
        "maxTokens": 8192,
    }],
}

with open(cfg_path, "w") as f:
    json.dump(cfg, f, indent=2)
PYEOF
    echo "Ollama provider added: $OLLAMA_BASE_URL (model: $OLLAMA_MODEL)"
  elif command -v node >/dev/null 2>&1; then
    node -e '
const fs = require("fs");
const cfgPath = process.env.CONFIG;
const model = process.env.OLLAMA_MODEL;
const cfg = JSON.parse(fs.readFileSync(cfgPath, "utf8"));
cfg.models = cfg.models || {};
cfg.models.mode = "merge";
cfg.models.providers = cfg.models.providers || {};
cfg.models.providers.ollama = {
  baseUrl: process.env.OLLAMA_BASE_URL + "/v1",
  api: "openai-completions",
  models: [{
    id: model,
    name: model + " (Local)",
    reasoning: false,
    input: ["text"],
    cost: {input: 0, output: 0, cacheRead: 0, cacheWrite: 0},
    contextWindow: 128000,
    maxTokens: 8192,
  }],
};
fs.writeFileSync(cfgPath, JSON.stringify(cfg, null, 2));
'
    echo "Ollama provider added: $OLLAMA_BASE_URL (model: $OLLAMA_MODEL)"
  else
    echo "WARNING: OLLAMA_BASE_URL set but no python3/node available to inject provider" >&2
  fi
fi

# Point the gateway at the rendered config and hand over PID 1.
export OPENCLAW_CONFIG_PATH="$CONFIG"
exec openclaw gateway run --bind lan --auth token "$@"