Compare commits
1 Commits
feat/feder
...
docs/feder
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
47aac682f5 |
@@ -63,10 +63,8 @@
|
||||
"class-validator": "^0.15.1",
|
||||
"dotenv": "^17.3.1",
|
||||
"fastify": "^5.0.0",
|
||||
"ioredis": "^5.10.0",
|
||||
"node-cron": "^4.2.1",
|
||||
"openai": "^6.32.0",
|
||||
"postgres": "^3.4.8",
|
||||
"reflect-metadata": "^0.2.0",
|
||||
"rxjs": "^7.8.0",
|
||||
"socket.io": "^4.8.0",
|
||||
|
||||
@@ -20,12 +20,10 @@ import { Logger, ValidationPipe } from '@nestjs/common';
|
||||
import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify';
|
||||
import helmet from '@fastify/helmet';
|
||||
import { listSsoStartupWarnings } from '@mosaicstack/auth';
|
||||
import { loadConfig } from '@mosaicstack/config';
|
||||
import { AppModule } from './app.module.js';
|
||||
import { mountAuthHandler } from './auth/auth.controller.js';
|
||||
import { mountMcpHandler } from './mcp/mcp.controller.js';
|
||||
import { McpService } from './mcp/mcp.service.js';
|
||||
import { detectAndAssertTier, TierDetectionError } from '@mosaicstack/storage';
|
||||
|
||||
async function bootstrap(): Promise<void> {
|
||||
const logger = new Logger('Bootstrap');
|
||||
@@ -34,20 +32,6 @@ async function bootstrap(): Promise<void> {
|
||||
throw new Error('BETTER_AUTH_SECRET is required');
|
||||
}
|
||||
|
||||
// Pre-flight: assert all external services required by the configured tier
|
||||
// are reachable. Runs before NestFactory.create() so failures are visible
|
||||
// immediately with actionable remediation hints.
|
||||
const mosaicConfig = loadConfig();
|
||||
try {
|
||||
await detectAndAssertTier(mosaicConfig);
|
||||
} catch (err) {
|
||||
if (err instanceof TierDetectionError) {
|
||||
logger.error(`Tier detection failed: ${err.message}`);
|
||||
logger.error(`Remediation: ${err.remediation}`);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
for (const warning of listSsoStartupWarnings()) {
|
||||
logger.warn(warning);
|
||||
}
|
||||
|
||||
@@ -1,60 +0,0 @@
|
||||
# docker-compose.federated.yml — Federated tier overlay
|
||||
#
|
||||
# USAGE:
|
||||
# docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
#
|
||||
# This file is a standalone overlay for the Mosaic federated tier.
|
||||
# It is NOT an extension of docker-compose.yml — it defines its own services
|
||||
# and named volumes so it can run independently of the base dev stack.
|
||||
#
|
||||
# IMPORTANT — HOST PORT CONFLICTS:
|
||||
# The federated services bind the same host ports as the base dev stack
|
||||
# (5433 for Postgres, 6380 for Valkey). You must stop the base dev stack
|
||||
# before starting the federated stack on the same machine:
|
||||
# docker compose down
|
||||
# docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
#
|
||||
# pgvector extension:
|
||||
# The vector extension is created automatically at first boot via
|
||||
# ./infra/pg-init/01-extensions.sql (CREATE EXTENSION IF NOT EXISTS vector).
|
||||
#
|
||||
# Tier configuration:
|
||||
# Used by `mosaic` instances configured with `tier: federated`.
|
||||
# DEFAULT_FEDERATED_CONFIG points at:
|
||||
# postgresql://mosaic:mosaic@localhost:5433/mosaic
|
||||
|
||||
services:
|
||||
postgres-federated:
|
||||
image: pgvector/pgvector:pg17
|
||||
profiles: [federated]
|
||||
ports:
|
||||
- '${PG_FEDERATED_HOST_PORT:-5433}:5432'
|
||||
environment:
|
||||
POSTGRES_USER: mosaic
|
||||
POSTGRES_PASSWORD: mosaic
|
||||
POSTGRES_DB: mosaic
|
||||
volumes:
|
||||
- pg_federated_data:/var/lib/postgresql/data
|
||||
- ./infra/pg-init:/docker-entrypoint-initdb.d:ro
|
||||
healthcheck:
|
||||
test: ['CMD-SHELL', 'pg_isready -U mosaic']
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
valkey-federated:
|
||||
image: valkey/valkey:8-alpine
|
||||
profiles: [federated]
|
||||
ports:
|
||||
- '${VALKEY_FEDERATED_HOST_PORT:-6380}:6379'
|
||||
volumes:
|
||||
- valkey_federated_data:/data
|
||||
healthcheck:
|
||||
test: ['CMD', 'valkey-cli', 'ping']
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
volumes:
|
||||
pg_federated_data:
|
||||
valkey_federated_data:
|
||||
@@ -1,116 +1,73 @@
|
||||
# Mission Manifest — MVP
|
||||
# Mission Manifest — Install UX v2
|
||||
|
||||
> Top-level rollup tracking Mosaic Stack MVP execution.
|
||||
> Workstreams have their own manifests; this document is the source of truth for MVP scope, status, and history.
|
||||
> Owner: Orchestrator (sole writer).
|
||||
> Persistent document tracking full mission scope, status, and session history.
|
||||
> Updated by the orchestrator at each phase transition and milestone completion.
|
||||
|
||||
## Mission
|
||||
|
||||
**ID:** mvp-20260312
|
||||
**Statement:** Ship a self-hosted, multi-user AI agent platform that consolidates the user's disparate jarvis-brain usage across home and USC workstations into a single coherent system reachable via three first-class surfaces — webUI, TUI, and CLI — with federation as the data-layer mechanism that makes cross-host agent sessions work in real time without copying user data across the boundary.
|
||||
**Phase:** Execution (workstream W1 in planning-complete state)
|
||||
**Current Workstream:** W1 — Federation v1
|
||||
**Progress:** 0 / 1 declared workstreams complete (more workstreams will be declared as scope is refined)
|
||||
**Status:** active (continuous since 2026-03-13)
|
||||
**Last Updated:** 2026-04-19 (manifest authored at the rollup level; install-ux-v2 archived; W1 federation planning landed via PR #468)
|
||||
**Source PRD:** [docs/PRD.md](./PRD.md) — Mosaic Stack v0.1.0
|
||||
**Scratchpad:** [docs/scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md) (active since 2026-03-13; 14 prior sessions of phase-based execution)
|
||||
**ID:** install-ux-v2-20260405
|
||||
**Statement:** The install-ux-hardening mission shipped the plumbing (uninstall, masked password, hooks consent, unified flow, headless path), but the first real end-to-end run surfaced a critical regression and a collection of UX failings that make the wizard feel neither quick nor intelligent. This mission closes the bootstrap regression as a hotfix, then rethinks the first-run experience around a provider-first, intent-driven flow with a drill-down main menu and a genuinely fast quick-start.
|
||||
**Phase:** Execution
|
||||
**Current Milestone:** IUV-M03
|
||||
**Progress:** 2 / 3 milestones
|
||||
**Status:** active
|
||||
**Last Updated:** 2026-04-05 (IUV-M02 complete — CORS/FQDN + skill installer rework)
|
||||
**Parent Mission:** [install-ux-hardening-20260405](./archive/missions/install-ux-hardening-20260405/MISSION-MANIFEST.md) (complete — `mosaic-v0.0.25`)
|
||||
|
||||
## Context
|
||||
|
||||
Jarvis (v0.2.0) was a single-host Python/Next.js assistant. The user runs sessions across 3–4 workstations split between home and USC. Today every session reaches back to a single jarvis-brain checkout, which is brittle (offline-hostile, no consolidation, no shared state beyond a single repo). A prior OpenBrain attempt punished offline use, introduced cache/latency/opacity pain, and tightly coupled every session to a remote service.
|
||||
Real-run testing of `@mosaicstack/mosaic@0.0.25` uncovered:
|
||||
|
||||
The MVP solution: keep each user's home gateway as the source of truth, connect gateways gateway-to-gateway over mTLS with scoped read-only data exposure, and expose the unified experience through three coherent surfaces:
|
||||
|
||||
- **webUI** — the primary visual control plane (Next.js + React 19, `apps/web`)
|
||||
- **TUI** — the terminal-native interface for agent work (`packages/mosaic` wizard + Pi TUI)
|
||||
- **CLI** — `mosaic` command for scripted/headless workflows
|
||||
|
||||
Federation is required NOW because it unblocks cross-host consolidation; it is necessary but not sufficient for MVP. Additional workstreams will be declared as their scope solidifies.
|
||||
|
||||
## Prior Execution (March 13 → April 5)
|
||||
|
||||
This manifest was authored on 2026-04-19 to roll up work that began 2026-03-13. Before this date, MVP work was tracked via phase-based Gitea milestones and the scratchpad — there was no rollup manifest at the `docs/MISSION-MANIFEST.md` path (the slot was occupied by sub-mission manifests for `install-ux-hardening` and then `install-ux-v2`).
|
||||
|
||||
Prior execution outline (full detail in [scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md)):
|
||||
|
||||
- **Phases 0 → 7** (Gitea milestones `ms-157` → `ms-164`, issues #1–#59): foundation, core API, agent layer, web dashboard, memory, remote control, CLI/tools, polish/beta. Substantially shipped by Session 13.
|
||||
- **Phase 8** (Gitea milestone `ms-165`, issues #160–#172): platform architecture extension — teams, workspaces, `/provider` OAuth, preferences, etc. Wave-based execution plan defined at Session 14.
|
||||
- **Sub-missions** during the gap: `install-ux-hardening` (complete, `mosaic-v0.0.25`), `install-ux-v2` (complete on 2026-04-19, `0.0.27` → `0.0.29`). Both archived under `docs/archive/missions/`.
|
||||
|
||||
Going forward, MVP execution is tracked through the **Workstreams** table below. Phase-based issue numbering is preserved on Gitea but is no longer the primary control plane.
|
||||
|
||||
## Cross-Cutting MVP Requirements
|
||||
|
||||
These apply to every workstream and every milestone. A workstream cannot ship if it breaks any of them.
|
||||
|
||||
| # | Requirement |
|
||||
| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| MVP-X1 | Three-surface parity: every user-facing capability is reachable via webUI **and** TUI **and** CLI (read paths at minimum; mutating paths where applicable to the surface). |
|
||||
| MVP-X2 | Multi-tenant isolation is enforced at every boundary; no cross-user leakage under any circumstance. |
|
||||
| MVP-X3 | Auth via BetterAuth (existing); SSO adapters per PRD; admin bootstrap remains a one-shot. |
|
||||
| MVP-X4 | Three quality gates green before push: `pnpm typecheck`, `pnpm lint`, `pnpm format:check`. |
|
||||
| MVP-X5 | Federated tier (PG + pgvector + Valkey) is the canonical MVP deployment topology; local/standalone tiers continue to work for non-federated installs but are not the MVP target. |
|
||||
| MVP-X6 | OTEL tracing on every request path; `traceparent` propagated across the federation boundary in both directions. |
|
||||
| MVP-X7 | Trunk merge strategy: branch from `main`, squash-merge via PR, never push to `main` directly. |
|
||||
1. **Critical:** admin bootstrap fails with HTTP 400 `property email should not exist` — `bootstrap.controller.ts` uses `import type { BootstrapSetupDto }`, erasing the class at runtime. Nest's `@Body()` falls back to plain `Object` metatype, and ValidationPipe with `forbidNonWhitelisted` rejects every property. One-keyword fix (drop the `type` keyword), but it blocks the happy path of the release that just shipped.
|
||||
2. The wizard reports `✔ Wizard complete` and `✔ Done` _after_ the bootstrap 400 — failure only propagates in headless mode (`wizard.ts:147`).
|
||||
3. The gateway port prompt does not prefill `14242` in the input buffer.
|
||||
4. `"What is Mosaic?"` intro copy does not mention Pi SDK (the actual agent runtime behind Claude/Codex/OpenCode).
|
||||
5. CORS origin prompt is confusing — the user should be able to supply an FQDN/hostname and have the system derive the CORS value.
|
||||
6. Skill / additional feature install section is unusable in practice.
|
||||
7. Quick-start asks far too many questions to be meaningfully "quick".
|
||||
8. No drill-down main menu — everything is a linear interrogation.
|
||||
9. Provider setup happens late and without intelligence. An OpenClaw-style provider-first flow would let the user describe what they want in natural language, have the agent expound on it, and have the agent choose its own name based on that intent.
|
||||
|
||||
## Success Criteria
|
||||
|
||||
The MVP is complete when ALL declared workstreams are complete AND every cross-cutting requirement is verifiable on a live two-host deployment (woltje.com ↔ uscllc.com).
|
||||
- [x] AC-1: Admin bootstrap completes successfully end-to-end on a fresh install (DTO value import, no forbidNonWhitelisted regression); covered by an integration or e2e test that exercises the real DTO binding. _(PR #440)_
|
||||
- [x] AC-2: Wizard fails loudly (non-zero exit, clear error) when the bootstrap stage returns `completed: false`, in both interactive and headless modes. No more silent `✔ Wizard complete` after a 400. _(PR #440)_
|
||||
- [x] AC-3: Gateway port prompt prefills `14242` in the input field (user can press Enter to accept). _(PR #440)_
|
||||
- [x] AC-4: `"What is Mosaic?"` intro copy mentions Pi SDK as the underlying agent runtime. _(PR #440)_
|
||||
- [x] AC-5: Release `mosaic-v0.0.26` tagged and published to the Gitea npm registry, unblocking the 0.0.25 happy path. _(tag: mosaic-v0.0.26, registry: 0.0.26 live)_
|
||||
- [ ] AC-6: CORS origin prompt replaced with FQDN/hostname input; CORS string is derived from that.
|
||||
- [ ] AC-7: Skill / additional feature install section is reworked until it is actually usable end-to-end (worker defines the concrete failure modes during diagnosis).
|
||||
- [ ] AC-8: First-run flow has a drill-down main menu with at least `Plugins` (Recommended / Custom), `Providers`, and the other top-level configuration groups. Linear interrogation is gone.
|
||||
- [ ] AC-9: `Quick Start` path completes with a minimal, curated set of questions (target: under 90 seconds for a returning user; define the exact baseline during design).
|
||||
- [ ] AC-10: Provider setup happens first, driven by a natural-language intake prompt. The agent expounds on the user's intent and chooses its own name based on that intent (OpenClaw-style). Naming is confirmable / overridable.
|
||||
- [ ] AC-11: All milestones ship as merged PRs with green CI and closed issues.
|
||||
|
||||
- [ ] AC-MVP-1: All declared workstreams reach `complete` status with merged PRs and green CI
|
||||
- [ ] AC-MVP-2: A user session on the home gateway can transparently query work-gateway data subject to scope, with no data persisted across the boundary
|
||||
- [ ] AC-MVP-3: The same user-facing capability is reachable from webUI, TUI, and CLI (per MVP-X1)
|
||||
- [ ] AC-MVP-4: Two-gateway production deployment (woltje.com ↔ uscllc.com) operational ≥7 days without incident
|
||||
- [ ] AC-MVP-5: All cross-cutting requirements (MVP-X1 → MVP-X7) verified with evidence
|
||||
- [ ] AC-MVP-6: PRD `docs/PRD.md` "In Scope (v0.1.0 Beta)" list mapped to evidence (each item: shipped / explicitly deferred with rationale)
|
||||
## Milestones
|
||||
|
||||
## Workstreams
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------- | ------------------------------------------------------------ | ----------- | ---------------------- | ----- | ---------- | ---------- |
|
||||
| 1 | IUV-M01 | Hotfix: bootstrap DTO + wizard failure + port prefill + copy | complete | fix/bootstrap-hotfix | #436 | 2026-04-05 | 2026-04-05 |
|
||||
| 2 | IUV-M02 | UX polish: CORS/FQDN, skill installer rework | complete | feat/install-ux-polish | #437 | 2026-04-05 | 2026-04-05 |
|
||||
| 3 | IUV-M03 | Provider-first intelligent flow + drill-down main menu | not-started | feat/install-ux-intent | #438 | — | — |
|
||||
|
||||
| # | ID | Name | Status | Manifest | Notes |
|
||||
| --- | --- | ------------------------------------------- | ----------------- | ----------------------------------------------------------------------- | --------------------------------------------------- |
|
||||
| W1 | FED | Federation v1 | planning-complete | [docs/federation/MISSION-MANIFEST.md](./federation/MISSION-MANIFEST.md) | 7 milestones, ~175K tokens, issues #460–#466 filed |
|
||||
| W2+ | TBD | (additional workstreams declared as scoped) | — | — | Scope creep is expected and explicitly accommodated |
|
||||
## Subagent Delegation Plan
|
||||
|
||||
### Likely Additional Workstreams (Not Yet Declared)
|
||||
|
||||
These are anticipated based on the PRD `In Scope` list but are NOT counted toward MVP completion until they have their own manifest, milestones, and tracking issues. Listed here so the orchestrator knows what's likely coming.
|
||||
|
||||
- Web dashboard parity with PRD scope (chat, tasks, projects, missions, agent status surfaces)
|
||||
- Pi TUI integration for terminal-native agent work
|
||||
- CLI completeness for headless / scripted workflows that mirror webUI capability
|
||||
- Remote control plugins (Discord priority, then Telegram)
|
||||
- Multi-user / SSO finishing (BetterAuth + Authentik/WorkOS/Keycloak adapters per PRD)
|
||||
- LLM provider expansion (Anthropic, Codex, Z.ai, Ollama, LM Studio, llama.cpp) + routing matrix
|
||||
- MCP server/client capability + skill import interface
|
||||
- Brain (`@mosaicstack/brain`) as the structured data layer on PG + vector
|
||||
|
||||
When any of these solidify into a real workstream, add a row to the Workstreams table, create a workstream-level manifest under `docs/{workstream}/MISSION-MANIFEST.md`, and file tracking issues.
|
||||
| Milestone | Recommended Tier | Rationale |
|
||||
| --------- | ---------------- | --------------------------------------------------------------------- |
|
||||
| IUV-M01 | sonnet | Tight bug cluster with known fix sites + small release cycle |
|
||||
| IUV-M02 | sonnet | UX rework, moderate surface, diagnostic-heavy for the skill installer |
|
||||
| IUV-M03 | opus | Architectural redesign of first-run flow, state machine + LLM intake |
|
||||
|
||||
## Risks
|
||||
|
||||
- **Scope creep is the named risk.** Workstreams will be added; the rule is that each must have its own manifest + milestones + acceptance criteria before it consumes execution capacity.
|
||||
- **Federation urgency vs. surface parity** — federation is being built first because it unblocks the user, but webUI/TUI/CLI parity (MVP-X1) cannot slip indefinitely. Track surface coverage explicitly when each workstream lands.
|
||||
- **Three-surface fan-out** — the same capability exposed three ways multiplies test surface and design effort. Default to a shared API/contract layer, then thin surface adapters; resist surface-specific business logic.
|
||||
- **Federated-tier dependency** — MVP requires PG + pgvector + Valkey; users on local/standalone tier cannot federate. This is intentional but must be communicated clearly in the wizard.
|
||||
- **Hotfix regression surface** — the `import type` → `import` fix on the DTO class is a one-keyword change but needs an integration test that binds the real DTO, not just a controller unit test, to prevent the same class-erasure regression from sneaking back in.
|
||||
- **LLM-driven intake latency / offline** — M03's provider-first intent flow assumes an available LLM call to expound on user input and choose a name. Offline installs need a deterministic fallback.
|
||||
- **Menu vs. linear back-compat** — M03 changes the top-level flow shape; existing `tools/install.sh --yes` + env-var headless path must continue to work.
|
||||
- **Scope creep in M03** — "redesign the wizard" can absorb arbitrary work. Keep it bounded with explicit non-goals.
|
||||
|
||||
## Out of Scope (MVP)
|
||||
## Out of Scope
|
||||
|
||||
- SaaS / multi-tenant revenue model — personal/family/team tool only
|
||||
- Mobile native apps — responsive web only
|
||||
- Public npm registry publishing — Gitea registry only
|
||||
- Voice / video agent interaction
|
||||
- Full OpenClaw feature parity — inspiration only
|
||||
- Calendar / GLPI / Woodpecker tooling integrations (deferred to post-MVP)
|
||||
|
||||
## Session History
|
||||
|
||||
For sessions 1–14 (phase-based execution, 2026-03-13 → 2026-03-15), see [scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md). Sessions below are tracked at the rollup level.
|
||||
|
||||
| Session | Date | Runtime | Outcome |
|
||||
| ------- | ---------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| S15 | 2026-04-19 | claude | MVP rollup manifest authored. Install-ux-v2 archived (IUV-M03 retroactively closed — shipped via PR #446 + releases 0.0.27 → 0.0.29). Federation v1 planning landed via PR #468. W1 manifest reachable at `docs/federation/MISSION-MANIFEST.md`. Next: kickoff FED-M1. |
|
||||
|
||||
## Next Step
|
||||
|
||||
Begin W1 / FED-M1 — federated tier infrastructure. Task breakdown lives at [docs/federation/TASKS.md](./federation/TASKS.md).
|
||||
- Migrating the wizard to a GUI / web UI (still terminal-first)
|
||||
- Replacing the Gitea registry or the Woodpecker publish pipeline
|
||||
- Multi-tenant / multi-user onboarding (still single-admin bootstrap)
|
||||
- Reworking `mosaic uninstall` (M01 of the parent mission — stable)
|
||||
|
||||
@@ -1,40 +1,39 @@
|
||||
# Tasks — MVP (Top-Level Rollup)
|
||||
# Tasks — Install UX v2
|
||||
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **Mission:** mvp-20260312
|
||||
> **Manifest:** [docs/MISSION-MANIFEST.md](./MISSION-MANIFEST.md)
|
||||
>
|
||||
> This file is a **rollup**. Per-workstream task breakdowns live in workstream task files
|
||||
> (e.g. `docs/federation/TASKS.md`). Workers operating inside a workstream should treat
|
||||
> the workstream file as their primary task source; this file exists for orchestrator-level
|
||||
> visibility into MVP-wide state.
|
||||
>
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed`
|
||||
> **Mission:** install-ux-v2-20260405
|
||||
> **Schema:** `| id | status | description | issue | agent | branch | depends_on | estimate | notes |`
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed` | `needs-qa`
|
||||
> **Agent values:** `codex` | `sonnet` | `haiku` | `opus` | `—` (auto)
|
||||
|
||||
## Workstream Rollup
|
||||
## Milestone 1 — Hotfix: bootstrap DTO + wizard failure + port prefill + copy (IUV-M01)
|
||||
|
||||
| id | status | workstream | progress | tasks file | notes |
|
||||
| --- | ----------------- | ------------------- | ---------------- | ------------------------------------------------- | --------------------------------------------------------------- |
|
||||
| W1 | planning-complete | Federation v1 (FED) | 0 / 7 milestones | [docs/federation/TASKS.md](./federation/TASKS.md) | M1 task breakdown populated; M2–M7 deferred to mission planning |
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | ------ | -------------------- | ---------- | -------- | --------------------------------------------------------------------------------------- |
|
||||
| IUV-01-01 | done | Fix `apps/gateway/src/admin/bootstrap.controller.ts:16` — switch `import type { BootstrapSetupDto }` to a value import so Nest's `@Body()` binds the real class | #436 | sonnet | fix/bootstrap-hotfix | — | 3K | PR #440 merged `0ae932ab` |
|
||||
| IUV-01-02 | done | Add integration / e2e test that POSTs `/api/bootstrap/setup` with `{name,email,password}` against a real Nest app instance and asserts 201 — NOT a mocked controller unit test | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-01 | 10K | `apps/gateway/src/admin/bootstrap.e2e.spec.ts` — 4 tests; unplugin-swc added for vitest |
|
||||
| IUV-01-03 | done | `packages/mosaic/src/wizard.ts:147` — propagate `!bootstrapResult.completed` as a wizard failure in **interactive** mode too (not only headless); non-zero exit + no `✔ Wizard complete` line | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-02 | 5K | removed `&& headlessRun` guard |
|
||||
| IUV-01-04 | done | Gateway port prompt prefills `14242` in the input buffer — investigate why `promptPort`'s `defaultValue` isn't reaching the user-visible input | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-03 | 5K | added `initialValue` through prompter interface → clack |
|
||||
| IUV-01-05 | done | `"What is Mosaic?"` intro copy updated to mention Pi SDK as the underlying agent runtime (alongside Claude Code / Codex / OpenCode) | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-04 | 2K | `packages/mosaic/src/stages/welcome.ts` |
|
||||
| IUV-01-06 | done | Tests + code review + PR merge + tag `mosaic-v0.0.26` + Gitea release + npm registry republish | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-05 | 10K | PRs #440/#441/#442 merged; tag `mosaic-v0.0.26`; registry latest=0.0.26 ✓ |
|
||||
|
||||
## Cross-Cutting Tracking
|
||||
## Milestone 2 — UX polish: CORS/FQDN, skill installer rework (IUV-M02)
|
||||
|
||||
These are MVP-level checks that don't belong to any single workstream. Updated by the orchestrator at each session.
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------- | ---------- | -------- | ---------------------------------------------------------------------- |
|
||||
| IUV-02-01 | done | Replace CORS origin prompt with FQDN / hostname input; derive the CORS value internally; default to `localhost` with clear help text | #437 | sonnet | feat/install-ux-polish | — | 10K | `deriveCorsOrigin()` pure fn; MOSAIC_HOSTNAME headless var; PR #444 |
|
||||
| IUV-02-02 | done | Diagnose and document the concrete failure modes of the current skill / additional feature install section end-to-end | #437 | sonnet | feat/install-ux-polish | IUV-02-01 | 8K | selection→install gap, silent catch{}, no whitelist concept |
|
||||
| IUV-02-03 | done | Rework the skill installer so it is usable end-to-end (selection, install, verify, failure reporting) | #437 | sonnet | feat/install-ux-polish | IUV-02-02 | 20K | MOSAIC_INSTALL_SKILLS env var whitelist; SyncSkillsResult typed return |
|
||||
| IUV-02-04 | done | Tests + code review + PR merge | #437 | sonnet | feat/install-ux-polish | IUV-02-03 | 10K | 18 new tests (13 CORS + 5 skills); PR #444 merged `172bacb3` |
|
||||
|
||||
| id | status | description | notes |
|
||||
| ------- | ----------- | -------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
||||
| MVP-T01 | done | Author MVP-level manifest at `docs/MISSION-MANIFEST.md` | This session (2026-04-19); PR pending |
|
||||
| MVP-T02 | done | Archive install-ux-v2 mission state to `docs/archive/missions/install-ux-v2-20260405/` | IUV-M03 retroactively closed (shipped via PR #446 + releases 0.0.27→0.0.29) |
|
||||
| MVP-T03 | done | Land federation v1 planning artifacts on `main` | PR #468 merged 2026-04-19 (commit `66512550`) |
|
||||
| MVP-T04 | not-started | Sync `.mosaic/orchestrator/mission.json` MVP slot with this manifest (milestone enumeration, etc.) | Coord state file; consider whether to repopulate via `mosaic coord` or accept hand-edit |
|
||||
| MVP-T05 | in-progress | Kick off W1 / FED-M1 — federated tier infrastructure | Session 16 (2026-04-19): FED-M1-01 in-progress on `feat/federation-m1-tier-config` |
|
||||
| MVP-T06 | not-started | Declare additional workstreams (web dashboard, TUI/CLI parity, remote control, etc.) as scope solidifies | Track each new workstream by adding a row to the Workstream Rollup |
|
||||
## Milestone 3 — Provider-first intelligent flow + drill-down main menu (IUV-M03)
|
||||
|
||||
## Pointer to Active Workstream
|
||||
|
||||
Active workstream is **W1 — Federation v1**. Workers should:
|
||||
|
||||
1. Read [docs/federation/MISSION-MANIFEST.md](./federation/MISSION-MANIFEST.md) for workstream scope
|
||||
2. Read [docs/federation/TASKS.md](./federation/TASKS.md) for the next pending task
|
||||
3. Follow per-task agent + tier guidance from the workstream manifest
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ----- | ---------------------- | ---------- | -------- | ------------------------------------------------------------- |
|
||||
| IUV-03-01 | not-started | Design doc: new first-run state machine — main menu (Plugins / Providers / …), Quick Start vs Custom paths, provider-first flow, intent intake + naming loop | #438 | opus | feat/install-ux-intent | — | 15K | scratchpad + explicit non-goals |
|
||||
| IUV-03-02 | not-started | Implement drill-down main menu (Plugins: Recommended / Custom, Providers, …) as the top-level entry point of `mosaic wizard` | #438 | opus | feat/install-ux-intent | IUV-03-01 | 25K | |
|
||||
| IUV-03-03 | not-started | Quick Start path: curated minimum question set — define the exact baseline, delete everything else from the fast path | #438 | opus | feat/install-ux-intent | IUV-03-02 | 15K | |
|
||||
| IUV-03-04 | not-started | Provider-first natural-language intake: user describes intent → agent expounds → agent proposes a name (confirmable / overridable) — OpenClaw-style | #438 | opus | feat/install-ux-intent | IUV-03-03 | 25K | offline fallback required (deterministic default name + path) |
|
||||
| IUV-03-05 | not-started | Preserve backward-compat: headless path (`MOSAIC_ASSUME_YES=1` + env vars) still works end-to-end; `tools/install.sh --yes` unchanged | #438 | opus | feat/install-ux-intent | IUV-03-04 | 10K | |
|
||||
| IUV-03-06 | not-started | Tests + code review + PR merge + `mosaic-v0.0.27` release | #438 | opus | feat/install-ux-intent | IUV-03-05 | 15K | |
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
# Mission Manifest — Install UX v2
|
||||
|
||||
> Persistent document tracking full mission scope, status, and session history.
|
||||
> Updated by the orchestrator at each phase transition and milestone completion.
|
||||
|
||||
## Mission
|
||||
|
||||
**ID:** install-ux-v2-20260405
|
||||
**Statement:** The install-ux-hardening mission shipped the plumbing (uninstall, masked password, hooks consent, unified flow, headless path), but the first real end-to-end run surfaced a critical regression and a collection of UX failings that make the wizard feel neither quick nor intelligent. This mission closes the bootstrap regression as a hotfix, then rethinks the first-run experience around a provider-first, intent-driven flow with a drill-down main menu and a genuinely fast quick-start.
|
||||
**Phase:** Closed
|
||||
**Current Milestone:** —
|
||||
**Progress:** 3 / 3 milestones
|
||||
**Status:** complete
|
||||
**Last Updated:** 2026-04-19 (archived during MVP manifest authoring; IUV-M03 substantively shipped via PR #446 — drill-down menu + provider-first flow + quick start; releases 0.0.27 → 0.0.29)
|
||||
**Archived to:** `docs/archive/missions/install-ux-v2-20260405/`
|
||||
**Parent Mission:** [install-ux-hardening-20260405](./archive/missions/install-ux-hardening-20260405/MISSION-MANIFEST.md) (complete — `mosaic-v0.0.25`)
|
||||
|
||||
## Context
|
||||
|
||||
Real-run testing of `@mosaicstack/mosaic@0.0.25` uncovered:
|
||||
|
||||
1. **Critical:** admin bootstrap fails with HTTP 400 `property email should not exist` — `bootstrap.controller.ts` uses `import type { BootstrapSetupDto }`, erasing the class at runtime. Nest's `@Body()` falls back to plain `Object` metatype, and ValidationPipe with `forbidNonWhitelisted` rejects every property. One-character fix (drop the `type` keyword), but it blocks the happy path of the release that just shipped.
|
||||
2. The wizard reports `✔ Wizard complete` and `✔ Done` _after_ the bootstrap 400 — failure only propagates in headless mode (`wizard.ts:147`).
|
||||
3. The gateway port prompt does not prefill `14242` in the input buffer.
|
||||
4. `"What is Mosaic?"` intro copy does not mention Pi SDK (the actual agent runtime behind Claude/Codex/OpenCode).
|
||||
5. CORS origin prompt is confusing — the user should be able to supply an FQDN/hostname and have the system derive the CORS value.
|
||||
6. Skill / additional feature install section is unusable in practice.
|
||||
7. Quick-start asks far too many questions to be meaningfully "quick".
|
||||
8. No drill-down main menu — everything is a linear interrogation.
|
||||
9. Provider setup happens late and without intelligence. An OpenClaw-style provider-first flow would let the user describe what they want in natural language, have the agent expound on it, and have the agent choose its own name based on that intent.
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [x] AC-1: Admin bootstrap completes successfully end-to-end on a fresh install (DTO value import, no forbidNonWhitelisted regression); covered by an integration or e2e test that exercises the real DTO binding. _(PR #440)_
|
||||
- [x] AC-2: Wizard fails loudly (non-zero exit, clear error) when the bootstrap stage returns `completed: false`, in both interactive and headless modes. No more silent `✔ Wizard complete` after a 400. _(PR #440)_
|
||||
- [x] AC-3: Gateway port prompt prefills `14242` in the input field (user can press Enter to accept). _(PR #440)_
|
||||
- [x] AC-4: `"What is Mosaic?"` intro copy mentions Pi SDK as the underlying agent runtime. _(PR #440)_
|
||||
- [x] AC-5: Release `mosaic-v0.0.26` tagged and published to the Gitea npm registry, unblocking the 0.0.25 happy path. _(tag: mosaic-v0.0.26, registry: 0.0.26 live)_
|
||||
- [ ] AC-6: CORS origin prompt replaced with FQDN/hostname input; CORS string is derived from that.
|
||||
- [ ] AC-7: Skill / additional feature install section is reworked until it is actually usable end-to-end (worker defines the concrete failure modes during diagnosis).
|
||||
- [ ] AC-8: First-run flow has a drill-down main menu with at least `Plugins` (Recommended / Custom), `Providers`, and the other top-level configuration groups. Linear interrogation is gone.
|
||||
- [ ] AC-9: `Quick Start` path completes with a minimal, curated set of questions (target: under 90 seconds for a returning user; define the exact baseline during design).
|
||||
- [ ] AC-10: Provider setup happens first, driven by a natural-language intake prompt. The agent expounds on the user's intent and chooses its own name based on that intent (OpenClaw-style). Naming is confirmable / overridable.
|
||||
- [ ] AC-11: All milestones ship as merged PRs with green CI and closed issues.
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------- | ------------------------------------------------------------ | -------- | ---------------------- | ----- | ---------- | ---------- |
|
||||
| 1 | IUV-M01 | Hotfix: bootstrap DTO + wizard failure + port prefill + copy | complete | fix/bootstrap-hotfix | #436 | 2026-04-05 | 2026-04-05 |
|
||||
| 2 | IUV-M02 | UX polish: CORS/FQDN, skill installer rework | complete | feat/install-ux-polish | #437 | 2026-04-05 | 2026-04-05 |
|
||||
| 3 | IUV-M03 | Provider-first intelligent flow + drill-down main menu | complete | feat/install-ux-intent | #438 | 2026-04-05 | 2026-04-19 |
|
||||
|
||||
## Subagent Delegation Plan
|
||||
|
||||
| Milestone | Recommended Tier | Rationale |
|
||||
| --------- | ---------------- | --------------------------------------------------------------------- |
|
||||
| IUV-M01 | sonnet | Tight bug cluster with known fix sites + small release cycle |
|
||||
| IUV-M02 | sonnet | UX rework, moderate surface, diagnostic-heavy for the skill installer |
|
||||
| IUV-M03 | opus | Architectural redesign of first-run flow, state machine + LLM intake |
|
||||
|
||||
## Risks
|
||||
|
||||
- **Hotfix regression surface** — the `import type` → `import` fix on the DTO class is one character but needs an integration test that binds the real DTO, not just a controller unit test, to prevent the same class-erasure regression from sneaking back in.
|
||||
- **LLM-driven intake latency / offline** — M03's provider-first intent flow assumes an available LLM call to expound on user input and choose a name. Offline installs need a deterministic fallback.
|
||||
- **Menu vs. linear back-compat** — M03 changes the top-level flow shape; existing `tools/install.sh --yes` + env-var headless path must continue to work.
|
||||
- **Scope creep in M03** — "redesign the wizard" can absorb arbitrary work. Keep it bounded with explicit non-goals.
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- Migrating the wizard to a GUI / web UI (still terminal-first)
|
||||
- Replacing the Gitea registry or the Woodpecker publish pipeline
|
||||
- Multi-tenant / multi-user onboarding (still single-admin bootstrap)
|
||||
- Reworking `mosaic uninstall` (M01 of the parent mission — stable)
|
||||
@@ -1,39 +0,0 @@
|
||||
# Tasks — Install UX v2
|
||||
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **Mission:** install-ux-v2-20260405
|
||||
> **Schema:** `| id | status | description | issue | agent | branch | depends_on | estimate | notes |`
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed` | `needs-qa`
|
||||
> **Agent values:** `codex` | `sonnet` | `haiku` | `opus` | `—` (auto)
|
||||
|
||||
## Milestone 1 — Hotfix: bootstrap DTO + wizard failure + port prefill + copy (IUV-M01)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | ------ | -------------------- | ---------- | -------- | --------------------------------------------------------------------------------------- |
|
||||
| IUV-01-01 | done | Fix `apps/gateway/src/admin/bootstrap.controller.ts:16` — switch `import type { BootstrapSetupDto }` to a value import so Nest's `@Body()` binds the real class | #436 | sonnet | fix/bootstrap-hotfix | — | 3K | PR #440 merged `0ae932ab` |
|
||||
| IUV-01-02 | done | Add integration / e2e test that POSTs `/api/bootstrap/setup` with `{name,email,password}` against a real Nest app instance and asserts 201 — NOT a mocked controller unit test | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-01 | 10K | `apps/gateway/src/admin/bootstrap.e2e.spec.ts` — 4 tests; unplugin-swc added for vitest |
|
||||
| IUV-01-03 | done | `packages/mosaic/src/wizard.ts:147` — propagate `!bootstrapResult.completed` as a wizard failure in **interactive** mode too (not only headless); non-zero exit + no `✔ Wizard complete` line | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-02 | 5K | removed `&& headlessRun` guard |
|
||||
| IUV-01-04 | done | Gateway port prompt prefills `14242` in the input buffer — investigate why `promptPort`'s `defaultValue` isn't reaching the user-visible input | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-03 | 5K | added `initialValue` through prompter interface → clack |
|
||||
| IUV-01-05 | done | `"What is Mosaic?"` intro copy updated to mention Pi SDK as the underlying agent runtime (alongside Claude Code / Codex / OpenCode) | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-04 | 2K | `packages/mosaic/src/stages/welcome.ts` |
|
||||
| IUV-01-06 | done | Tests + code review + PR merge + tag `mosaic-v0.0.26` + Gitea release + npm registry republish | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-05 | 10K | PRs #440/#441/#442 merged; tag `mosaic-v0.0.26`; registry latest=0.0.26 ✓ |
|
||||
|
||||
## Milestone 2 — UX polish: CORS/FQDN, skill installer rework (IUV-M02)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------- | ---------- | -------- | ---------------------------------------------------------------------- |
|
||||
| IUV-02-01 | done | Replace CORS origin prompt with FQDN / hostname input; derive the CORS value internally; default to `localhost` with clear help text | #437 | sonnet | feat/install-ux-polish | — | 10K | `deriveCorsOrigin()` pure fn; MOSAIC_HOSTNAME headless var; PR #444 |
|
||||
| IUV-02-02 | done | Diagnose and document the concrete failure modes of the current skill / additional feature install section end-to-end | #437 | sonnet | feat/install-ux-polish | IUV-02-01 | 8K | selection→install gap, silent catch{}, no whitelist concept |
|
||||
| IUV-02-03 | done | Rework the skill installer so it is usable end-to-end (selection, install, verify, failure reporting) | #437 | sonnet | feat/install-ux-polish | IUV-02-02 | 20K | MOSAIC_INSTALL_SKILLS env var whitelist; SyncSkillsResult typed return |
|
||||
| IUV-02-04 | done | Tests + code review + PR merge | #437 | sonnet | feat/install-ux-polish | IUV-02-03 | 10K | 18 new tests (13 CORS + 5 skills); PR #444 merged `172bacb3` |
|
||||
|
||||
## Milestone 3 — Provider-first intelligent flow + drill-down main menu (IUV-M03)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ----- | ---------------------- | ---------- | -------- | ------------------------------------------------------------- |
|
||||
| IUV-03-01 | not-started | Design doc: new first-run state machine — main menu (Plugins / Providers / …), Quick Start vs Custom paths, provider-first flow, intent intake + naming loop | #438 | opus | feat/install-ux-intent | — | 15K | scratchpad + explicit non-goals |
|
||||
| IUV-03-02 | not-started | Implement drill-down main menu (Plugins: Recommended / Custom, Providers, …) as the top-level entry point of `mosaic wizard` | #438 | opus | feat/install-ux-intent | IUV-03-01 | 25K | |
|
||||
| IUV-03-03 | not-started | Quick Start path: curated minimum question set — define the exact baseline, delete everything else from the fast path | #438 | opus | feat/install-ux-intent | IUV-03-02 | 15K | |
|
||||
| IUV-03-04 | not-started | Provider-first natural-language intake: user describes intent → agent expounds → agent proposes a name (confirmable / overridable) — OpenClaw-style | #438 | opus | feat/install-ux-intent | IUV-03-03 | 25K | offline fallback required (deterministic default name + path) |
|
||||
| IUV-03-05 | not-started | Preserve backward-compat: headless path (`MOSAIC_ASSUME_YES=1` + env vars) still works end-to-end; `tools/install.sh --yes` unchanged | #438 | opus | feat/install-ux-intent | IUV-03-04 | 10K | |
|
||||
| IUV-03-06 | not-started | Tests + code review + PR merge + `mosaic-v0.0.27` release | #438 | opus | feat/install-ux-intent | IUV-03-05 | 15K | |
|
||||
@@ -16,13 +16,13 @@
|
||||
Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No federation logic yet. Existing standalone behavior does not regress.
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M1-01 | done | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Shipped in PR #470. Renamed `team` → `standalone`; added `team` deprecation alias; added `DEFAULT_FEDERATED_CONFIG`. |
|
||||
| FED-M1-02 | done | Author `docker-compose.federated.yml` as an overlay profile: Postgres 17 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | sonnet | feat/federation-m1-compose | FED-M1-01 | 5K | Shipped in PR #471. Overlay defines `postgres-federated`/`valkey-federated`, profile-gated, with pg-init for pgvector extension. |
|
||||
| FED-M1-03 | done | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | sonnet | feat/federation-m1-pgvector | FED-M1-02 | 8K | Shipped in PR #472. `enableVector` flag on postgres StorageConfig; idempotent CREATE EXTENSION before migrations. |
|
||||
| FED-M1-04 | done | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | sonnet | feat/federation-m1-detector | FED-M1-03 | 8K | Shipped in PR #473. 12 tests; 5s timeouts on probes; pgvector library/permission discrimination; rejects non-bullmq for federated. |
|
||||
| FED-M1-05 | done | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | sonnet | feat/federation-m1-migrate | FED-M1-04 | 10K | Shipped in PR #474. `mosaic storage migrate-tier`; DrizzleMigrationSource (corrects P0 found in review); 32 tests; idempotent. |
|
||||
| FED-M1-06 | in-progress | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M1-01 | not-started | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | codex | feat/federation-m1-tier-config | — | 4K | Schema lives in `packages/types`; validator in gateway bootstrap. No behavior change yet — enum only. |
|
||||
| FED-M1-02 | not-started | Author `docker-compose.federated.yml` as an overlay profile: Postgres 16 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | codex | feat/federation-m1-compose | FED-M1-01 | 5K | Overlay on existing `docker-compose.yml`; no changes to base file. Add `profile: federated` gating. |
|
||||
| FED-M1-03 | not-started | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | codex | feat/federation-m1-pgvector | FED-M1-02 | 8K | Extension create is idempotent `CREATE EXTENSION IF NOT EXISTS vector`. Gate on tier = federated. |
|
||||
| FED-M1-04 | not-started | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | codex | feat/federation-m1-detector | FED-M1-03 | 8K | Structured error type with remediation hints. Logs which service failed, with host:port attempted. |
|
||||
| FED-M1-05 | not-started | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | codex | feat/federation-m1-migrate | FED-M1-04 | 10K | Do NOT run automatically. CLI subcommand `mosaic migrate tier --to federated --dry-run`. Safety rails. |
|
||||
| FED-M1-06 | not-started | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. |
|
||||
| FED-M1-07 | not-started | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Vitest + docker-compose test profile. One test file per assertion; real services, no mocks. |
|
||||
| FED-M1-08 | not-started | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Runs against docker-compose federated profile; uses temp PGlite file; deterministic seed. |
|
||||
| FED-M1-09 | not-started | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | haiku | feat/federation-m1-regression | FED-M1-07 | 4K | Reuse existing e2e harness; just re-point at the federation branch build. Canary that we didn't break it. |
|
||||
|
||||
@@ -266,116 +266,3 @@ Issues closed: #52, #55, #57, #58, #120-#134
|
||||
**P8-018 closed:** Spin-off stubs created (gatekeeper-service.md, task-queue-unification.md, chroot-sandboxing.md)
|
||||
|
||||
**Next:** Begin execution at Wave 1 — P8-007 (DB migrations) + P8-008 (Types) in parallel.
|
||||
|
||||
---
|
||||
|
||||
### Session 15 — 2026-04-19 — MVP Rollup Manifest Authored
|
||||
|
||||
| Session | Date | Milestone | Tasks Done | Outcome |
|
||||
| ------- | ---------- | -------------- | ------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 15 | 2026-04-19 | (rollup-level) | MVP-T01 (manifest), MVP-T02 (archive iuv-v2), MVP-T03 (land FED planning) | Authored MVP rollup manifest at `docs/MISSION-MANIFEST.md`. Federation v1 planning merged to `main` (PR #468 / commit `66512550`). Install-ux-v2 archived as complete. |
|
||||
|
||||
**Gap context:** The MVP scratchpad was last updated at Session 14 (2026-03-15). In the intervening month, two sub-missions ran outside the MVP framework: `install-ux-hardening` (complete, `mosaic-v0.0.25`) and `install-ux-v2` (complete on 2026-04-19, `0.0.27` → `0.0.29`). Both archived under `docs/archive/missions/`. The phase-based execution from Sessions 1–14 (Phases 0–8, issues #1–#172) substantially shipped during this window via those sub-missions and standalone PRs — the MVP mission was nominally active but had no rollup manifest tracking it.
|
||||
|
||||
**User reframe (this session):**
|
||||
|
||||
> There will be more in the MVP. This will inevitably become scope creep. I need a solution that works via webUI, TUI, CLI, and just works for MVP. Federation is required because I need it to work NOW, so my disparate jarvis-brain usage can be consolidated properly.
|
||||
|
||||
**Decisions:**
|
||||
|
||||
1. **MVP is the rollup mission**, not a single-purpose mission. Federation v1 is one workstream of MVP, not MVP itself. Phase 0–8 work is preserved as historical context but is no longer the primary control plane.
|
||||
2. **Three-surface parity (webUI / TUI / CLI) is a cross-cutting MVP requirement** (MVP-X1), not a workstream. Encoded explicitly so it can't be silently dropped.
|
||||
3. **Scope creep is named and accommodated.** Manifest has explicit "Likely Additional Workstreams" section listing PRD-derived candidates without committing execution capacity to them.
|
||||
4. **Workstream isolation** — each workstream gets its own manifest under `docs/{workstream}/MISSION-MANIFEST.md`. MVP manifest is rollup only.
|
||||
5. **Archive-don't-delete** — install-ux-v2 manifest moved to `docs/archive/missions/install-ux-v2-20260405/` with status corrected to `complete` (IUV-M03 closeout note added pointing at PR #446 + releases 0.0.27 → 0.0.29).
|
||||
6. **Federation planning landed first** — PR #468 merged before MVP manifest authored, so the manifest references real on-`main` artifacts.
|
||||
|
||||
**Open items:**
|
||||
|
||||
- `.mosaic/orchestrator/mission.json` MVP slot remains empty (zero milestones). Tracked as MVP-T04. Defer until next session — does not block W1 kickoff. Open question: hand-edit vs. `mosaic coord init` reinit.
|
||||
- Additional workstreams (web dashboard parity, TUI/CLI completion, remote control, multi-user/SSO, LLM provider expansion, MCP, brain) anticipated per PRD but not declared. Pre-staged in manifest's "Likely Additional Workstreams" list.
|
||||
|
||||
**Artifacts this session:**
|
||||
|
||||
| Artifact | Status |
|
||||
| -------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| PR #468 (`docs(federation): PRD, milestones, mission manifest, and M1 task breakdown`) | merged 2026-04-19 → `main` (commit `66512550`) |
|
||||
| `docs/MISSION-MANIFEST.md` (MVP rollup, replaces install-ux-v2 manifest) | authored on `docs/mvp-mission-manifest` branch |
|
||||
| `docs/TASKS.md` (MVP rollup, points at workstream task files) | authored |
|
||||
| Install-ux-v2 manifest + tasks + scratchpad + iuv-m03-design | moved to `docs/archive/missions/install-ux-v2-20260405/` with status corrected to complete |
|
||||
|
||||
**Next:** PR `docs/mvp-mission-manifest` → merge to `main` → next session begins W1 / FED-M1 from clean state.
|
||||
|
||||
---
|
||||
|
||||
## Session 16 — 2026-04-19 — claude
|
||||
|
||||
**Mode:** Delivery (W1 / FED-M1 execution)
|
||||
**Branch:** `feat/federation-m1-tier-config`
|
||||
**Context budget:** 200K, currently ~45% used (compaction-aware)
|
||||
|
||||
**Goal:** FED-M1-01 — extend `mosaic.config.json` schema: add `"federated"` to tier enum.
|
||||
|
||||
**Critical reconciliation surfaced during pre-flight:**
|
||||
|
||||
The federation PRD (`docs/federation/PRD.md` line 247) defines three tiers: `local | standalone | federated`.
|
||||
The existing code (`packages/config/src/mosaic-config.ts`, `packages/mosaic/src/types.ts`, `packages/mosaic/src/stages/gateway-config.ts`) uses `local | team`.
|
||||
|
||||
`team` is the same conceptual tier as PRD `standalone` (Postgres + Valkey, no pgvector). Rather than carrying a confusing alias forever, FED-M1-01 will rename `team` → `standalone` and add `federated` as a third value, so all downstream federation work has a coherent vocabulary.
|
||||
|
||||
Affected files (storage-tier semantics only — Team/workspace usages unaffected):
|
||||
|
||||
- `packages/config/src/mosaic-config.ts` (StorageTier type, validator enum, defaults)
|
||||
- `packages/mosaic/src/types.ts` (GatewayStorageTier)
|
||||
- `packages/mosaic/src/stages/gateway-config.ts` (~10 references)
|
||||
- `packages/mosaic/src/stages/gateway-config.spec.ts` (test references)
|
||||
- Possibly `tools/e2e-install-test.sh` (referenced grep) and headless env hint string
|
||||
|
||||
**Worker plan:**
|
||||
|
||||
1. Spawn sonnet subagent with explicit task spec + the reconciliation context above.
|
||||
2. Worker delivers diff; orchestrator runs `pnpm typecheck && pnpm lint && pnpm format:check`.
|
||||
3. Independent `feature-dev:code-reviewer` subagent reviews diff.
|
||||
4. Second independent verification subagent (general-purpose, sonnet) verifies reviewer's claims and confirms all `'team'` storage-tier references migrated, no `Team`/workspace bleed.
|
||||
5. Open PR via tea CLI; wait for CI; queue-guard; squash merge; record actuals.
|
||||
|
||||
**Open items:**
|
||||
|
||||
- `MVP-T04` (sync `.mosaic/orchestrator/mission.json`) still deferred.
|
||||
- `team` tier rename touches install wizard headless env vars (`MOSAIC_STORAGE_TIER=team`); will need 0.0.x deprecation note in scratchpad if release notes are written this milestone.
|
||||
|
||||
---
|
||||
|
||||
## Session 17 — 2026-04-19 — claude
|
||||
|
||||
**Mode:** Delivery (W1 / FED-M1 execution; resumed after compaction)
|
||||
**Branches landed this run:** `feat/federation-m1-tier-config` (PR #470), `feat/federation-m1-compose` (PR #471), `feat/federation-m1-pgvector` (PR #472)
|
||||
**Branch active at end:** `feat/federation-m1-detector` (FED-M1-04, ready to push)
|
||||
|
||||
**Tasks closed:** FED-M1-01, FED-M1-02, FED-M1-03 (all merged to `main` via squash, CI green, issue #460 still open as milestone).
|
||||
|
||||
**FED-M1-04 — tier-detector:** Worker delivered `apps/gateway/src/bootstrap/tier-detector.ts` (~210 lines) + `tier-detector.spec.ts` (12 tests). Independent code review (sonnet) returned `changes-required` with 3 issues:
|
||||
|
||||
1. CRITICAL: `probeValkey` missing `connectTimeout: 5000` on the ioredis Redis client (defaulted to 10s, violated fail-fast spec).
|
||||
2. IMPORTANT: `probePgvector` catch block did not discriminate "library not installed" (use `pgvector/pgvector:pg17`) from permission errors.
|
||||
3. IMPORTANT: Federated tier silently skipped Valkey probe when `queue.type !== 'bullmq'` (computed Valkey URL conditionally).
|
||||
|
||||
Worker fix-up round addressed all three:
|
||||
|
||||
- L147: `connectTimeout: 5000` added to Redis options
|
||||
- L113-117: catch block branches on `extension "vector" is not available` substring → distinct remediation per failure mode
|
||||
- L206-215: federated branch fails fast with `service: 'config'` if `queue.type !== 'bullmq'`, then probes Valkey unconditionally
|
||||
- 4 new tests (8 → 12 total) cover each fix specifically
|
||||
|
||||
Independent verifier (haiku) confirmed all 6 verification claims (line numbers, test presence, suite green: 12/12 PASS).
|
||||
|
||||
**Process note — review pipeline working as designed:**
|
||||
|
||||
Initial verifier (haiku) on the first delivery returned "OK to ship" but missed the 3 deeper issues that the sonnet code-reviewer caught. This validates the user's "always verify subagent claims independently with another subagent" rule — but specifically with the **right tier** for the task: code review needs sonnet-level reasoning, while haiku is fine for verifying surface claims (line counts, file existence) once review issues are known. Going forward: code review uses sonnet (`feature-dev:code-reviewer`), claim verification uses haiku.
|
||||
|
||||
**Followup tasks tracked but deferred:**
|
||||
|
||||
- #7: `tier=local` hardcoded in gateway-config resume branches (lines ~262 and ~317) — pre-existing bug, fix during M1-06 (doctor) or M1-09 (regression).
|
||||
- #8: confirm `packages/config/dist` not git-tracked.
|
||||
|
||||
**Next:** PR for FED-M1-04 → CI wait → merge. Then FED-M1-05 (migration script, codex/sonnet, 10K).
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
# Hotfix Scratchpad — `install.sh` does not seed `TOOLS.md`
|
||||
|
||||
- **Issue:** mosaicstack/stack#457
|
||||
- **Branch:** `fix/tools-md-seeding`
|
||||
- **Type:** Out-of-mission hotfix (not part of Install UX v2 mission)
|
||||
- **Started:** 2026-04-11
|
||||
- **Ships in:** `@mosaicstack/mosaic` 0.0.30
|
||||
|
||||
## Objective
|
||||
|
||||
Ensure `~/.config/mosaic/TOOLS.md` is created on every supported install path so the mandatory AGENTS.md load order actually resolves. The load order lists `TOOLS.md` at position 5 but the bash installer never seeds it.
|
||||
|
||||
## Root cause
|
||||
|
||||
`packages/mosaic/framework/install.sh:228-236` — the post-sync "Seed defaults" loop explicitly lists `AGENTS.md STANDARDS.md`:
|
||||
|
||||
```bash
|
||||
DEFAULTS_DIR="$TARGET_DIR/defaults"
|
||||
if [[ -d "$DEFAULTS_DIR" ]]; then
|
||||
for default_file in AGENTS.md STANDARDS.md; do # ← missing TOOLS.md
|
||||
if [[ -f "$DEFAULTS_DIR/$default_file" ]] && [[ ! -f "$TARGET_DIR/$default_file" ]]; then
|
||||
cp "$DEFAULTS_DIR/$default_file" "$TARGET_DIR/$default_file"
|
||||
ok "Seeded $default_file from defaults"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
```
|
||||
|
||||
`TOOLS.md` is listed in `PRESERVE_PATHS` (line 24) but never created in the first place. A fresh bootstrap install via `tools/install.sh → framework/install.sh` leaves `~/.config/mosaic/TOOLS.md` absent, and the agent load order then points at a missing file.
|
||||
|
||||
### Secondary: TypeScript `syncFramework` is too greedy
|
||||
|
||||
`packages/mosaic/src/config/file-adapter.ts:133-160` — `FileConfigAdapter.syncFramework` correctly seeds TOOLS.md, but it does so by iterating _every_ file in `framework/defaults/`:
|
||||
|
||||
```ts
|
||||
for (const entry of readdirSync(defaultsDir)) {
|
||||
const dest = join(this.mosaicHome, entry);
|
||||
if (!existsSync(dest)) {
|
||||
copyFileSync(join(defaultsDir, entry), dest);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`framework/defaults/` contains:
|
||||
|
||||
```
|
||||
AGENTS.md
|
||||
AUDIT-2026-02-17-framework-consistency.md
|
||||
README.md
|
||||
SOUL.md ← hardcoded "Jarvis"
|
||||
STANDARDS.md
|
||||
TOOLS.md
|
||||
USER.md
|
||||
```
|
||||
|
||||
So on a fresh install the TS wizard would silently copy the `Jarvis`-flavored `SOUL.md` + placeholder `USER.md` + internal `AUDIT-*.md` and `README.md` into the user's mosaic home before `mosaic init` ever prompts them. That's a latent identity bug as well as a root-clutter bug — the wizard's own stages are responsible for generating `SOUL.md`/`USER.md` via templates.
|
||||
|
||||
### Tertiary: stale `TOOLS.md.template`
|
||||
|
||||
`packages/mosaic/framework/templates/TOOLS.md.template` still references `~/.config/mosaic/rails/git/…` and `~/.config/mosaic/rails/codex/…`. The `rails/` tree was renamed to `tools/` in the v1→v2 migration (see `run_migrations` in `install.sh`, which removes the old `rails/` symlink). Any user who does run `mosaic init` ends up with a `TOOLS.md` that points to paths that no longer exist.
|
||||
|
||||
## Scope of this fix
|
||||
|
||||
1. **`packages/mosaic/framework/install.sh`** — extend the explicit seed list to include `TOOLS.md`.
|
||||
2. **`packages/mosaic/src/config/file-adapter.ts`** — restrict `syncFramework` defaults-seeding to an explicit whitelist (`AGENTS.md`, `STANDARDS.md`, `TOOLS.md`) so the TS wizard never accidentally seeds `SOUL.md`/`USER.md`/`README.md`/`AUDIT-*.md` into the mosaic home.
|
||||
3. **`packages/mosaic/framework/templates/TOOLS.md.template`** — replace `rails/` with `tools/` in the wrapper-path examples (minimal surgical fix; full template modernization is out of scope for a 0.0.30 hotfix).
|
||||
4. **Regression test** — unit test around `FileConfigAdapter.syncFramework` that runs against a tmpdir fixture asserting:
|
||||
- `TOOLS.md` is seeded when absent
|
||||
- `AGENTS.md` / `STANDARDS.md` are still seeded when absent
|
||||
- `SOUL.md` / `USER.md` are **not** seeded from `defaults/` (the wizard stages own those)
|
||||
- Existing root files are not clobbered.
|
||||
|
||||
Out of scope (tracked separately / future work):
|
||||
|
||||
- Regenerating `defaults/SOUL.md` and `defaults/USER.md` so they no longer contain Jarvis-specific content.
|
||||
- Fully modernizing `TOOLS.md.template` to match the rich canonical `defaults/TOOLS.md` reference.
|
||||
- `issue-create.sh` / `pr-create.sh` `eval` bugs (already captured to OpenBrain from the prior hotfix).
|
||||
|
||||
## Plan / checklist
|
||||
|
||||
- [ ] Branch `fix/tools-md-seeding` from `main` (at `b2cbf89`)
|
||||
- [ ] File Gitea issue (direct API; wrappers broken for bodies with backticks)
|
||||
- [ ] Scratchpad created (this file)
|
||||
- [ ] `install.sh` seed loop extended to `AGENTS.md STANDARDS.md TOOLS.md`
|
||||
- [ ] `file-adapter.ts` seeding restricted to explicit whitelist
|
||||
- [ ] `TOOLS.md.template` `rails/` → `tools/`
|
||||
- [ ] Regression test added (`file-adapter.test.ts`) — failing first, then green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic run typecheck` green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic run lint` green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic exec vitest run` — new test green, no new failures beyond the known pre-existing `uninstall.spec.ts:138`
|
||||
- [ ] Repo baselines: `pnpm typecheck` / `pnpm lint` / `pnpm format:check`
|
||||
- [ ] Independent code review (`feature-dev:code-reviewer`, sonnet tier)
|
||||
- [ ] Commit + push
|
||||
- [ ] PR opened via Gitea API
|
||||
- [ ] CI queue guard cleared (bypass local `ci-queue-wait.sh` if stale origin URL breaks it; query Gitea API directly)
|
||||
- [ ] CI green on PR
|
||||
- [ ] PR merged (squash)
|
||||
- [ ] CI green on main
|
||||
- [ ] Issue closed with link to merge commit
|
||||
- [ ] `chore/release-mosaic-0.0.30` branch bumps `packages/mosaic/package.json` 0.0.29 → 0.0.30
|
||||
- [ ] Release PR opened + merged
|
||||
- [ ] `.woodpecker/publish.yml` auto-publishes to Gitea npm registry
|
||||
- [ ] Publish verified (`npm view @mosaicstack/mosaic version` or registry check)
|
||||
|
||||
## Risks / blockers
|
||||
|
||||
- `ci-queue-wait.sh` wrapper may still crash on stale `origin` URL (captured in OpenBrain from prior hotfix). Workaround: query Gitea API directly for running/queued pipelines.
|
||||
- `issue-create.sh` / `pr-create.sh` `eval` bugs. Workaround: Gitea API direct call.
|
||||
- `uninstall.spec.ts:138` is a pre-existing failure on main; not this change's problem.
|
||||
- Publish flow is fire-and-forget on main push — if `publish.yml` fails, rollback means republishing a follow-up patch, not reverting the version bump.
|
||||
@@ -1,114 +0,0 @@
|
||||
# Hotfix Scratchpad — `mosaic yolo <runtime>` passes runtime name as initial user message
|
||||
|
||||
- **Issue:** mosaicstack/stack#454
|
||||
- **Branch:** `fix/yolo-runtime-initial-arg`
|
||||
- **Type:** Out-of-mission hotfix (not part of Install UX v2 mission)
|
||||
- **Started:** 2026-04-11
|
||||
|
||||
## Objective
|
||||
|
||||
Stop `mosaic yolo <runtime>` from passing the runtime name (`claude`, `codex`, etc.) as the initial user message to the underlying CLI. Restore the mission-auto-prompt path for yolo launches.
|
||||
|
||||
## Root cause (confirmed)
|
||||
|
||||
`packages/mosaic/src/commands/launch.ts:779` — the `yolo <runtime>` action handler:
|
||||
|
||||
```ts
|
||||
.action((runtime: string, _opts: unknown, cmd: Command) => {
|
||||
// ... validate runtime ...
|
||||
launchRuntime(runtime as RuntimeName, cmd.args, true);
|
||||
});
|
||||
```
|
||||
|
||||
Commander.js includes declared positional arguments in `cmd.args`. For `mosaic yolo claude`:
|
||||
|
||||
- `runtime` (destructured) = `"claude"`
|
||||
- `cmd.args` = `["claude"]` — the same value
|
||||
|
||||
`launchRuntime` treats `["claude"]` as excess positional args, and for the `claude` case that becomes the initial user message. As a secondary consequence, `hasMissionNoArgs` evaluates false, so the mission-auto-prompt path is bypassed too.
|
||||
|
||||
## Live reproduction (intercepted claude binary)
|
||||
|
||||
```
|
||||
$ PATH=/tmp/fake-claude-bin:$PATH mosaic yolo claude
|
||||
[mosaic] Launching Claude Code in YOLO mode...
|
||||
argv[1]: --dangerously-skip-permissions
|
||||
argv[2]: --append-system-prompt
|
||||
argv[3] (len=25601): # ACTIVE MISSION — HARD GATE ...
|
||||
argv[4]: claude ← the bug
|
||||
```
|
||||
|
||||
Non-yolo variant `mosaic claude` is clean:
|
||||
|
||||
```
|
||||
argv[1]: --append-system-prompt
|
||||
argv[2]: <prompt>
|
||||
argv[3]: Active mission detected: MVP. Read the mission state files and report status.
|
||||
```
|
||||
|
||||
## Plan
|
||||
|
||||
1. Refactor `launch.ts`: extract `registerRuntimeLaunchers(program, handler)` with an injectable handler so commander wiring is testable without spawning subprocesses. `registerLaunchCommands` delegates to it with `launchRuntime` as the handler.
|
||||
2. Fix: in the `yolo <runtime>` action, pass `cmd.args.slice(1)` instead of `cmd.args`.
|
||||
3. Add `packages/mosaic/src/commands/launch.spec.ts`:
|
||||
- Failing-first reproducer: parse `['node','x','yolo','claude']` and assert handler receives `extraArgs=[]` and `yolo=true`.
|
||||
- Regression test: parse `['node','x','claude']` asserts handler receives `extraArgs=[]` and `yolo=false`.
|
||||
- Excess args: parse `['node','x','yolo','claude','--print','hi']` asserts handler receives `extraArgs=['--print','hi']` (with `--print` kept because `allowUnknownOption` is true).
|
||||
- Excess args non-yolo: parse `['node','x','claude','--print','hi']` asserts `extraArgs=['--print','hi']`.
|
||||
- Reject unknown runtime under yolo.
|
||||
4. Run typecheck, lint, format:check, vitest for `@mosaicstack/mosaic`.
|
||||
5. Independent code review (feature-dev:code-reviewer subagent, sonnet tier).
|
||||
6. Commit → push → PR via wrappers → merge → CI green → close issue #454.
|
||||
7. Release decision (`mosaic-v0.0.30`) deferred to Jason after merge.
|
||||
|
||||
## Framework compliance sub-findings (out-of-scope; to capture in OpenBrain after)
|
||||
|
||||
- `~/.config/mosaic/tools/git/issue-create.sh` uses `eval` on `$BODY`; arbitrary bodies with backticks, `$`, or parens break catastrophically.
|
||||
- `gitea_issue_create_api` fallback uses `curl -fsS` without `-L`; after the `mosaicstack/mosaic-stack → mosaicstack/stack` rename, the API redirect is not followed and the fallback silently fails.
|
||||
- Local repo `origin` remote still points at old `mosaic/mosaic-stack.git` slug. Not touched here per git-config safety rule.
|
||||
- `~/.config/mosaic/TOOLS.md` referenced by the global load order but does not exist on disk.
|
||||
|
||||
These will be captured to OpenBrain after the hotfix merges so they don't get lost, and filed as separate tracking items.
|
||||
|
||||
## Progress checkpoints
|
||||
|
||||
- [x] Branch created (`fix/yolo-runtime-initial-arg`)
|
||||
- [x] Issue #454 opened
|
||||
- [x] Scratchpad scaffolded
|
||||
- [x] Failing test added (red)
|
||||
- [x] Refactor + fix applied
|
||||
- [x] Tests green (launch.spec.ts 11/11)
|
||||
- [x] Baselines green (typecheck, lint, format:check, vitest — pre-existing `uninstall.spec.ts:138` failure on branch main acknowledged, not caused by this change)
|
||||
- [x] Code review pass (feature-dev:code-reviewer, sonnet — no blockers)
|
||||
- [x] Commit + push (commit 1dd4f59)
|
||||
- [x] PR opened (mosaicstack/stack#455)
|
||||
- [x] CI queue guard cleared (no pending pipelines pre-push or pre-merge)
|
||||
- [x] PR merged (squash merge commit b2cec8c6bac29336a6cdcdb4f19806f7b5fa0054)
|
||||
- [x] CI green on main (`ci/woodpecker/push/ci` + `ci/woodpecker/push/publish` both success on merge commit)
|
||||
- [x] Issue #454 closed
|
||||
- [x] Scratchpad final evidence entry
|
||||
|
||||
## Tests run
|
||||
|
||||
- `pnpm --filter @mosaicstack/mosaic run typecheck` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic run lint` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic exec prettier --check "src/**/*.ts"` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic exec vitest run src/commands/launch.spec.ts` → 11/11 pass
|
||||
- `pnpm --filter @mosaicstack/mosaic exec vitest run` → 270/271 pass (1 pre-existing `uninstall.spec.ts:138` EACCES failure, confirmed on the branch before this change)
|
||||
- `pnpm typecheck` (repo) → green
|
||||
- `pnpm lint` (repo) → green
|
||||
- `pnpm format:check` (repo) → green (after prettier-writing the scratchpad)
|
||||
|
||||
## Risks / blockers
|
||||
|
||||
None expected. Refactor is small and the Commander API is stable. Test needs `exitOverride()` to prevent `process.exit` on invalid runtime.
|
||||
|
||||
## Final verification evidence
|
||||
|
||||
- PR: mosaicstack/stack#455 — state `closed`, merged.
|
||||
- Merge commit: `b2cec8c6bac29336a6cdcdb4f19806f7b5fa0054` (squash to `main`).
|
||||
- Post-merge CI (main @ b2cec8c6): `ci/woodpecker/push/ci` = success, `ci/woodpecker/push/publish` = success. (`ci/woodpecker/tag/publish` was last observed as a pre-existing failure on the prior release tag and is unrelated to this change.)
|
||||
- Issue mosaicstack/stack#454 closed with a comment linking the merge commit.
|
||||
- Launch regression suite: `launch.spec.ts` 11/11 pass on main.
|
||||
- Baselines on main after merge are inherited from the PR CI run.
|
||||
- Release decision (`mosaicstack/mosaic` 0.0.30) intentionally deferred to the user — the fix is now sitting on main awaiting a release cut.
|
||||
@@ -28,7 +28,6 @@ export default tseslint.config(
|
||||
'apps/web/e2e/helpers/*.ts',
|
||||
'apps/web/playwright.config.ts',
|
||||
'apps/gateway/vitest.config.ts',
|
||||
'packages/storage/vitest.config.ts',
|
||||
'packages/mosaic/__tests__/*.ts',
|
||||
],
|
||||
},
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
export type { MosaicConfig, StorageTier, MemoryConfigRef } from './mosaic-config.js';
|
||||
export {
|
||||
DEFAULT_LOCAL_CONFIG,
|
||||
DEFAULT_STANDALONE_CONFIG,
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
DEFAULT_TEAM_CONFIG,
|
||||
loadConfig,
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
} from './mosaic-config.js';
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
DEFAULT_LOCAL_CONFIG,
|
||||
DEFAULT_STANDALONE_CONFIG,
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
} from './mosaic-config.js';
|
||||
|
||||
describe('validateConfig — tier enum', () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let stderrSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
stderrSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('accepts tier="local"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'local',
|
||||
storage: { type: 'pglite', dataDir: '.mosaic/storage-pglite' },
|
||||
queue: { type: 'local', dataDir: '.mosaic/queue' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('local');
|
||||
});
|
||||
|
||||
it('accepts tier="standalone"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'standalone',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('accepts tier="federated"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'federated',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'pgvector' },
|
||||
});
|
||||
expect(result.tier).toBe('federated');
|
||||
});
|
||||
|
||||
it('accepts deprecated tier="team" as alias for "standalone" and emits a deprecation warning', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('standalone');
|
||||
expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining('DEPRECATED'));
|
||||
});
|
||||
|
||||
it('rejects an invalid tier with an error listing all three valid values', () => {
|
||||
expect(() =>
|
||||
validateConfig({
|
||||
tier: 'invalid',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
}),
|
||||
).toThrow(/local.*standalone.*federated|federated.*standalone.*local/);
|
||||
});
|
||||
|
||||
it('error message for invalid tier mentions all three valid values', () => {
|
||||
let message = '';
|
||||
try {
|
||||
validateConfig({
|
||||
tier: 'invalid',
|
||||
storage: { type: 'postgres', url: 'postgresql://...' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
} catch (err) {
|
||||
message = err instanceof Error ? err.message : String(err);
|
||||
}
|
||||
expect(message).toContain('"local"');
|
||||
expect(message).toContain('"standalone"');
|
||||
expect(message).toContain('"federated"');
|
||||
});
|
||||
});
|
||||
|
||||
describe('DEFAULT_* config constants', () => {
|
||||
it('DEFAULT_LOCAL_CONFIG has tier="local"', () => {
|
||||
expect(DEFAULT_LOCAL_CONFIG.tier).toBe('local');
|
||||
});
|
||||
|
||||
it('DEFAULT_STANDALONE_CONFIG has tier="standalone"', () => {
|
||||
expect(DEFAULT_STANDALONE_CONFIG.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG has tier="federated" and pgvector memory', () => {
|
||||
expect(DEFAULT_FEDERATED_CONFIG.tier).toBe('federated');
|
||||
expect(DEFAULT_FEDERATED_CONFIG.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG uses port 5433 (distinct from standalone 5432)', () => {
|
||||
const url = (DEFAULT_FEDERATED_CONFIG.storage as { url: string }).url;
|
||||
expect(url).toContain('5433');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG has enableVector=true on storage', () => {
|
||||
const storage = DEFAULT_FEDERATED_CONFIG.storage as {
|
||||
type: string;
|
||||
url: string;
|
||||
enableVector?: boolean;
|
||||
};
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('detectFromEnv — tier env-var routing', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
// Work on a fresh copy so individual tests can set/delete keys freely.
|
||||
process.env = { ...originalEnv };
|
||||
delete process.env['MOSAIC_STORAGE_TIER'];
|
||||
delete process.env['DATABASE_URL'];
|
||||
delete process.env['VALKEY_URL'];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('no env vars → returns local config', () => {
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('local');
|
||||
expect(config.storage.type).toBe('pglite');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated alone → returns federated config with enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
const storage = config.storage as { type: string; enableVector?: boolean };
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated + DATABASE_URL → uses the URL and still has enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
process.env['DATABASE_URL'] = 'postgresql://custom:pass@db.example.com:5432/mydb';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
const storage = config.storage as { type: string; url: string; enableVector?: boolean };
|
||||
expect(storage.url).toBe('postgresql://custom:pass@db.example.com:5432/mydb');
|
||||
expect(storage.enableVector).toBe(true);
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=standalone alone → returns standalone-shaped config (not local)', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('standalone');
|
||||
expect(config.storage.type).toBe('postgres');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
});
|
||||
@@ -7,7 +7,7 @@ import type { QueueAdapterConfig as QueueConfig } from '@mosaicstack/queue';
|
||||
/* Types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export type StorageTier = 'local' | 'standalone' | 'federated';
|
||||
export type StorageTier = 'local' | 'team';
|
||||
|
||||
export interface MemoryConfigRef {
|
||||
type: 'pgvector' | 'sqlite-vec' | 'keyword';
|
||||
@@ -31,21 +31,10 @@ export const DEFAULT_LOCAL_CONFIG: MosaicConfig = {
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
export const DEFAULT_STANDALONE_CONFIG: MosaicConfig = {
|
||||
tier: 'standalone',
|
||||
export const DEFAULT_TEAM_CONFIG: MosaicConfig = {
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
|
||||
tier: 'federated',
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
enableVector: true,
|
||||
},
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'pgvector' },
|
||||
};
|
||||
|
||||
@@ -53,7 +42,7 @@ export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
|
||||
/* Validation */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const VALID_TIERS = new Set<string>(['local', 'standalone', 'federated']);
|
||||
const VALID_TIERS = new Set<string>(['local', 'team']);
|
||||
const VALID_STORAGE_TYPES = new Set<string>(['postgres', 'pglite', 'files']);
|
||||
const VALID_QUEUE_TYPES = new Set<string>(['bullmq', 'local']);
|
||||
const VALID_MEMORY_TYPES = new Set<string>(['pgvector', 'sqlite-vec', 'keyword']);
|
||||
@@ -66,19 +55,9 @@ export function validateConfig(raw: unknown): MosaicConfig {
|
||||
const obj = raw as Record<string, unknown>;
|
||||
|
||||
// tier
|
||||
let tier = obj['tier'];
|
||||
// Deprecated alias: 'team' → 'standalone' (kept for backward-compat with 0.0.x installs)
|
||||
if (tier === 'team') {
|
||||
process.stderr.write(
|
||||
'[mosaic] DEPRECATED: tier="team" is deprecated — use "standalone" instead. ' +
|
||||
'Update your mosaic.config.json.\n',
|
||||
);
|
||||
tier = 'standalone';
|
||||
}
|
||||
const tier = obj['tier'];
|
||||
if (typeof tier !== 'string' || !VALID_TIERS.has(tier)) {
|
||||
throw new Error(
|
||||
`Invalid tier "${String(tier)}" — expected "local", "standalone", or "federated"`,
|
||||
);
|
||||
throw new Error(`Invalid tier "${String(tier)}" — expected "local" or "team"`);
|
||||
}
|
||||
|
||||
// storage
|
||||
@@ -123,33 +102,10 @@ export function validateConfig(raw: unknown): MosaicConfig {
|
||||
/* Loader */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export function detectFromEnv(): MosaicConfig {
|
||||
const tier = process.env['MOSAIC_STORAGE_TIER'];
|
||||
|
||||
if (tier === 'federated') {
|
||||
function detectFromEnv(): MosaicConfig {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_FEDERATED_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
enableVector: true,
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=federated without DATABASE_URL — use the default
|
||||
// federated config (port 5433, enableVector: true, pgvector memory).
|
||||
return DEFAULT_FEDERATED_CONFIG;
|
||||
}
|
||||
|
||||
if (tier === 'standalone') {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
...DEFAULT_TEAM_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
@@ -160,26 +116,6 @@ export function detectFromEnv(): MosaicConfig {
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=standalone without DATABASE_URL — use the default
|
||||
// standalone config instead of silently falling back to local.
|
||||
return DEFAULT_STANDALONE_CONFIG;
|
||||
}
|
||||
|
||||
// Legacy: DATABASE_URL set without MOSAIC_STORAGE_TIER — treat as standalone.
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return DEFAULT_LOCAL_CONFIG;
|
||||
}
|
||||
|
||||
|
||||
@@ -372,11 +372,7 @@ export const messages = pgTable(
|
||||
|
||||
// ─── pgvector custom type ───────────────────────────────────────────────────
|
||||
|
||||
export const vector = customType<{
|
||||
data: number[];
|
||||
driverParam: string;
|
||||
config: { dimensions: number };
|
||||
}>({
|
||||
const vector = customType<{ data: number[]; driverParam: string; config: { dimensions: number } }>({
|
||||
dataType(config) {
|
||||
return `vector(${config?.dimensions ?? 1536})`;
|
||||
},
|
||||
|
||||
@@ -222,17 +222,12 @@ sync_framework
|
||||
mkdir -p "$TARGET_DIR/memory"
|
||||
mkdir -p "$TARGET_DIR/credentials"
|
||||
|
||||
# Seed defaults — copy framework contract files from defaults/ to framework
|
||||
# root if not already present. These ship with sensible defaults but must
|
||||
# Seed defaults — copy from defaults/ to framework root if not already present.
|
||||
# These are user-editable files that ship with sensible defaults but should
|
||||
# never be overwritten once the user has customized them.
|
||||
#
|
||||
# This list must match the framework-contract whitelist in
|
||||
# packages/mosaic/src/config/file-adapter.ts (FileConfigAdapter.syncFramework).
|
||||
# SOUL.md and USER.md are intentionally NOT seeded here — they are generated
|
||||
# by `mosaic init` from templates with user-supplied values.
|
||||
DEFAULTS_DIR="$TARGET_DIR/defaults"
|
||||
if [[ -d "$DEFAULTS_DIR" ]]; then
|
||||
for default_file in AGENTS.md STANDARDS.md TOOLS.md; do
|
||||
for default_file in AGENTS.md STANDARDS.md; do
|
||||
if [[ -f "$DEFAULTS_DIR/$default_file" ]] && [[ ! -f "$TARGET_DIR/$default_file" ]]; then
|
||||
cp "$DEFAULTS_DIR/$default_file" "$TARGET_DIR/$default_file"
|
||||
ok "Seeded $default_file from defaults"
|
||||
|
||||
@@ -5,32 +5,32 @@ Project-specific tooling belongs in the project's `AGENTS.md`, not here.
|
||||
|
||||
## Mosaic Git Wrappers (Use First)
|
||||
|
||||
Mosaic wrappers at `~/.config/mosaic/tools/git/*.sh` handle platform detection and edge cases. Always use these before raw CLI commands.
|
||||
Mosaic wrappers at `~/.config/mosaic/rails/git/*.sh` handle platform detection and edge cases. Always use these before raw CLI commands.
|
||||
|
||||
```bash
|
||||
# Issues
|
||||
~/.config/mosaic/tools/git/issue-create.sh
|
||||
~/.config/mosaic/tools/git/issue-close.sh
|
||||
~/.config/mosaic/rails/git/issue-create.sh
|
||||
~/.config/mosaic/rails/git/issue-close.sh
|
||||
|
||||
# PRs
|
||||
~/.config/mosaic/tools/git/pr-create.sh
|
||||
~/.config/mosaic/tools/git/pr-merge.sh
|
||||
~/.config/mosaic/rails/git/pr-create.sh
|
||||
~/.config/mosaic/rails/git/pr-merge.sh
|
||||
|
||||
# Milestones
|
||||
~/.config/mosaic/tools/git/milestone-create.sh
|
||||
~/.config/mosaic/rails/git/milestone-create.sh
|
||||
|
||||
# CI queue guard (required before push/merge)
|
||||
~/.config/mosaic/tools/git/ci-queue-wait.sh --purpose push|merge
|
||||
~/.config/mosaic/rails/git/ci-queue-wait.sh --purpose push|merge
|
||||
```
|
||||
|
||||
## Code Review (Codex)
|
||||
|
||||
```bash
|
||||
# Code quality review
|
||||
~/.config/mosaic/tools/codex/codex-code-review.sh --uncommitted
|
||||
~/.config/mosaic/rails/codex/codex-code-review.sh --uncommitted
|
||||
|
||||
# Security review
|
||||
~/.config/mosaic/tools/codex/codex-security-review.sh --uncommitted
|
||||
~/.config/mosaic/rails/codex/codex-security-review.sh --uncommitted
|
||||
```
|
||||
|
||||
## Git Providers
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@mosaicstack/mosaic",
|
||||
"version": "0.0.30",
|
||||
"version": "0.0.29",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://git.mosaicstack.dev/mosaicstack/stack.git",
|
||||
|
||||
@@ -1,294 +0,0 @@
|
||||
/**
|
||||
* Unit tests for gateway-doctor.ts (mosaic gateway doctor).
|
||||
*
|
||||
* All external I/O is mocked — no live services required.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import type { TierHealthReport } from '@mosaicstack/storage';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Shared mock state */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const mocks = vi.hoisted(() => {
|
||||
const mockLoadConfig = vi.fn();
|
||||
const mockProbeServiceHealth = vi.fn();
|
||||
const mockExistsSync = vi.fn();
|
||||
|
||||
return { mockLoadConfig, mockProbeServiceHealth, mockExistsSync };
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Module mocks */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
vi.mock('@mosaicstack/config', () => ({
|
||||
loadConfig: mocks.mockLoadConfig,
|
||||
}));
|
||||
|
||||
vi.mock('@mosaicstack/storage', () => ({
|
||||
probeServiceHealth: mocks.mockProbeServiceHealth,
|
||||
}));
|
||||
|
||||
vi.mock('node:fs', () => ({
|
||||
existsSync: mocks.mockExistsSync,
|
||||
}));
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Import SUT */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
import { runGatewayDoctor } from './gateway-doctor.js';
|
||||
import type { MosaicConfig } from '@mosaicstack/config';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Fixtures */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const STANDALONE_CONFIG: MosaicConfig = {
|
||||
tier: 'standalone',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq', url: 'redis://localhost:6380' },
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
const GREEN_REPORT: TierHealthReport = {
|
||||
tier: 'standalone',
|
||||
configPath: '/some/mosaic.config.json',
|
||||
overall: 'green',
|
||||
services: [
|
||||
{ name: 'postgres', status: 'ok', host: 'localhost', port: 5432, durationMs: 42 },
|
||||
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 10 },
|
||||
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
|
||||
],
|
||||
};
|
||||
|
||||
const RED_REPORT: TierHealthReport = {
|
||||
tier: 'standalone',
|
||||
configPath: '/some/mosaic.config.json',
|
||||
overall: 'red',
|
||||
services: [
|
||||
{
|
||||
name: 'postgres',
|
||||
status: 'fail',
|
||||
host: 'localhost',
|
||||
port: 5432,
|
||||
durationMs: 5001,
|
||||
error: {
|
||||
message: 'connection refused',
|
||||
remediation: 'Start Postgres: `docker compose ...`',
|
||||
},
|
||||
},
|
||||
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 8 },
|
||||
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
|
||||
],
|
||||
};
|
||||
|
||||
const FEDERATED_GREEN_REPORT: TierHealthReport = {
|
||||
tier: 'federated',
|
||||
configPath: '/some/mosaic.config.json',
|
||||
overall: 'green',
|
||||
services: [
|
||||
{ name: 'postgres', status: 'ok', host: 'localhost', port: 5433, durationMs: 30 },
|
||||
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 5 },
|
||||
{ name: 'pgvector', status: 'ok', host: 'localhost', port: 5433, durationMs: 25 },
|
||||
],
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Process helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
let stdoutCapture = '';
|
||||
let exitCode: number | undefined;
|
||||
|
||||
function captureOutput(): void {
|
||||
stdoutCapture = '';
|
||||
exitCode = undefined;
|
||||
|
||||
vi.spyOn(process.stdout, 'write').mockImplementation((chunk) => {
|
||||
stdoutCapture += typeof chunk === 'string' ? chunk : chunk.toString();
|
||||
return true;
|
||||
});
|
||||
vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
|
||||
vi.spyOn(process, 'exit').mockImplementation((code?: string | number | null) => {
|
||||
exitCode = typeof code === 'number' ? code : code != null ? Number(code) : undefined;
|
||||
throw new Error(`process.exit(${String(code)})`);
|
||||
});
|
||||
vi.spyOn(console, 'log').mockImplementation((...args: unknown[]) => {
|
||||
stdoutCapture += args.join(' ') + '\n';
|
||||
});
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runGatewayDoctor', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
captureOutput();
|
||||
|
||||
// By default: no config file on disk (env-detection path)
|
||||
mocks.mockExistsSync.mockReturnValue(false);
|
||||
mocks.mockLoadConfig.mockReturnValue(STANDALONE_CONFIG);
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 1. JSON mode: parseable JSON matching the schema */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('JSON mode emits parseable JSON matching TierHealthReport schema', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({ json: true });
|
||||
|
||||
const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
|
||||
expect(parsed.tier).toBe('standalone');
|
||||
expect(parsed.overall).toBe('green');
|
||||
expect(Array.isArray(parsed.services)).toBe(true);
|
||||
expect(parsed.services).toHaveLength(3);
|
||||
|
||||
// Validate shape of each service check
|
||||
for (const svc of parsed.services) {
|
||||
expect(['postgres', 'valkey', 'pgvector']).toContain(svc.name);
|
||||
expect(['ok', 'fail', 'skipped']).toContain(svc.status);
|
||||
expect(typeof svc.durationMs).toBe('number');
|
||||
}
|
||||
|
||||
// JSON mode must be silent on console.log — output goes to process.stdout only.
|
||||
expect(console.log).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('JSON mode for federated with 3 ok services', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(FEDERATED_GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({ json: true });
|
||||
|
||||
const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
|
||||
expect(parsed.tier).toBe('federated');
|
||||
expect(parsed.overall).toBe('green');
|
||||
expect(parsed.services.every((s) => s.status === 'ok')).toBe(true);
|
||||
|
||||
// JSON mode must be silent on console.log — output goes to process.stdout only.
|
||||
expect(console.log).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 2. Plain text mode: service lines and overall verdict */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('plain text mode includes service lines for each service', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
expect(stdoutCapture).toContain('postgres');
|
||||
expect(stdoutCapture).toContain('valkey');
|
||||
expect(stdoutCapture).toContain('pgvector');
|
||||
});
|
||||
|
||||
it('plain text mode includes Overall verdict', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
expect(stdoutCapture).toContain('Overall: GREEN');
|
||||
});
|
||||
|
||||
it('plain text mode shows tier and config path in header', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
expect(stdoutCapture).toContain('Tier: standalone');
|
||||
});
|
||||
|
||||
it('plain text mode shows remediation for failed services', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
|
||||
|
||||
try {
|
||||
await runGatewayDoctor({});
|
||||
} catch {
|
||||
// process.exit throws in test
|
||||
}
|
||||
|
||||
expect(stdoutCapture).toContain('Remediations:');
|
||||
expect(stdoutCapture).toContain('Start Postgres');
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 3. Exit codes */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('exits with code 1 when overall is red', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
|
||||
|
||||
await expect(runGatewayDoctor({})).rejects.toThrow('process.exit(1)');
|
||||
expect(exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it('exits with code 0 (no exit call) when overall is green', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
// process.exit should NOT have been called for green.
|
||||
expect(exitCode).toBeUndefined();
|
||||
});
|
||||
|
||||
it('JSON mode exits with code 1 when overall is red', async () => {
|
||||
mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
|
||||
|
||||
await expect(runGatewayDoctor({ json: true })).rejects.toThrow('process.exit(1)');
|
||||
expect(exitCode).toBe(1);
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 4. --config path override is honored */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('passes --config path to loadConfig when provided', async () => {
|
||||
const customPath = '/custom/path/mosaic.config.json';
|
||||
|
||||
await runGatewayDoctor({ config: customPath });
|
||||
|
||||
// loadConfig should have been called with the resolved custom path.
|
||||
expect(mocks.mockLoadConfig).toHaveBeenCalledWith(
|
||||
expect.stringContaining('mosaic.config.json'),
|
||||
);
|
||||
// The exact call should include the custom path (resolved).
|
||||
const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
|
||||
expect(calledPath).toContain('custom/path/mosaic.config.json');
|
||||
});
|
||||
|
||||
it('calls loadConfig without path when no --config and no file on disk', async () => {
|
||||
mocks.mockExistsSync.mockReturnValue(false);
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
|
||||
// When no file found, resolveConfigPath returns undefined, so loadConfig is called with undefined
|
||||
expect(calledPath).toBeUndefined();
|
||||
});
|
||||
|
||||
it('finds config from cwd when mosaic.config.json exists there', async () => {
|
||||
// First candidate (cwd/mosaic.config.json) exists.
|
||||
mocks.mockExistsSync.mockImplementation((p: unknown) => {
|
||||
return typeof p === 'string' && p.endsWith('mosaic.config.json');
|
||||
});
|
||||
|
||||
await runGatewayDoctor({});
|
||||
|
||||
const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
|
||||
expect(calledPath).toBeDefined();
|
||||
expect(typeof calledPath).toBe('string');
|
||||
expect(calledPath!.endsWith('mosaic.config.json')).toBe(true);
|
||||
});
|
||||
});
|
||||
@@ -1,143 +0,0 @@
|
||||
/**
|
||||
* gateway-doctor.ts — `mosaic gateway doctor` implementation.
|
||||
*
|
||||
* Reports current tier and per-service health (PG, Valkey, pgvector) for the
|
||||
* Mosaic gateway. Supports machine-readable JSON output for CI.
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 — overall green or yellow
|
||||
* 1 — overall red (at least one required service failed)
|
||||
*/
|
||||
|
||||
import { existsSync } from 'node:fs';
|
||||
import { resolve, join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
import { loadConfig } from '@mosaicstack/config';
|
||||
import { probeServiceHealth } from '@mosaicstack/storage';
|
||||
import type { TierHealthReport, ServiceCheck } from '@mosaicstack/storage';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Config resolution */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const CONFIG_CANDIDATES = [
|
||||
resolve(process.cwd(), 'mosaic.config.json'),
|
||||
join(homedir(), '.mosaic', 'mosaic.config.json'),
|
||||
];
|
||||
|
||||
/**
|
||||
* Resolve the config path to report in output.
|
||||
*
|
||||
* Priority:
|
||||
* 1. Explicit `--config <path>` flag
|
||||
* 2. `./mosaic.config.json` (cwd)
|
||||
* 3. `~/.mosaic/mosaic.config.json`
|
||||
* 4. undefined — `loadConfig()` falls back to env-var detection
|
||||
*
|
||||
* `loadConfig()` itself already handles priority 1-3 when passed an explicit
|
||||
* path, and falls back to env-detection when none exists. We resolve here
|
||||
* only so we can surface the path in the health report.
|
||||
*/
|
||||
function resolveConfigPath(explicit?: string): string | undefined {
|
||||
if (explicit) return resolve(explicit);
|
||||
for (const candidate of CONFIG_CANDIDATES) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Output helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const TICK = '\u2713'; // ✓
|
||||
const CROSS = '\u2717'; // ✗
|
||||
const SKIP = '-';
|
||||
|
||||
function padRight(s: string, n: number): string {
|
||||
return s + ' '.repeat(Math.max(0, n - s.length));
|
||||
}
|
||||
|
||||
function serviceLabel(svc: ServiceCheck): string {
|
||||
const hostPort =
|
||||
svc.host !== undefined && svc.port !== undefined ? `${svc.host}:${svc.port.toString()}` : '';
|
||||
const duration = `(${svc.durationMs.toString()}ms)`;
|
||||
|
||||
switch (svc.status) {
|
||||
case 'ok':
|
||||
return ` ${TICK} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration}`;
|
||||
case 'fail': {
|
||||
const errMsg = svc.error?.message ?? 'unknown error';
|
||||
return ` ${CROSS} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration} \u2192 ${errMsg}`;
|
||||
}
|
||||
case 'skipped':
|
||||
return ` ${SKIP} ${padRight(svc.name, 10)} (skipped)`;
|
||||
}
|
||||
}
|
||||
|
||||
function printReport(report: TierHealthReport): void {
|
||||
const configDisplay = report.configPath ?? '(auto-detected)';
|
||||
console.log(`Tier: ${report.tier} Config: ${configDisplay}`);
|
||||
console.log('');
|
||||
|
||||
for (const svc of report.services) {
|
||||
console.log(serviceLabel(svc));
|
||||
}
|
||||
|
||||
console.log('');
|
||||
|
||||
// Print remediations for failed services.
|
||||
const failed = report.services.filter((s) => s.status === 'fail' && s.error);
|
||||
if (failed.length > 0) {
|
||||
console.log('Remediations:');
|
||||
for (const svc of failed) {
|
||||
if (svc.error) {
|
||||
console.log(` ${svc.name}: ${svc.error.remediation}`);
|
||||
}
|
||||
}
|
||||
console.log('');
|
||||
}
|
||||
|
||||
console.log(`Overall: ${report.overall.toUpperCase()}`);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Main runner */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export interface GatewayDoctorOptions {
|
||||
json?: boolean;
|
||||
config?: string;
|
||||
}
|
||||
|
||||
export async function runGatewayDoctor(opts: GatewayDoctorOptions): Promise<void> {
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
|
||||
let mosaicConfig;
|
||||
try {
|
||||
mosaicConfig = loadConfig(configPath);
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
if (opts.json) {
|
||||
process.stdout.write(
|
||||
JSON.stringify({ error: `Failed to load config: ${msg}` }, null, 2) + '\n',
|
||||
);
|
||||
} else {
|
||||
process.stderr.write(`Error: Failed to load config: ${msg}\n`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const report = await probeServiceHealth(mosaicConfig, configPath);
|
||||
|
||||
if (opts.json) {
|
||||
process.stdout.write(JSON.stringify(report, null, 2) + '\n');
|
||||
} else {
|
||||
printReport(report);
|
||||
}
|
||||
|
||||
// Exit 1 if overall is red.
|
||||
if (report.overall === 'red') {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
@@ -206,15 +206,4 @@ export function registerGatewayCommand(program: Command): void {
|
||||
const { runUninstall } = await import('./gateway/uninstall.js');
|
||||
await runUninstall();
|
||||
});
|
||||
|
||||
// ─── doctor ─────────────────────────────────────────────────────────────────
|
||||
|
||||
gw.command('doctor')
|
||||
.description('Check gateway tier and per-service health (PG, Valkey, pgvector)')
|
||||
.option('--json', 'Emit TierHealthReport as JSON to stdout (suppresses all other output)')
|
||||
.option('--config <path>', 'Path to mosaic.config.json (defaults to cwd or ~/.mosaic/)')
|
||||
.action(async (cmdOpts: { json?: boolean; config?: string }) => {
|
||||
const { runGatewayDoctor } = await import('./gateway-doctor.js');
|
||||
await runGatewayDoctor({ json: cmdOpts.json, config: cmdOpts.config });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach, type MockInstance } from 'vitest';
|
||||
import { Command } from 'commander';
|
||||
import { registerRuntimeLaunchers, type RuntimeLaunchHandler } from './launch.js';
|
||||
|
||||
/**
|
||||
* Tests for the commander wiring between `mosaic <runtime>` / `mosaic yolo <runtime>`
|
||||
* subcommands and the internal `launchRuntime` dispatcher.
|
||||
*
|
||||
* Regression target: see mosaicstack/stack#454 — before the fix, `mosaic yolo claude`
|
||||
* passed the literal string "claude" as an excess positional argument to the
|
||||
* underlying CLI, which Claude Code then interpreted as the first user message.
|
||||
*
|
||||
* The bug existed because Commander.js includes declared positional arguments
|
||||
* (here `<runtime>`) in `cmd.args` alongside any true excess args. The action
|
||||
* handler must slice them off before forwarding.
|
||||
*/
|
||||
|
||||
function buildProgram(handler: RuntimeLaunchHandler): Command {
|
||||
const program = new Command();
|
||||
program.exitOverride(); // prevent process.exit on parse errors
|
||||
registerRuntimeLaunchers(program, handler);
|
||||
return program;
|
||||
}
|
||||
|
||||
// `process.exit` returns `never`, so vi.spyOn demands a replacement with the
|
||||
// same signature. We throw from the mock to short-circuit into test-land.
|
||||
const exitThrows = (): never => {
|
||||
throw new Error('process.exit called');
|
||||
};
|
||||
|
||||
describe('registerRuntimeLaunchers — non-yolo subcommands', () => {
|
||||
let mockExit: MockInstance<typeof process.exit>;
|
||||
|
||||
beforeEach(() => {
|
||||
// process.exit is called when the yolo action rejects an invalid runtime.
|
||||
// Stub it so the assertion catches the rejection instead of terminating
|
||||
// the test runner.
|
||||
mockExit = vi.spyOn(process, 'exit').mockImplementation(exitThrows);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mockExit.mockRestore();
|
||||
});
|
||||
|
||||
it.each(['claude', 'codex', 'opencode', 'pi'] as const)(
|
||||
'forwards %s with empty extraArgs and yolo=false',
|
||||
(runtime) => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', runtime]);
|
||||
|
||||
expect(handler).toHaveBeenCalledTimes(1);
|
||||
expect(handler).toHaveBeenCalledWith(runtime, [], false);
|
||||
},
|
||||
);
|
||||
|
||||
it('forwards excess args after a non-yolo runtime subcommand', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'claude', '--print', 'hello']);
|
||||
|
||||
expect(handler).toHaveBeenCalledWith('claude', ['--print', 'hello'], false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('registerRuntimeLaunchers — yolo <runtime>', () => {
|
||||
let mockExit: MockInstance<typeof process.exit>;
|
||||
let mockError: MockInstance<typeof console.error>;
|
||||
|
||||
beforeEach(() => {
|
||||
mockExit = vi.spyOn(process, 'exit').mockImplementation(exitThrows);
|
||||
mockError = vi.spyOn(console, 'error').mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mockExit.mockRestore();
|
||||
mockError.mockRestore();
|
||||
});
|
||||
|
||||
it.each(['claude', 'codex', 'opencode', 'pi'] as const)(
|
||||
'does NOT pass the runtime name as an extra arg (regression #454) for yolo %s',
|
||||
(runtime) => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'yolo', runtime]);
|
||||
|
||||
expect(handler).toHaveBeenCalledTimes(1);
|
||||
// The critical assertion: extraArgs must be empty, not [runtime].
|
||||
// Before the fix, cmd.args was [runtime] and the runtime name leaked
|
||||
// through to the underlying CLI as an initial positional argument.
|
||||
expect(handler).toHaveBeenCalledWith(runtime, [], true);
|
||||
},
|
||||
);
|
||||
|
||||
it('forwards true excess args after a yolo runtime', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'yolo', 'claude', '--print', 'hi']);
|
||||
|
||||
expect(handler).toHaveBeenCalledWith('claude', ['--print', 'hi'], true);
|
||||
});
|
||||
|
||||
it('rejects an unknown runtime under yolo without invoking the handler', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
|
||||
expect(() => program.parse(['node', 'mosaic', 'yolo', 'bogus'])).toThrow('process.exit called');
|
||||
expect(handler).not.toHaveBeenCalled();
|
||||
expect(mockExit).toHaveBeenCalledWith(1);
|
||||
});
|
||||
});
|
||||
@@ -757,23 +757,8 @@ function runUpgrade(args: string[]): never {
|
||||
|
||||
// ─── Commander registration ─────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Handler invoked when a runtime subcommand (`<runtime>` or `yolo <runtime>`)
|
||||
* is parsed. Exposed so tests can exercise the commander wiring without
|
||||
* spawning subprocesses.
|
||||
*/
|
||||
export type RuntimeLaunchHandler = (
|
||||
runtime: RuntimeName,
|
||||
extraArgs: string[],
|
||||
yolo: boolean,
|
||||
) => void;
|
||||
|
||||
/**
|
||||
* Wire `<runtime>` and `yolo <runtime>` subcommands onto `program` using a
|
||||
* pluggable launch handler. Separated from `registerLaunchCommands` so tests
|
||||
* can inject a spy and verify argument forwarding.
|
||||
*/
|
||||
export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunchHandler): void {
|
||||
export function registerLaunchCommands(program: Command): void {
|
||||
// Runtime launchers
|
||||
for (const runtime of ['claude', 'codex', 'opencode', 'pi'] as const) {
|
||||
program
|
||||
.command(runtime)
|
||||
@@ -781,10 +766,11 @@ export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunc
|
||||
.allowUnknownOption(true)
|
||||
.allowExcessArguments(true)
|
||||
.action((_opts: unknown, cmd: Command) => {
|
||||
handler(runtime, cmd.args, false);
|
||||
launchRuntime(runtime, cmd.args, false);
|
||||
});
|
||||
}
|
||||
|
||||
// Yolo mode
|
||||
program
|
||||
.command('yolo <runtime>')
|
||||
.description('Launch a runtime in dangerous-permissions mode (claude|codex|opencode|pi)')
|
||||
@@ -798,20 +784,7 @@ export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunc
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
// Commander includes declared positional arguments (`<runtime>`) in
|
||||
// `cmd.args` alongside any trailing excess args. Slice off the first
|
||||
// element so we forward only true excess args — otherwise the runtime
|
||||
// name leaks into the underlying CLI as an initial positional arg,
|
||||
// which Claude Code interprets as the first user message.
|
||||
// Regression test: launch.spec.ts, issue mosaicstack/stack#454.
|
||||
handler(runtime as RuntimeName, cmd.args.slice(1), true);
|
||||
});
|
||||
}
|
||||
|
||||
export function registerLaunchCommands(program: Command): void {
|
||||
// Runtime launchers + yolo mode wired to the real process-replacing launcher.
|
||||
registerRuntimeLaunchers(program, (runtime, extraArgs, yolo) => {
|
||||
launchRuntime(runtime, extraArgs, yolo);
|
||||
launchRuntime(runtime as RuntimeName, cmd.args, true);
|
||||
});
|
||||
|
||||
// Coord (mission orchestrator)
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { mkdtempSync, mkdirSync, writeFileSync, rmSync, readFileSync, existsSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
import { FileConfigAdapter, DEFAULT_SEED_FILES } from './file-adapter.js';
|
||||
|
||||
/**
|
||||
* Regression tests for the `FileConfigAdapter.syncFramework` seed behavior.
|
||||
*
|
||||
* Background: the bash installer (`framework/install.sh`) and this TS wizard
|
||||
* path both seed framework-contract files from `framework/defaults/` into the
|
||||
* user's mosaic home on first install. Before this fix:
|
||||
*
|
||||
* - The bash installer only seeded `AGENTS.md` and `STANDARDS.md`, leaving
|
||||
* `TOOLS.md` missing despite it being listed as mandatory in the
|
||||
* AGENTS.md load order (position 5).
|
||||
* - The TS wizard iterated every file in `defaults/` and copied it to the
|
||||
* mosaic home root — including `defaults/SOUL.md` (hardcoded "Jarvis"),
|
||||
* `defaults/USER.md` (placeholder), and internal framework files like
|
||||
* `README.md` and `AUDIT-*.md`. That clobbered the identity flow on
|
||||
* fresh installs and leaked framework-internal clutter into the user's
|
||||
* home directory.
|
||||
*
|
||||
* This suite pins the whitelist and the preservation semantics so both
|
||||
* regressions stay fixed.
|
||||
*/
|
||||
|
||||
function makeFixture(): { sourceDir: string; mosaicHome: string; defaultsDir: string } {
|
||||
const root = mkdtempSync(join(tmpdir(), 'mosaic-file-adapter-'));
|
||||
const sourceDir = join(root, 'source');
|
||||
const mosaicHome = join(root, 'mosaic-home');
|
||||
const defaultsDir = join(sourceDir, 'defaults');
|
||||
|
||||
mkdirSync(defaultsDir, { recursive: true });
|
||||
mkdirSync(mosaicHome, { recursive: true });
|
||||
|
||||
// Framework-contract defaults we expect the wizard to seed.
|
||||
writeFileSync(join(defaultsDir, 'AGENTS.md'), '# AGENTS default\n');
|
||||
writeFileSync(join(defaultsDir, 'STANDARDS.md'), '# STANDARDS default\n');
|
||||
writeFileSync(join(defaultsDir, 'TOOLS.md'), '# TOOLS default\n');
|
||||
|
||||
// Non-contract files we must NOT seed on first install.
|
||||
writeFileSync(join(defaultsDir, 'SOUL.md'), '# SOUL default (should not be seeded)\n');
|
||||
writeFileSync(join(defaultsDir, 'USER.md'), '# USER default (should not be seeded)\n');
|
||||
writeFileSync(join(defaultsDir, 'README.md'), '# README (framework-internal)\n');
|
||||
writeFileSync(
|
||||
join(defaultsDir, 'AUDIT-2026-02-17-framework-consistency.md'),
|
||||
'# Audit snapshot\n',
|
||||
);
|
||||
|
||||
return { sourceDir, mosaicHome, defaultsDir };
|
||||
}
|
||||
|
||||
describe('FileConfigAdapter.syncFramework — defaults seeding', () => {
|
||||
let fixture: ReturnType<typeof makeFixture>;
|
||||
|
||||
beforeEach(() => {
|
||||
fixture = makeFixture();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(join(fixture.sourceDir, '..'), { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('seeds the three framework-contract files on a fresh mosaic home', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
for (const name of DEFAULT_SEED_FILES) {
|
||||
expect(existsSync(join(fixture.mosaicHome, name))).toBe(true);
|
||||
}
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'TOOLS.md'), 'utf-8')).toContain(
|
||||
'# TOOLS default',
|
||||
);
|
||||
});
|
||||
|
||||
it('does NOT seed SOUL.md or USER.md from defaults/ (wizard stages own those)', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
// SOUL.md and USER.md live in defaults/ for historical reasons, but they
|
||||
// are template-rendered per-user by the wizard stages. Seeding them here
|
||||
// would clobber the identity flow and leak placeholder content.
|
||||
expect(existsSync(join(fixture.mosaicHome, 'SOUL.md'))).toBe(false);
|
||||
expect(existsSync(join(fixture.mosaicHome, 'USER.md'))).toBe(false);
|
||||
});
|
||||
|
||||
it('does NOT seed README.md or AUDIT-*.md from defaults/', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
expect(existsSync(join(fixture.mosaicHome, 'README.md'))).toBe(false);
|
||||
expect(existsSync(join(fixture.mosaicHome, 'AUDIT-2026-02-17-framework-consistency.md'))).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it('preserves existing contract files — never overwrites user customization', async () => {
|
||||
// Also plant a root-level AGENTS.md in sourceDir so that `syncDirectory`
|
||||
// itself (not just the seed loop) has something to try to overwrite.
|
||||
// Without this, the test would silently pass even if preserve semantics
|
||||
// were broken in syncDirectory.
|
||||
writeFileSync(join(fixture.sourceDir, 'AGENTS.md'), '# shipped AGENTS from source root\n');
|
||||
|
||||
writeFileSync(join(fixture.mosaicHome, 'TOOLS.md'), '# user-customized TOOLS\n');
|
||||
writeFileSync(join(fixture.mosaicHome, 'AGENTS.md'), '# user-customized AGENTS\n');
|
||||
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
await adapter.syncFramework('keep');
|
||||
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'TOOLS.md'), 'utf-8')).toBe(
|
||||
'# user-customized TOOLS\n',
|
||||
);
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'AGENTS.md'), 'utf-8')).toBe(
|
||||
'# user-customized AGENTS\n',
|
||||
);
|
||||
// And the missing contract file still gets seeded.
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'STANDARDS.md'), 'utf-8')).toContain(
|
||||
'# STANDARDS default',
|
||||
);
|
||||
});
|
||||
|
||||
it('is a no-op for seeding when defaults/ dir does not exist', async () => {
|
||||
rmSync(fixture.defaultsDir, { recursive: true });
|
||||
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
await expect(adapter.syncFramework('fresh')).resolves.toBeUndefined();
|
||||
|
||||
expect(existsSync(join(fixture.mosaicHome, 'TOOLS.md'))).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -1,19 +1,5 @@
|
||||
import { readFileSync, existsSync, statSync, copyFileSync } from 'node:fs';
|
||||
import { readFileSync, existsSync, readdirSync, statSync, copyFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
|
||||
/**
|
||||
* Framework-contract files that `syncFramework` seeds from `framework/defaults/`
|
||||
* into the mosaic home root on first install. These are the only files the
|
||||
* wizard is allowed to touch as a one-time seed — SOUL.md and USER.md are
|
||||
* generated from templates by their respective wizard stages with
|
||||
* user-supplied values, and anything else under `defaults/` (README.md,
|
||||
* audit snapshots, etc.) is framework-internal and must not leak into the
|
||||
* user's mosaic home.
|
||||
*
|
||||
* This list must match the explicit seed loop in
|
||||
* packages/mosaic/framework/install.sh.
|
||||
*/
|
||||
export const DEFAULT_SEED_FILES = ['AGENTS.md', 'STANDARDS.md', 'TOOLS.md'] as const;
|
||||
import type { ConfigService, ConfigSection, ResolvedConfig } from './config-service.js';
|
||||
import type { SoulConfig, UserConfig, ToolsConfig, InstallAction } from '../types.js';
|
||||
import { soulSchema, userSchema, toolsSchema } from './schemas.js';
|
||||
@@ -145,24 +131,9 @@ export class FileConfigAdapter implements ConfigService {
|
||||
}
|
||||
|
||||
async syncFramework(action: InstallAction): Promise<void> {
|
||||
// Must match PRESERVE_PATHS in packages/mosaic/framework/install.sh so
|
||||
// the bash and TS install paths have the same upgrade-preservation
|
||||
// semantics. Contract files (AGENTS.md, STANDARDS.md, TOOLS.md) are
|
||||
// seeded from defaults/ on first install and preserved thereafter;
|
||||
// identity files (SOUL.md, USER.md) are generated by wizard stages and
|
||||
// must never be touched by the framework sync.
|
||||
const preservePaths =
|
||||
action === 'keep' || action === 'reconfigure'
|
||||
? [
|
||||
'AGENTS.md',
|
||||
'SOUL.md',
|
||||
'USER.md',
|
||||
'TOOLS.md',
|
||||
'STANDARDS.md',
|
||||
'memory',
|
||||
'sources',
|
||||
'credentials',
|
||||
]
|
||||
? ['SOUL.md', 'USER.md', 'TOOLS.md', 'memory']
|
||||
: [];
|
||||
|
||||
syncDirectory(this.sourceDir, this.mosaicHome, {
|
||||
@@ -170,26 +141,23 @@ export class FileConfigAdapter implements ConfigService {
|
||||
excludeGit: true,
|
||||
});
|
||||
|
||||
// Copy framework-contract files (AGENTS.md, STANDARDS.md, TOOLS.md)
|
||||
// from framework/defaults/ into the mosaic home root if they don't
|
||||
// exist yet. These are written on first install only and are never
|
||||
// overwritten afterwards — the user may have customized them.
|
||||
//
|
||||
// SOUL.md and USER.md are deliberately NOT seeded here. They are
|
||||
// generated from templates by the soul/user wizard stages with
|
||||
// user-supplied values; seeding them from defaults would clobber the
|
||||
// identity flow and leak placeholder content into the mosaic home.
|
||||
// Copy default root-level .md files (AGENTS.md, STANDARDS.md, etc.)
|
||||
// from framework/defaults/ into mosaicHome root if they don't exist yet.
|
||||
// These are framework contracts — only written on first install, never
|
||||
// overwritten (user may have customized them).
|
||||
const defaultsDir = join(this.sourceDir, 'defaults');
|
||||
if (existsSync(defaultsDir)) {
|
||||
for (const entry of DEFAULT_SEED_FILES) {
|
||||
const src = join(defaultsDir, entry);
|
||||
for (const entry of readdirSync(defaultsDir)) {
|
||||
const dest = join(this.mosaicHome, entry);
|
||||
if (existsSync(dest)) continue;
|
||||
if (!existsSync(src) || !statSync(src).isFile()) continue;
|
||||
if (!existsSync(dest)) {
|
||||
const src = join(defaultsDir, entry);
|
||||
if (statSync(src).isFile()) {
|
||||
copyFileSync(src, dest);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async readAll(): Promise<ResolvedConfig> {
|
||||
const [soul, user, tools] = await Promise.all([
|
||||
|
||||
@@ -216,30 +216,7 @@ describe('gatewayConfigStage', () => {
|
||||
expect(daemonState.startCalled).toBe(0);
|
||||
});
|
||||
|
||||
it('honors MOSAIC_STORAGE_TIER=standalone in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/db';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
|
||||
const p = buildPrompter();
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('standalone');
|
||||
const envContents = readFileSync(daemonState.envFile, 'utf-8');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/db');
|
||||
expect(envContents).toContain('VALKEY_URL=redis://test:6379');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('accepts deprecated MOSAIC_STORAGE_TIER=team as alias for standalone', async () => {
|
||||
it('honors MOSAIC_STORAGE_TIER=team in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'team';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/db';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
@@ -253,53 +230,13 @@ describe('gatewayConfigStage', () => {
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
// Deprecated alias 'team' maps to 'standalone'
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('standalone');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('honors MOSAIC_STORAGE_TIER=federated in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/feddb';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
|
||||
const p = buildPrompter();
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('federated');
|
||||
expect(state.gateway?.tier).toBe('team');
|
||||
const envContents = readFileSync(daemonState.envFile, 'utf-8');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/feddb');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/db');
|
||||
expect(envContents).toContain('VALKEY_URL=redis://test:6379');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('federated');
|
||||
expect(mosaicConfig.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('rejects an unknown MOSAIC_STORAGE_TIER value in headless mode with a descriptive warning', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federatd'; // deliberate typo
|
||||
|
||||
const warnFn = vi.fn();
|
||||
const p = buildPrompter({ warn: warnFn });
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
// The stage surfaces validation errors as ready:false (warning is shown to the user).
|
||||
expect(result.ready).toBe(false);
|
||||
// The warning message must name all three valid values.
|
||||
expect(warnFn).toHaveBeenCalledWith(expect.stringMatching(/local.*standalone.*federated/i));
|
||||
expect(mosaicConfig.tier).toBe('team');
|
||||
});
|
||||
|
||||
it('regenerates config when portOverride differs from saved GATEWAY_PORT', async () => {
|
||||
|
||||
@@ -84,15 +84,10 @@ async function promptTier(p: WizardPrompter): Promise<GatewayStorageTier> {
|
||||
hint: 'embedded database, no dependencies',
|
||||
},
|
||||
{
|
||||
value: 'standalone',
|
||||
label: 'Standalone',
|
||||
value: 'team',
|
||||
label: 'Team',
|
||||
hint: 'PostgreSQL + Valkey required',
|
||||
},
|
||||
{
|
||||
value: 'federated',
|
||||
label: 'Federated',
|
||||
hint: 'PostgreSQL + Valkey + pgvector, federation server+client',
|
||||
},
|
||||
],
|
||||
});
|
||||
return tier;
|
||||
@@ -442,21 +437,7 @@ async function collectAndWriteConfig(
|
||||
p.log('Headless mode detected — reading configuration from environment variables.');
|
||||
|
||||
const storageTierEnv = process.env['MOSAIC_STORAGE_TIER'] ?? 'local';
|
||||
if (storageTierEnv === 'team') {
|
||||
// Deprecated alias — warn and treat as standalone
|
||||
process.stderr.write(
|
||||
'[mosaic] DEPRECATED: MOSAIC_STORAGE_TIER=team is deprecated — use "standalone" instead.\n',
|
||||
);
|
||||
tier = 'standalone';
|
||||
} else if (storageTierEnv === 'standalone' || storageTierEnv === 'federated') {
|
||||
tier = storageTierEnv;
|
||||
} else if (storageTierEnv !== '' && storageTierEnv !== 'local') {
|
||||
throw new GatewayConfigValidationError(
|
||||
`Invalid MOSAIC_STORAGE_TIER="${storageTierEnv}" — expected "local", "standalone", or "federated" (deprecated alias "team" also accepted)`,
|
||||
);
|
||||
} else {
|
||||
tier = 'local';
|
||||
}
|
||||
tier = storageTierEnv === 'team' ? 'team' : 'local';
|
||||
|
||||
const portEnv = process.env['MOSAIC_GATEWAY_PORT'];
|
||||
port = portEnv ? parseInt(portEnv, 10) : opts.defaultPort;
|
||||
@@ -472,13 +453,13 @@ async function collectAndWriteConfig(
|
||||
hostname = hostnameEnv;
|
||||
corsOrigin = corsOverride ?? deriveCorsOrigin(hostnameEnv, 3000);
|
||||
|
||||
if (tier === 'standalone' || tier === 'federated') {
|
||||
if (tier === 'team') {
|
||||
const missing: string[] = [];
|
||||
if (!databaseUrl) missing.push('MOSAIC_DATABASE_URL');
|
||||
if (!valkeyUrl) missing.push('MOSAIC_VALKEY_URL');
|
||||
if (missing.length > 0) {
|
||||
throw new GatewayConfigValidationError(
|
||||
`Headless install with tier=${tier} requires env vars: ` + missing.join(', '),
|
||||
'Headless install with tier=team requires env vars: ' + missing.join(', '),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -486,15 +467,11 @@ async function collectAndWriteConfig(
|
||||
tier = await promptTier(p);
|
||||
port = await promptPort(p, opts.defaultPort);
|
||||
|
||||
if (tier === 'standalone' || tier === 'federated') {
|
||||
const defaultDbUrl =
|
||||
tier === 'federated'
|
||||
? 'postgresql://mosaic:mosaic@localhost:5433/mosaic'
|
||||
: 'postgresql://mosaic:mosaic@localhost:5432/mosaic';
|
||||
if (tier === 'team') {
|
||||
databaseUrl = await p.text({
|
||||
message: 'DATABASE_URL',
|
||||
initialValue: defaultDbUrl,
|
||||
defaultValue: defaultDbUrl,
|
||||
initialValue: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
defaultValue: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
});
|
||||
valkeyUrl = await p.text({
|
||||
message: 'VALKEY_URL',
|
||||
@@ -544,7 +521,7 @@ async function collectAndWriteConfig(
|
||||
`OTEL_SERVICE_NAME=mosaic-gateway`,
|
||||
];
|
||||
|
||||
if ((tier === 'standalone' || tier === 'federated') && databaseUrl && valkeyUrl) {
|
||||
if (tier === 'team' && databaseUrl && valkeyUrl) {
|
||||
envLines.push(`DATABASE_URL=${databaseUrl}`);
|
||||
envLines.push(`VALKEY_URL=${valkeyUrl}`);
|
||||
}
|
||||
@@ -568,18 +545,11 @@ async function collectAndWriteConfig(
|
||||
queue: { type: 'local', dataDir: join(opts.gatewayHome, 'queue') },
|
||||
memory: { type: 'keyword' },
|
||||
}
|
||||
: tier === 'federated'
|
||||
? {
|
||||
tier: 'federated',
|
||||
: {
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: databaseUrl },
|
||||
queue: { type: 'bullmq', url: valkeyUrl },
|
||||
memory: { type: 'pgvector' },
|
||||
}
|
||||
: {
|
||||
tier: 'standalone',
|
||||
storage: { type: 'postgres', url: databaseUrl },
|
||||
queue: { type: 'bullmq', url: valkeyUrl },
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
writeFileSync(opts.mosaicConfigFile, JSON.stringify(mosaicConfig, null, 2) + '\n', {
|
||||
|
||||
@@ -58,7 +58,7 @@ export interface HooksState {
|
||||
acceptedAt?: string;
|
||||
}
|
||||
|
||||
export type GatewayStorageTier = 'local' | 'standalone' | 'federated';
|
||||
export type GatewayStorageTier = 'local' | 'team';
|
||||
|
||||
export interface GatewayAdminState {
|
||||
name: string;
|
||||
|
||||
@@ -24,9 +24,7 @@
|
||||
"@electric-sql/pglite": "^0.2.17",
|
||||
"@mosaicstack/db": "workspace:^",
|
||||
"@mosaicstack/types": "workspace:*",
|
||||
"commander": "^13.0.0",
|
||||
"ioredis": "^5.10.0",
|
||||
"postgres": "^3.4.8"
|
||||
"commander": "^13.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.8.0",
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import type { DbHandle } from '@mosaicstack/db';
|
||||
|
||||
// Mock @mosaicstack/db before importing the adapter
|
||||
vi.mock('@mosaicstack/db', async (importOriginal) => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const actual = await importOriginal<Record<string, any>>();
|
||||
return {
|
||||
...actual,
|
||||
createDb: vi.fn(),
|
||||
runMigrations: vi.fn().mockResolvedValue(undefined),
|
||||
};
|
||||
});
|
||||
|
||||
import { createDb, runMigrations } from '@mosaicstack/db';
|
||||
import { PostgresAdapter } from './postgres.js';
|
||||
|
||||
describe('PostgresAdapter — vector extension gating', () => {
|
||||
let mockExecute: ReturnType<typeof vi.fn>;
|
||||
let mockDb: { execute: ReturnType<typeof vi.fn> };
|
||||
let mockHandle: Pick<DbHandle, 'close'> & { db: typeof mockDb };
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockExecute = vi.fn().mockResolvedValue(undefined);
|
||||
mockDb = { execute: mockExecute };
|
||||
mockHandle = { db: mockDb, close: vi.fn().mockResolvedValue(undefined) };
|
||||
vi.mocked(createDb).mockReturnValue(mockHandle as unknown as DbHandle);
|
||||
});
|
||||
|
||||
it('calls db.execute with CREATE EXTENSION IF NOT EXISTS vector when enableVector=true', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
// Should have called execute
|
||||
expect(mockExecute).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Verify the SQL contains the extension creation statement.
|
||||
// Prefer Drizzle's public toSQL() API; fall back to queryChunks if unavailable.
|
||||
// NOTE: queryChunks is an undocumented Drizzle internal (drizzle-orm ^0.45.x).
|
||||
// toSQL() was not present on the raw sql`` result in this version — if a future
|
||||
// Drizzle upgrade adds it, remove the fallback path and delete this comment.
|
||||
const sqlObj = mockExecute.mock.calls[0]![0] as {
|
||||
toSQL?: () => { sql: string; params: unknown[] };
|
||||
queryChunks?: Array<{ value: string[] }>;
|
||||
};
|
||||
const sqlText = sqlObj.toSQL
|
||||
? sqlObj.toSQL().sql.toLowerCase()
|
||||
: (sqlObj.queryChunks ?? [])
|
||||
.flatMap((chunk) => chunk.value)
|
||||
.join('')
|
||||
.toLowerCase();
|
||||
expect(sqlText).toContain('create extension if not exists vector');
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is false', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: false,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is unset', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('calls runMigrations after the extension is created', async () => {
|
||||
const callOrder: string[] = [];
|
||||
mockExecute.mockImplementation(() => {
|
||||
callOrder.push('execute');
|
||||
return Promise.resolve(undefined);
|
||||
});
|
||||
vi.mocked(runMigrations).mockImplementation(() => {
|
||||
callOrder.push('runMigrations');
|
||||
return Promise.resolve();
|
||||
});
|
||||
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(callOrder).toEqual(['execute', 'runMigrations']);
|
||||
});
|
||||
});
|
||||
@@ -66,19 +66,13 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
private handle: DbHandle;
|
||||
private db: Db;
|
||||
private url: string;
|
||||
private enableVector: boolean;
|
||||
|
||||
constructor(config: Extract<StorageConfig, { type: 'postgres' }>) {
|
||||
this.url = config.url;
|
||||
this.enableVector = config.enableVector ?? false;
|
||||
this.handle = createDb(config.url);
|
||||
this.db = this.handle.db;
|
||||
}
|
||||
|
||||
private async ensureVectorExtension(): Promise<void> {
|
||||
await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS vector`);
|
||||
}
|
||||
|
||||
async create<T extends Record<string, unknown>>(
|
||||
collection: string,
|
||||
data: T,
|
||||
@@ -155,9 +149,6 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
}
|
||||
|
||||
async migrate(): Promise<void> {
|
||||
if (this.enableVector) {
|
||||
await this.ensureVectorExtension();
|
||||
}
|
||||
await runMigrations(this.url);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import type { Command } from 'commander';
|
||||
import type { MigrationSource } from './migrate-tier.js';
|
||||
|
||||
/**
|
||||
* Reads the DATABASE_URL environment variable and redacts the password portion.
|
||||
@@ -210,203 +209,6 @@ export function registerStorageCommand(parent: Command): void {
|
||||
}
|
||||
});
|
||||
|
||||
// ── storage migrate-tier ─────────────────────────────────────────────────
|
||||
|
||||
storage
|
||||
.command('migrate-tier')
|
||||
.description('Migrate data from tier: local/standalone → tier: federated (Postgres + pgvector)')
|
||||
.requiredOption(
|
||||
'--to <tier>',
|
||||
'Target tier to migrate to (only "federated" is supported)',
|
||||
'federated',
|
||||
)
|
||||
.requiredOption('--target-url <url>', 'Target federated Postgres connection string (required)')
|
||||
.option(
|
||||
'--source-config <path>',
|
||||
'Path to mosaic.config.json (default: cwd/mosaic.config.json)',
|
||||
)
|
||||
.option('--dry-run', 'Print what would be migrated without writing anything')
|
||||
.option('--yes', 'Skip interactive confirmation prompt (required for non-TTY environments)')
|
||||
.option('--batch-size <n>', 'Rows per transaction batch', '1000')
|
||||
.option('--allow-non-empty', 'Allow writing to a non-empty target (upsert — idempotent)')
|
||||
.action(
|
||||
async (opts: {
|
||||
to: string;
|
||||
targetUrl: string;
|
||||
sourceConfig?: string;
|
||||
dryRun?: boolean;
|
||||
yes?: boolean;
|
||||
batchSize?: string;
|
||||
allowNonEmpty?: boolean;
|
||||
}) => {
|
||||
if (opts.to !== 'federated') {
|
||||
console.error(
|
||||
`[migrate-tier] --to "${opts.to}" is not supported. Only "federated" is allowed.`,
|
||||
);
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
const batchSize = parseInt(opts.batchSize ?? '1000', 10);
|
||||
if (isNaN(batchSize) || batchSize < 1) {
|
||||
console.error('[migrate-tier] --batch-size must be a positive integer.');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// Redact target URL password for display.
|
||||
function redactUrl(url: string): string {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
if (parsed.password) parsed.password = '***';
|
||||
return parsed.toString();
|
||||
} catch {
|
||||
return url.replace(/:([^@/]+)@/, ':***@');
|
||||
}
|
||||
}
|
||||
|
||||
const redactedTarget = redactUrl(opts.targetUrl);
|
||||
const isDryRun = opts.dryRun ?? false;
|
||||
const allowNonEmpty = opts.allowNonEmpty ?? false;
|
||||
|
||||
// Determine source tier from environment.
|
||||
const sourceTier = activeTier();
|
||||
const sourceDesc = configSource();
|
||||
|
||||
console.log('');
|
||||
console.log('[migrate-tier] ─────────────────────────────────────────');
|
||||
console.log(`[migrate-tier] Source tier: ${sourceTier}`);
|
||||
console.log(`[migrate-tier] Source: ${sourceDesc}`);
|
||||
console.log(`[migrate-tier] Target tier: federated (Postgres + pgvector)`);
|
||||
console.log(`[migrate-tier] Target: ${redactedTarget}`);
|
||||
console.log(`[migrate-tier] Batch size: ${batchSize.toString()}`);
|
||||
console.log(`[migrate-tier] Dry run: ${isDryRun.toString()}`);
|
||||
console.log(`[migrate-tier] Allow non-empty: ${allowNonEmpty.toString()}`);
|
||||
console.log('[migrate-tier] ─────────────────────────────────────────');
|
||||
console.log('');
|
||||
|
||||
// Lazy-import core migration logic to keep the CLI thin.
|
||||
const {
|
||||
runMigrateTier,
|
||||
PostgresMigrationTarget,
|
||||
DrizzleMigrationSource,
|
||||
getMigrationOrder,
|
||||
} = await import('./migrate-tier.js');
|
||||
|
||||
// Build source adapter using Drizzle-backed DrizzleMigrationSource.
|
||||
// Both local (PGlite) and standalone (Postgres) sources expose the same
|
||||
// normalized Drizzle schema — this is where the actual domain data lives.
|
||||
let sourceAdapter: MigrationSource;
|
||||
if (sourceTier === 'pglite') {
|
||||
const { createPgliteDb } = await import('@mosaicstack/db');
|
||||
const pgliteDataDir = process.env['PGLITE_DATA_DIR'];
|
||||
if (!pgliteDataDir) {
|
||||
console.error(
|
||||
'[migrate-tier] PGLITE_DATA_DIR is not set. ' +
|
||||
'Cannot open PGlite source — set it to the data directory path.',
|
||||
);
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
const handle = createPgliteDb(pgliteDataDir);
|
||||
// Local/PGlite sources do not have pgvector registered — the embedding
|
||||
// column is omitted from the insights SELECT and set to null on target.
|
||||
sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
|
||||
} else {
|
||||
const { createDb } = await import('@mosaicstack/db');
|
||||
const url = process.env['DATABASE_URL'];
|
||||
if (!url) {
|
||||
console.error('[migrate-tier] DATABASE_URL is not set for postgres source.');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
const handle = createDb(url);
|
||||
// Standalone Postgres may or may not have pgvector — assume it does not
|
||||
// (it is a non-federated tier) so embedding is treated as null.
|
||||
sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
|
||||
}
|
||||
|
||||
// Print per-table row counts for the confirmation prompt.
|
||||
const tablesToMigrate = getMigrationOrder();
|
||||
const counts: Array<{ table: string; count: number }> = [];
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = await sourceAdapter.count(table);
|
||||
counts.push({ table, count: n });
|
||||
}
|
||||
|
||||
console.log('[migrate-tier] Source row counts:');
|
||||
for (const { table, count } of counts) {
|
||||
console.log(` ${table}: ${count.toString()}`);
|
||||
}
|
||||
console.log(' sessions: SKIPPED (ephemeral)');
|
||||
console.log(' verifications: SKIPPED (ephemeral)');
|
||||
console.log(' admin_tokens: SKIPPED (environment-specific)');
|
||||
console.log('');
|
||||
|
||||
// Interactive confirmation unless --yes or dry-run.
|
||||
const isTTY = process.stdin.isTTY;
|
||||
if (!isDryRun) {
|
||||
if (!opts.yes && !isTTY) {
|
||||
console.error(
|
||||
'[migrate-tier] Not running in a TTY and --yes was not passed. ' +
|
||||
'Pass --yes to confirm in headless environments.',
|
||||
);
|
||||
process.exitCode = 1;
|
||||
await sourceAdapter.close();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!opts.yes) {
|
||||
const { createInterface } = await import('node:readline');
|
||||
const rl = createInterface({ input: process.stdin, output: process.stdout });
|
||||
const answer = await new Promise<string>((resolve) => {
|
||||
rl.question(`This will WRITE to ${redactedTarget}. Continue? [y/N] `, (ans) => {
|
||||
rl.close();
|
||||
resolve(ans);
|
||||
});
|
||||
});
|
||||
if (answer.trim().toLowerCase() !== 'y') {
|
||||
console.log('[migrate-tier] Aborted.');
|
||||
await sourceAdapter.close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const target = new PostgresMigrationTarget(opts.targetUrl);
|
||||
|
||||
try {
|
||||
const result = await runMigrateTier(
|
||||
sourceAdapter,
|
||||
target,
|
||||
{
|
||||
targetUrl: opts.targetUrl,
|
||||
dryRun: isDryRun,
|
||||
allowNonEmpty,
|
||||
batchSize,
|
||||
onProgress: (msg) => console.log(msg),
|
||||
},
|
||||
/* sourceHasVector= */ sourceTier === 'postgres',
|
||||
);
|
||||
|
||||
if (result.dryRun) {
|
||||
console.log('[migrate-tier] Dry run complete. No data was written.');
|
||||
} else {
|
||||
console.log(
|
||||
`[migrate-tier] Migration complete. ${result.totalRows.toString()} rows migrated.`,
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(
|
||||
`[migrate-tier] ERROR: ${err instanceof Error ? err.message : String(err)}`,
|
||||
);
|
||||
process.exitCode = 1;
|
||||
} finally {
|
||||
await Promise.all([sourceAdapter.close(), target.close()]);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
// ── storage migrate ──────────────────────────────────────────────────────
|
||||
|
||||
storage
|
||||
|
||||
@@ -1,29 +1,8 @@
|
||||
export type { StorageAdapter, StorageConfig } from './types.js';
|
||||
export { TierDetectionError, detectAndAssertTier, probeServiceHealth } from './tier-detection.js';
|
||||
export type { ServiceCheck, TierHealthReport } from './tier-detection.js';
|
||||
export { createStorageAdapter, registerStorageAdapter } from './factory.js';
|
||||
export { PostgresAdapter } from './adapters/postgres.js';
|
||||
export { PgliteAdapter } from './adapters/pglite.js';
|
||||
export { registerStorageCommand } from './cli.js';
|
||||
export {
|
||||
getMigrationOrder,
|
||||
topoSort,
|
||||
runMigrateTier,
|
||||
checkTargetPreconditions,
|
||||
normaliseSourceRow,
|
||||
PostgresMigrationTarget,
|
||||
DrizzleMigrationSource,
|
||||
SKIP_TABLES,
|
||||
MIGRATION_ORDER,
|
||||
MigrationPreconditionError,
|
||||
} from './migrate-tier.js';
|
||||
export type {
|
||||
MigrationSource,
|
||||
MigrationTarget,
|
||||
MigrateTierOptions,
|
||||
MigrateTierResult,
|
||||
TableMigrationResult,
|
||||
} from './migrate-tier.js';
|
||||
|
||||
import { registerStorageAdapter } from './factory.js';
|
||||
import { PostgresAdapter } from './adapters/postgres.js';
|
||||
|
||||
@@ -1,495 +0,0 @@
|
||||
/**
|
||||
* migrate-tier.spec.ts — Unit tests for the migrate-tier core logic.
|
||||
*
|
||||
* These are pure unit tests — no real database connections.
|
||||
* FED-M1-08 will add integration tests against real services.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import {
|
||||
getMigrationOrder,
|
||||
topoSort,
|
||||
runMigrateTier,
|
||||
checkTargetPreconditions,
|
||||
normaliseSourceRow,
|
||||
SKIP_TABLES,
|
||||
MigrationPreconditionError,
|
||||
type MigrationSource,
|
||||
type MigrationTarget,
|
||||
} from './migrate-tier.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Mock factories */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Build a mock MigrationSource backed by an in-memory table map.
|
||||
* Implements the DrizzleMigrationSource-shaped contract:
|
||||
* - readTable(tableName, opts?) returns paginated rows
|
||||
* - count(tableName) returns row count
|
||||
*
|
||||
* The `sourceHasVector` flag controls whether the mock simulates the
|
||||
* no-pgvector projection: when false and tableName is 'insights', rows
|
||||
* are returned WITHOUT the 'embedding' field (matching DrizzleMigrationSource
|
||||
* behaviour for local/PGlite sources).
|
||||
*/
|
||||
function makeMockSource(
|
||||
data: Record<string, Record<string, unknown>[]>,
|
||||
sourceHasVector = true,
|
||||
): MigrationSource & {
|
||||
readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }>;
|
||||
} {
|
||||
const readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }> = [];
|
||||
return {
|
||||
readTableCalls,
|
||||
readTable: vi.fn(async (tableName: string, opts?: { limit?: number; offset?: number }) => {
|
||||
readTableCalls.push({ table: tableName, opts });
|
||||
let rows = data[tableName] ?? [];
|
||||
// Simulate no-vector projection: omit 'embedding' from insights rows
|
||||
// when sourceHasVector is false (matches DrizzleMigrationSource behaviour).
|
||||
if (tableName === 'insights' && !sourceHasVector) {
|
||||
rows = rows.map(({ embedding: _omit, ...rest }) => rest);
|
||||
}
|
||||
const offset = opts?.offset ?? 0;
|
||||
const limit = opts?.limit ?? rows.length;
|
||||
return rows.slice(offset, offset + limit);
|
||||
}),
|
||||
count: vi.fn(async (tableName: string) => (data[tableName] ?? []).length),
|
||||
close: vi.fn(async () => undefined),
|
||||
};
|
||||
}
|
||||
|
||||
function makeMockTarget(opts?: {
|
||||
hasPgvector?: boolean;
|
||||
nonEmptyTable?: string;
|
||||
}): MigrationTarget & { upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> } {
|
||||
const upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> = [];
|
||||
const storedCounts: Record<string, number> = {};
|
||||
|
||||
return {
|
||||
upsertCalls,
|
||||
upsertBatch: vi.fn(async (table: string, rows: Record<string, unknown>[]) => {
|
||||
upsertCalls.push({ table, rows });
|
||||
storedCounts[table] = (storedCounts[table] ?? 0) + rows.length;
|
||||
}),
|
||||
count: vi.fn(async (table: string) => {
|
||||
if (opts?.nonEmptyTable === table) return 5;
|
||||
return storedCounts[table] ?? 0;
|
||||
}),
|
||||
hasPgvector: vi.fn(async () => opts?.hasPgvector ?? true),
|
||||
close: vi.fn(async () => undefined),
|
||||
};
|
||||
}
|
||||
|
||||
function noopProgress(): (msg: string) => void {
|
||||
return () => undefined;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 1. Topological ordering */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('topoSort', () => {
|
||||
it('returns empty array for empty input', () => {
|
||||
expect(topoSort(new Map())).toEqual([]);
|
||||
});
|
||||
|
||||
it('orders parents before children — linear chain', () => {
|
||||
// users -> teams -> messages
|
||||
const deps = new Map([
|
||||
['users', []],
|
||||
['teams', ['users']],
|
||||
['messages', ['teams']],
|
||||
]);
|
||||
const order = topoSort(deps);
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
|
||||
expect(order.indexOf('teams')).toBeLessThan(order.indexOf('messages'));
|
||||
});
|
||||
|
||||
it('orders parents before children — diamond graph', () => {
|
||||
// a -> (b, c) -> d
|
||||
const deps = new Map([
|
||||
['a', []],
|
||||
['b', ['a']],
|
||||
['c', ['a']],
|
||||
['d', ['b', 'c']],
|
||||
]);
|
||||
const order = topoSort(deps);
|
||||
expect(order.indexOf('a')).toBeLessThan(order.indexOf('b'));
|
||||
expect(order.indexOf('a')).toBeLessThan(order.indexOf('c'));
|
||||
expect(order.indexOf('b')).toBeLessThan(order.indexOf('d'));
|
||||
expect(order.indexOf('c')).toBeLessThan(order.indexOf('d'));
|
||||
});
|
||||
|
||||
it('throws on cyclic dependencies', () => {
|
||||
const deps = new Map([
|
||||
['a', ['b']],
|
||||
['b', ['a']],
|
||||
]);
|
||||
expect(() => topoSort(deps)).toThrow('Cycle detected');
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 2. getMigrationOrder — sessions / verifications excluded */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('getMigrationOrder', () => {
|
||||
it('does not include "sessions"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('sessions');
|
||||
});
|
||||
|
||||
it('does not include "verifications"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('verifications');
|
||||
});
|
||||
|
||||
it('does not include "admin_tokens"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('admin_tokens');
|
||||
});
|
||||
|
||||
it('includes "users" before "teams"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
|
||||
});
|
||||
|
||||
it('includes "users" before "conversations"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('conversations'));
|
||||
});
|
||||
|
||||
it('includes "conversations" before "messages"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('conversations')).toBeLessThan(order.indexOf('messages'));
|
||||
});
|
||||
|
||||
it('includes "projects" before "agents"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('projects')).toBeLessThan(order.indexOf('agents'));
|
||||
});
|
||||
|
||||
it('includes "agents" before "conversations"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('agents')).toBeLessThan(order.indexOf('conversations'));
|
||||
});
|
||||
|
||||
it('includes "missions" before "mission_tasks"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('missions')).toBeLessThan(order.indexOf('mission_tasks'));
|
||||
});
|
||||
|
||||
it('includes all expected tables', () => {
|
||||
const order = getMigrationOrder();
|
||||
const expected = [
|
||||
'users',
|
||||
'teams',
|
||||
'accounts',
|
||||
'projects',
|
||||
'agents',
|
||||
'conversations',
|
||||
'messages',
|
||||
'insights',
|
||||
];
|
||||
for (const t of expected) {
|
||||
expect(order).toContain(t);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 3. Dry-run makes no writes */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — dry-run', () => {
|
||||
it('makes no calls to upsertBatch', async () => {
|
||||
const source = makeMockSource({
|
||||
users: [{ id: 'u1', name: 'Alice', email: 'alice@example.com' }],
|
||||
});
|
||||
const target = makeMockTarget();
|
||||
|
||||
const result = await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: true,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
expect(target.upsertCalls).toHaveLength(0);
|
||||
expect(result.dryRun).toBe(true);
|
||||
expect(result.totalRows).toBe(0);
|
||||
});
|
||||
|
||||
it('does not call checkTargetPreconditions in dry-run', async () => {
|
||||
// Even if hasPgvector is false, dry-run should not throw.
|
||||
const source = makeMockSource({});
|
||||
const target = makeMockTarget({ hasPgvector: false });
|
||||
|
||||
await expect(
|
||||
runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: true,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
}),
|
||||
).resolves.not.toThrow();
|
||||
|
||||
// hasPgvector should NOT have been called during dry run.
|
||||
expect(target.hasPgvector).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 4. Idempotency */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — idempotency', () => {
|
||||
it('produces the same logical row count on second run (upsert semantics)', async () => {
|
||||
const userData = [
|
||||
{ id: 'u1', name: 'Alice', email: 'alice@example.com' },
|
||||
{ id: 'u2', name: 'Bob', email: 'bob@example.com' },
|
||||
];
|
||||
|
||||
const source = makeMockSource({ users: userData });
|
||||
|
||||
// First run target.
|
||||
const target1 = makeMockTarget();
|
||||
await runMigrateTier(source, target1, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const firstRunUpserts = target1.upsertCalls.filter((c) => c.table === 'users');
|
||||
const firstRunRows = firstRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
|
||||
|
||||
// Second run — allowNonEmpty because first run already wrote rows.
|
||||
const target2 = makeMockTarget();
|
||||
await runMigrateTier(source, target2, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: true,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const secondRunUpserts = target2.upsertCalls.filter((c) => c.table === 'users');
|
||||
const secondRunRows = secondRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
|
||||
|
||||
// Both runs write the same number of rows (upsert — second run updates in place).
|
||||
expect(firstRunRows).toBe(userData.length);
|
||||
expect(secondRunRows).toBe(userData.length);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 5. Empty-target precondition */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('checkTargetPreconditions', () => {
|
||||
it('throws when target table is non-empty and allowNonEmpty is false', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toThrow(
|
||||
MigrationPreconditionError,
|
||||
);
|
||||
});
|
||||
|
||||
it('includes remediation hint in thrown error', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
|
||||
name: 'MigrationPreconditionError',
|
||||
remediation: expect.stringContaining('--allow-non-empty'),
|
||||
});
|
||||
});
|
||||
|
||||
it('does NOT throw when allowNonEmpty is true', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
await expect(checkTargetPreconditions(target, true, ['users'])).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
it('throws when pgvector extension is missing', async () => {
|
||||
const target = makeMockTarget({ hasPgvector: false });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
|
||||
name: 'MigrationPreconditionError',
|
||||
remediation: expect.stringContaining('pgvector'),
|
||||
});
|
||||
});
|
||||
|
||||
it('passes when target is empty and pgvector is present', async () => {
|
||||
const target = makeMockTarget({ hasPgvector: true });
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).resolves.not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 6. Skipped tables documented */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('SKIP_TABLES', () => {
|
||||
it('includes "sessions"', () => {
|
||||
expect(SKIP_TABLES.has('sessions')).toBe(true);
|
||||
});
|
||||
|
||||
it('includes "verifications"', () => {
|
||||
expect(SKIP_TABLES.has('verifications')).toBe(true);
|
||||
});
|
||||
|
||||
it('includes "admin_tokens"', () => {
|
||||
expect(SKIP_TABLES.has('admin_tokens')).toBe(true);
|
||||
});
|
||||
|
||||
it('migration result includes skipped table entries', async () => {
|
||||
const source = makeMockSource({});
|
||||
const target = makeMockTarget();
|
||||
|
||||
const result = await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const skippedNames = result.tables.filter((t) => t.skipped).map((t) => t.table);
|
||||
expect(skippedNames).toContain('sessions');
|
||||
expect(skippedNames).toContain('verifications');
|
||||
expect(skippedNames).toContain('admin_tokens');
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 7. Embedding NULL on migrate from non-pgvector source */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('normaliseSourceRow — embedding handling', () => {
|
||||
it('sets embedding to null when sourceHasVector is false and table is insights', () => {
|
||||
const row: Record<string, unknown> = {
|
||||
id: 'ins-1',
|
||||
content: 'Some insight',
|
||||
userId: 'u1',
|
||||
};
|
||||
const normalised = normaliseSourceRow('insights', row, false);
|
||||
expect(normalised['embedding']).toBeNull();
|
||||
});
|
||||
|
||||
it('preserves existing embedding when sourceHasVector is true', () => {
|
||||
const embedding = [0.1, 0.2, 0.3];
|
||||
const row: Record<string, unknown> = {
|
||||
id: 'ins-1',
|
||||
content: 'Some insight',
|
||||
userId: 'u1',
|
||||
embedding,
|
||||
};
|
||||
const normalised = normaliseSourceRow('insights', row, true);
|
||||
expect(normalised['embedding']).toBe(embedding);
|
||||
});
|
||||
|
||||
it('does not add embedding field to non-vector tables', () => {
|
||||
const row: Record<string, unknown> = { id: 'u1', name: 'Alice' };
|
||||
const normalised = normaliseSourceRow('users', row, false);
|
||||
expect('embedding' in normalised).toBe(false);
|
||||
});
|
||||
|
||||
it('passes through rows for non-vector tables unchanged', () => {
|
||||
const row: Record<string, unknown> = { id: 'u1', name: 'Alice', email: 'alice@test.com' };
|
||||
const normalised = normaliseSourceRow('users', row, false);
|
||||
expect(normalised).toEqual(row);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 8. End-to-end: correct order of upsert calls */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — migration order', () => {
|
||||
it('writes users before messages', async () => {
|
||||
const source = makeMockSource({
|
||||
users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
|
||||
messages: [{ id: 'm1', conversationId: 'c1', role: 'user', content: 'Hi' }],
|
||||
});
|
||||
const target = makeMockTarget();
|
||||
|
||||
await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const tableOrder = target.upsertCalls.map((c) => c.table);
|
||||
const usersIdx = tableOrder.indexOf('users');
|
||||
const messagesIdx = tableOrder.indexOf('messages');
|
||||
// users must appear before messages in the upsert call sequence.
|
||||
expect(usersIdx).toBeGreaterThanOrEqual(0);
|
||||
expect(messagesIdx).toBeGreaterThanOrEqual(0);
|
||||
expect(usersIdx).toBeLessThan(messagesIdx);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 9. Embedding-null projection: no-pgvector source */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('DrizzleMigrationSource embedding-null projection', () => {
|
||||
it(
|
||||
'when sourceHasVector is false, readTable for insights omits embedding column ' +
|
||||
'and normaliseSourceRow sets it to null for the target insert',
|
||||
async () => {
|
||||
// Source has insights data but no vector — embedding omitted at read time.
|
||||
const insightRowWithEmbedding = {
|
||||
id: 'ins-1',
|
||||
userId: 'u1',
|
||||
content: 'Test insight',
|
||||
embedding: [0.1, 0.2, 0.3], // present in raw data but omitted by source
|
||||
source: 'agent',
|
||||
category: 'general',
|
||||
relevanceScore: 1.0,
|
||||
};
|
||||
|
||||
// makeMockSource with sourceHasVector=false simulates DrizzleMigrationSource
|
||||
// behaviour: the embedding field is stripped from the returned row.
|
||||
const source = makeMockSource(
|
||||
{
|
||||
users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
|
||||
insights: [insightRowWithEmbedding],
|
||||
},
|
||||
/* sourceHasVector= */ false,
|
||||
);
|
||||
const target = makeMockTarget();
|
||||
|
||||
await runMigrateTier(
|
||||
source,
|
||||
target,
|
||||
{
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
},
|
||||
/* sourceHasVector= */ false,
|
||||
);
|
||||
|
||||
// Assert: readTable was called for insights
|
||||
const insightsRead = source.readTableCalls.find((c) => c.table === 'insights');
|
||||
expect(insightsRead).toBeDefined();
|
||||
|
||||
// Assert: the upsert to insights has embedding === null (not the original vector)
|
||||
const insightsUpsert = target.upsertCalls.find((c) => c.table === 'insights');
|
||||
expect(insightsUpsert).toBeDefined();
|
||||
const upsertedRow = insightsUpsert!.rows[0];
|
||||
expect(upsertedRow).toBeDefined();
|
||||
// embedding must be null — not the original [0.1, 0.2, 0.3]
|
||||
expect(upsertedRow!['embedding']).toBeNull();
|
||||
// Other fields must pass through unchanged
|
||||
expect(upsertedRow!['id']).toBe('ins-1');
|
||||
expect(upsertedRow!['content']).toBe('Test insight');
|
||||
},
|
||||
);
|
||||
});
|
||||
@@ -1,697 +0,0 @@
|
||||
/**
|
||||
* migrate-tier.ts — Core logic for `mosaic storage migrate-tier`.
|
||||
*
|
||||
* Migrates data from `tier: local` (PGlite, normalized Drizzle schema) or
|
||||
* `tier: standalone` (Postgres without pgvector) → `tier: federated`
|
||||
* (Postgres + pgvector).
|
||||
*
|
||||
* Source: DrizzleMigrationSource — reads from the NORMALIZED Drizzle/relational
|
||||
* schema tables (not the flat `id TEXT + data JSONB` PgliteAdapter schema).
|
||||
* Both local (PGlite) and standalone (Postgres) sources use the same Drizzle
|
||||
* abstraction via createPgliteDb() or createDb() from @mosaicstack/db.
|
||||
* Target: PostgresMigrationTarget — upserts via raw SQL into the same schema.
|
||||
*
|
||||
* Key design decisions:
|
||||
* - Tables are migrated in topological (FK-dependency) order so that
|
||||
* parent rows exist before child rows are inserted.
|
||||
* - sessions + verifications are skipped — they are ephemeral / TTL'd.
|
||||
* - adminTokens is skipped — token hashes are environment-specific
|
||||
* and should be re-issued on the target.
|
||||
* - insights.embedding is omitted from source SELECT when the source lacks
|
||||
* pgvector (local/PGlite tier); target insert gets NULL for that column.
|
||||
* insights.embedding is nullable per schema (no .notNull() constraint).
|
||||
* - Each table's batch is wrapped in a transaction for atomicity.
|
||||
* - Upsert semantics (ON CONFLICT DO UPDATE) make re-runs idempotent.
|
||||
*
|
||||
* TODO (FED-M1-08): Add integration tests against real PGlite → real PG.
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import * as schema from '@mosaicstack/db';
|
||||
import { sql as drizzleSql } from '@mosaicstack/db';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export interface MigrationSource {
|
||||
/**
|
||||
* Return all rows from a table (normalized Drizzle schema rows).
|
||||
* When sourceHasVector is false and the table has a vector column,
|
||||
* the source MUST omit the vector column from the result and the
|
||||
* caller will set it to null (see normaliseSourceRow).
|
||||
*/
|
||||
readTable(
|
||||
tableName: string,
|
||||
opts?: { limit?: number; offset?: number },
|
||||
): Promise<Record<string, unknown>[]>;
|
||||
|
||||
/** Count rows in a table. */
|
||||
count(tableName: string): Promise<number>;
|
||||
|
||||
/** Close the source connection. */
|
||||
close(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface MigrationTarget {
|
||||
/**
|
||||
* Upsert a batch of rows into a table.
|
||||
* Must use ON CONFLICT (id) DO UPDATE semantics.
|
||||
*/
|
||||
upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void>;
|
||||
|
||||
/**
|
||||
* Count rows in a target table.
|
||||
*/
|
||||
count(tableName: string): Promise<number>;
|
||||
|
||||
/**
|
||||
* Check whether pgvector extension is installed.
|
||||
*/
|
||||
hasPgvector(): Promise<boolean>;
|
||||
|
||||
/** Close the target connection. */
|
||||
close(): Promise<void>;
|
||||
}
|
||||
|
||||
export interface MigrateTierOptions {
|
||||
/** Target postgres connection URL. */
|
||||
targetUrl: string;
|
||||
/** Whether to skip all writes (dry-run). */
|
||||
dryRun: boolean;
|
||||
/** Skip the non-empty target guard. */
|
||||
allowNonEmpty: boolean;
|
||||
/** Rows per transaction batch. */
|
||||
batchSize: number;
|
||||
/** Called with progress messages. */
|
||||
onProgress: (msg: string) => void;
|
||||
}
|
||||
|
||||
export interface TableMigrationResult {
|
||||
table: string;
|
||||
rowsMigrated: number;
|
||||
skipped: boolean;
|
||||
skipReason?: string;
|
||||
}
|
||||
|
||||
export interface MigrateTierResult {
|
||||
tables: TableMigrationResult[];
|
||||
totalRows: number;
|
||||
dryRun: boolean;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Schema: FK-aware topological table order */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* SKIP_TABLES: ephemeral or environment-specific tables not worth migrating.
|
||||
*
|
||||
* - sessions: TTL'd auth sessions — invalid in new environment.
|
||||
* - verifications: one-time tokens (email verify, etc.) — already expired.
|
||||
* - admin_tokens: hashed tokens bound to old environment keys — re-issue.
|
||||
*/
|
||||
export const SKIP_TABLES = new Set(['sessions', 'verifications', 'admin_tokens']);
|
||||
|
||||
/**
|
||||
* Topologically ordered table list (parents before children).
|
||||
*
|
||||
* Derived from FK references in packages/db/src/schema.ts:
|
||||
*
|
||||
* users (no FKs)
|
||||
* teams → users
|
||||
* team_members → teams, users
|
||||
* accounts → users
|
||||
* projects → users, teams
|
||||
* agents → projects, users
|
||||
* missions → projects, users
|
||||
* tasks → projects, missions
|
||||
* mission_tasks → missions, tasks, users
|
||||
* conversations → users, projects, agents
|
||||
* messages → conversations
|
||||
* preferences → users
|
||||
* insights → users [has embedding vector column]
|
||||
* agent_logs → users
|
||||
* skills → users (installedBy, nullable)
|
||||
* routing_rules → users (userId, nullable)
|
||||
* provider_credentials → users
|
||||
* appreciations (no FKs)
|
||||
* events (no FKs)
|
||||
* tickets (no FKs)
|
||||
* summarization_jobs (no FKs)
|
||||
*
|
||||
* Skipped (not in this list):
|
||||
* sessions → users (ephemeral)
|
||||
* verifications (no FKs, ephemeral)
|
||||
* admin_tokens → users (environment-specific)
|
||||
*/
|
||||
export const MIGRATION_ORDER: string[] = [
|
||||
'users',
|
||||
'teams',
|
||||
'team_members',
|
||||
'accounts',
|
||||
'projects',
|
||||
'agents',
|
||||
'missions',
|
||||
'tasks',
|
||||
'mission_tasks',
|
||||
'conversations',
|
||||
'messages',
|
||||
'preferences',
|
||||
'insights',
|
||||
'agent_logs',
|
||||
'skills',
|
||||
'routing_rules',
|
||||
'provider_credentials',
|
||||
'appreciations',
|
||||
'events',
|
||||
'tickets',
|
||||
'summarization_jobs',
|
||||
];
|
||||
|
||||
/** Tables that carry a vector embedding column on the target. */
|
||||
const VECTOR_TABLES = new Set(['insights']);
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Utility: derive topological order from an adjacency list */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Given an adjacency list (table → list of tables it depends on),
|
||||
* return a valid topological ordering (Kahn's algorithm).
|
||||
*
|
||||
* Exposed for unit testing.
|
||||
*/
|
||||
export function topoSort(deps: Map<string, string[]>): string[] {
|
||||
const nodes = [...deps.keys()];
|
||||
const inDegree = new Map<string, number>();
|
||||
const adjReverse = new Map<string, string[]>();
|
||||
|
||||
for (const node of nodes) {
|
||||
if (!inDegree.has(node)) inDegree.set(node, 0);
|
||||
if (!adjReverse.has(node)) adjReverse.set(node, []);
|
||||
for (const dep of deps.get(node) ?? []) {
|
||||
inDegree.set(node, (inDegree.get(node) ?? 0) + 1);
|
||||
if (!adjReverse.has(dep)) adjReverse.set(dep, []);
|
||||
adjReverse.get(dep)!.push(node);
|
||||
}
|
||||
}
|
||||
|
||||
// Start with nodes that have no dependencies.
|
||||
const queue: string[] = [];
|
||||
for (const [node, deg] of inDegree) {
|
||||
if (deg === 0) queue.push(node);
|
||||
}
|
||||
|
||||
const result: string[] = [];
|
||||
while (queue.length > 0) {
|
||||
const node = queue.shift()!;
|
||||
result.push(node);
|
||||
for (const dependent of adjReverse.get(node) ?? []) {
|
||||
const newDeg = (inDegree.get(dependent) ?? 0) - 1;
|
||||
inDegree.set(dependent, newDeg);
|
||||
if (newDeg === 0) queue.push(dependent);
|
||||
}
|
||||
}
|
||||
|
||||
if (result.length !== nodes.length) {
|
||||
throw new Error('Cycle detected in FK dependency graph');
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the migration table order, excluding SKIP_TABLES.
|
||||
* Uses the pre-computed MIGRATION_ORDER (verified against schema.ts).
|
||||
*/
|
||||
export function getMigrationOrder(): string[] {
|
||||
return MIGRATION_ORDER.filter((t) => !SKIP_TABLES.has(t));
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* TABLE_OBJECTS: migration table name → Drizzle table object */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Maps MIGRATION_ORDER table names to their corresponding Drizzle table
|
||||
* objects from the normalized schema. Used by DrizzleMigrationSource to
|
||||
* execute typed `db.select().from(table)` queries.
|
||||
*
|
||||
* Keyed by snake_case table name (matching MIGRATION_ORDER + SKIP_TABLES).
|
||||
*/
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const TABLE_OBJECTS: Record<string, any> = {
|
||||
users: schema.users,
|
||||
teams: schema.teams,
|
||||
team_members: schema.teamMembers,
|
||||
accounts: schema.accounts,
|
||||
projects: schema.projects,
|
||||
agents: schema.agents,
|
||||
missions: schema.missions,
|
||||
tasks: schema.tasks,
|
||||
mission_tasks: schema.missionTasks,
|
||||
conversations: schema.conversations,
|
||||
messages: schema.messages,
|
||||
preferences: schema.preferences,
|
||||
insights: schema.insights,
|
||||
agent_logs: schema.agentLogs,
|
||||
skills: schema.skills,
|
||||
routing_rules: schema.routingRules,
|
||||
provider_credentials: schema.providerCredentials,
|
||||
appreciations: schema.appreciations,
|
||||
events: schema.events,
|
||||
tickets: schema.tickets,
|
||||
summarization_jobs: schema.summarizationJobs,
|
||||
// Skipped tables — included so count() works for preflight but never passed
|
||||
// to upsertBatch.
|
||||
sessions: schema.sessions,
|
||||
verifications: schema.verifications,
|
||||
admin_tokens: schema.adminTokens,
|
||||
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* DrizzleMigrationSource */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* MigrationSource backed by a Drizzle DB handle (works with both
|
||||
* PostgresJsDatabase and PgliteDatabase — they share the same Drizzle
|
||||
* query API for schema-defined tables).
|
||||
*
|
||||
* For the `insights` table (the only vector-column table), when the source
|
||||
* lacks pgvector (local/PGlite tier), the `embedding` column is excluded
|
||||
* from the SELECT projection via a raw `db.execute()` query that lists
|
||||
* only non-vector columns. This prevents a type-registration error from
|
||||
* PGlite, which does not know the `vector` type. The caller (runMigrateTier
|
||||
* via normaliseSourceRow) will set embedding to null on the resulting rows.
|
||||
*
|
||||
* Column projection is opt-in: pass `sourceHasVector: false` to activate it.
|
||||
*/
|
||||
export class DrizzleMigrationSource implements MigrationSource {
|
||||
constructor(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
private readonly db: any,
|
||||
private readonly sourceHasVector: boolean = true,
|
||||
) {}
|
||||
|
||||
/**
|
||||
* Columns of the insights table that do NOT include the vector embedding.
|
||||
* Used for the no-pgvector projection path.
|
||||
*/
|
||||
private static readonly INSIGHTS_COLUMNS_NO_VECTOR = [
|
||||
'id',
|
||||
'user_id',
|
||||
'content',
|
||||
'source',
|
||||
'category',
|
||||
'relevance_score',
|
||||
'metadata',
|
||||
'created_at',
|
||||
'updated_at',
|
||||
'decayed_at',
|
||||
] as const;
|
||||
|
||||
async readTable(
|
||||
tableName: string,
|
||||
opts?: { limit?: number; offset?: number },
|
||||
): Promise<Record<string, unknown>[]> {
|
||||
const table = TABLE_OBJECTS[tableName];
|
||||
if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);
|
||||
|
||||
// For vector tables when source lacks pgvector: use column-allowlist raw query
|
||||
// to avoid type-registration errors.
|
||||
if (VECTOR_TABLES.has(tableName) && !this.sourceHasVector) {
|
||||
const cols = DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.map((c) => `"${c}"`).join(
|
||||
', ',
|
||||
);
|
||||
let sql = `SELECT ${cols} FROM "${tableName}"`;
|
||||
const params: unknown[] = [];
|
||||
if (opts?.limit !== undefined) {
|
||||
params.push(opts.limit);
|
||||
sql += ` LIMIT $${params.length.toString()}`;
|
||||
}
|
||||
if (opts?.offset !== undefined) {
|
||||
params.push(opts.offset);
|
||||
sql += ` OFFSET $${params.length.toString()}`;
|
||||
}
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const result = await (this.db as any).execute(
|
||||
// drizzle-orm/pglite and drizzle-orm/postgres-js both accept a raw
|
||||
// SQL template; use the tagged-template sql helper from drizzle-orm.
|
||||
// Since we need dynamic params, we use db.execute with a raw string
|
||||
// via the PGlite/postgres.js driver directly.
|
||||
{ sql, params, typings: [] },
|
||||
);
|
||||
// drizzle execute returns { rows: unknown[][] } for PGlite driver,
|
||||
// or a RowList for postgres.js. Normalise both shapes.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const raw = result as any;
|
||||
if (Array.isArray(raw)) {
|
||||
// postgres.js shape: array of row objects
|
||||
return raw as Record<string, unknown>[];
|
||||
}
|
||||
if (raw?.rows && Array.isArray(raw.rows)) {
|
||||
// PGlite shape: { rows: unknown[][] } OR { rows: Record<string,unknown>[] }
|
||||
const rows = raw.rows as unknown[];
|
||||
if (rows.length === 0) return [];
|
||||
if (Array.isArray(rows[0])) {
|
||||
// Columnar: convert to objects using fields array if available
|
||||
const fields: string[] =
|
||||
(raw.fields as Array<{ name: string }> | undefined)?.map((f) => f.name) ??
|
||||
DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.slice();
|
||||
return (rows as unknown[][]).map((row) => {
|
||||
const obj: Record<string, unknown> = {};
|
||||
for (let i = 0; i < fields.length; i++) {
|
||||
obj[fields[i]!] = row[i];
|
||||
}
|
||||
return obj;
|
||||
});
|
||||
}
|
||||
return rows as Record<string, unknown>[];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
// Standard Drizzle select for all other tables (and vector tables when
|
||||
// the source has pgvector registered).
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let query = (this.db as any).select().from(table);
|
||||
if (opts?.limit !== undefined) query = query.limit(opts.limit);
|
||||
if (opts?.offset !== undefined) query = query.offset(opts.offset);
|
||||
return (await query) as Record<string, unknown>[];
|
||||
}
|
||||
|
||||
async count(tableName: string): Promise<number> {
|
||||
const table = TABLE_OBJECTS[tableName];
|
||||
if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const [row] = await (this.db as any)
|
||||
.select({ n: drizzleSql<number>`COUNT(*)::int` })
|
||||
.from(table);
|
||||
return (row as { n: number } | undefined)?.n ?? 0;
|
||||
}
|
||||
|
||||
async close(): Promise<void> {
|
||||
// Lifecycle managed externally — caller closes the db handle.
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Real postgres target adapter */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Live implementation of MigrationTarget backed by a real Postgres connection.
|
||||
* Used by the CLI; mocked in tests.
|
||||
*/
|
||||
export class PostgresMigrationTarget implements MigrationTarget {
|
||||
private sql: ReturnType<typeof postgres>;
|
||||
|
||||
constructor(url: string) {
|
||||
this.sql = postgres(url, {
|
||||
max: 5,
|
||||
connect_timeout: 10,
|
||||
idle_timeout: 30,
|
||||
});
|
||||
}
|
||||
|
||||
async upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void> {
|
||||
if (rows.length === 0) return;
|
||||
|
||||
// Collect all column names from the batch (union of all row keys).
|
||||
const colSet = new Set<string>();
|
||||
for (const row of rows) {
|
||||
for (const k of Object.keys(row)) colSet.add(k);
|
||||
}
|
||||
const cols = [...colSet];
|
||||
if (!cols.includes('id')) {
|
||||
throw new Error(`Table ${tableName}: rows missing 'id' column`);
|
||||
}
|
||||
|
||||
// Build VALUES list — use postgres tagged-template helpers for safety.
|
||||
// postgres.js supports bulk inserts via array of objects.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await this.sql.begin(async (tx: any) => {
|
||||
// Insert in chunks to avoid enormous single queries.
|
||||
for (let i = 0; i < rows.length; i += 500) {
|
||||
const chunk = rows.slice(i, i + 500);
|
||||
|
||||
// Normalise rows: fill missing columns with null.
|
||||
const normalised = chunk.map((row) => {
|
||||
const out: Record<string, unknown> = {};
|
||||
for (const col of cols) {
|
||||
out[col] = row[col] ?? null;
|
||||
}
|
||||
return out;
|
||||
});
|
||||
|
||||
const colList = cols.map((c) => `"${c}"`).join(', ');
|
||||
const updateList = cols
|
||||
.filter((c) => c !== 'id')
|
||||
.map((c) => `"${c}" = EXCLUDED."${c}"`)
|
||||
.join(', ');
|
||||
|
||||
// Build values placeholders
|
||||
const valuePlaceholders = normalised
|
||||
.map((_, ri) => `(${cols.map((_, ci) => `$${ri * cols.length + ci + 1}`).join(', ')})`)
|
||||
.join(', ');
|
||||
|
||||
const flatValues = normalised.flatMap((row) => cols.map((c) => row[c] ?? null));
|
||||
|
||||
const query = `
|
||||
INSERT INTO "${tableName}" (${colList})
|
||||
VALUES ${valuePlaceholders}
|
||||
ON CONFLICT (id) DO UPDATE SET ${updateList}
|
||||
`;
|
||||
|
||||
await tx.unsafe(query, flatValues as never[]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async count(tableName: string): Promise<number> {
|
||||
const rows = await this.sql.unsafe(`SELECT COUNT(*)::int AS n FROM "${tableName}"`);
|
||||
const row = rows[0] as unknown as { n: number } | undefined;
|
||||
return row?.n ?? 0;
|
||||
}
|
||||
|
||||
async hasPgvector(): Promise<boolean> {
|
||||
const rows = await this.sql`
|
||||
SELECT 1 FROM pg_extension WHERE extname = 'vector'
|
||||
`;
|
||||
return rows.length > 0;
|
||||
}
|
||||
|
||||
async close(): Promise<void> {
|
||||
await this.sql.end();
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Source-row normalisation */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Drizzle returns rows as camelCase TypeScript objects (e.g. `userId`, not
|
||||
* `user_id`). The PostgresMigrationTarget upserts via raw SQL and uses the
|
||||
* column names as given — the `insights` no-vector path uses snake_case column
|
||||
* aliases in the SELECT, so those rows already arrive as snake_case.
|
||||
*
|
||||
* For vector tables (insights), if `embedding` is absent from the source row
|
||||
* (because DrizzleMigrationSource omitted it in the no-vector projection), we
|
||||
* explicitly set it to null so the target ON CONFLICT UPDATE doesn't error.
|
||||
*
|
||||
* NOTE: insights.embedding is defined as `vector('embedding', { dimensions:
|
||||
* 1536 })` with no `.notNull()` in schema.ts — it accepts NULL.
|
||||
*/
|
||||
export function normaliseSourceRow(
|
||||
tableName: string,
|
||||
row: Record<string, unknown>,
|
||||
sourceHasVector: boolean,
|
||||
): Record<string, unknown> {
|
||||
const out = { ...row };
|
||||
|
||||
if (VECTOR_TABLES.has(tableName) && !sourceHasVector) {
|
||||
// Source cannot have embeddings — explicitly null them so ON CONFLICT
|
||||
// UPDATE doesn't try to write undefined.
|
||||
out['embedding'] = null;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Precondition checks */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export class MigrationPreconditionError extends Error {
|
||||
constructor(
|
||||
message: string,
|
||||
public readonly remediation: string,
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'MigrationPreconditionError';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify target preconditions before writing any data.
|
||||
*
|
||||
* Checks:
|
||||
* 1. pgvector extension installed.
|
||||
* 2. User-data tables are empty (unless --allow-non-empty).
|
||||
*/
|
||||
export async function checkTargetPreconditions(
|
||||
target: MigrationTarget,
|
||||
allowNonEmpty: boolean,
|
||||
tablesToMigrate: string[],
|
||||
): Promise<void> {
|
||||
const hasVector = await target.hasPgvector();
|
||||
if (!hasVector) {
|
||||
throw new MigrationPreconditionError(
|
||||
'Target Postgres does not have the pgvector extension installed.',
|
||||
'Run: CREATE EXTENSION IF NOT EXISTS vector; — or use the pgvector/pgvector:pg17 Docker image.',
|
||||
);
|
||||
}
|
||||
|
||||
if (!allowNonEmpty) {
|
||||
// Check the first non-empty user-data table.
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = await target.count(table);
|
||||
if (n > 0) {
|
||||
throw new MigrationPreconditionError(
|
||||
`Target table "${table}" already contains ${n.toString()} rows.`,
|
||||
'Pass --allow-non-empty to overwrite existing data (upsert semantics), ' +
|
||||
'or point to an empty target database.',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Core migration runner */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Run the tier migration.
|
||||
*
|
||||
* @param source Adapter for reading source rows.
|
||||
* @param target Adapter for writing rows to target.
|
||||
* @param opts Migration options.
|
||||
* @param sourceHasVector True if the source tier supports vector columns.
|
||||
*/
|
||||
export async function runMigrateTier(
|
||||
source: MigrationSource,
|
||||
target: MigrationTarget,
|
||||
opts: MigrateTierOptions,
|
||||
sourceHasVector = false,
|
||||
): Promise<MigrateTierResult> {
|
||||
const { dryRun, allowNonEmpty, batchSize, onProgress } = opts;
|
||||
|
||||
const tablesToMigrate = getMigrationOrder();
|
||||
|
||||
// Preflight: gather row counts from source.
|
||||
onProgress('[migrate-tier] Gathering source row counts...');
|
||||
const sourceCounts = new Map<string, number>();
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = await source.count(table);
|
||||
sourceCounts.set(table, n);
|
||||
}
|
||||
|
||||
// Log preflight summary.
|
||||
onProgress('[migrate-tier] Tables to migrate:');
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = sourceCounts.get(table) ?? 0;
|
||||
onProgress(` ${table}: ${n.toString()} rows`);
|
||||
}
|
||||
for (const skipped of SKIP_TABLES) {
|
||||
onProgress(` ${skipped}: SKIPPED (ephemeral/environment-specific)`);
|
||||
}
|
||||
|
||||
// Vector column notice.
|
||||
if (!sourceHasVector) {
|
||||
onProgress(
|
||||
'[migrate-tier] NOTE: Source tier has no pgvector support. ' +
|
||||
'insights.embedding will be NULL on all migrated rows.',
|
||||
);
|
||||
}
|
||||
|
||||
if (dryRun) {
|
||||
onProgress('[migrate-tier] DRY RUN — no writes will be made.');
|
||||
const tables: TableMigrationResult[] = tablesToMigrate.map((t) => ({
|
||||
table: t,
|
||||
rowsMigrated: 0,
|
||||
skipped: false,
|
||||
}));
|
||||
for (const skipped of SKIP_TABLES) {
|
||||
tables.push({ table: skipped, rowsMigrated: 0, skipped: true, skipReason: 'ephemeral' });
|
||||
}
|
||||
return { tables, totalRows: 0, dryRun: true };
|
||||
}
|
||||
|
||||
// Check preconditions before writing.
|
||||
await checkTargetPreconditions(target, allowNonEmpty, tablesToMigrate);
|
||||
|
||||
const results: TableMigrationResult[] = [];
|
||||
let totalRows = 0;
|
||||
|
||||
for (const table of tablesToMigrate) {
|
||||
const sourceCount = sourceCounts.get(table) ?? 0;
|
||||
|
||||
if (sourceCount === 0) {
|
||||
onProgress(`[migrate-tier] ${table}: 0 rows — skipping.`);
|
||||
results.push({ table, rowsMigrated: 0, skipped: false });
|
||||
continue;
|
||||
}
|
||||
|
||||
onProgress(`[migrate-tier] ${table}: migrating ${sourceCount.toString()} rows...`);
|
||||
|
||||
let offset = 0;
|
||||
let tableTotal = 0;
|
||||
let lastSuccessfulId: string | undefined;
|
||||
|
||||
try {
|
||||
while (offset < sourceCount) {
|
||||
const rows = await source.readTable(table, { limit: batchSize, offset });
|
||||
if (rows.length === 0) break;
|
||||
|
||||
const normalised = rows.map((row) => normaliseSourceRow(table, row, sourceHasVector));
|
||||
|
||||
await target.upsertBatch(table, normalised);
|
||||
|
||||
lastSuccessfulId = rows[rows.length - 1]?.['id'] as string | undefined;
|
||||
tableTotal += rows.length;
|
||||
offset += rows.length;
|
||||
|
||||
onProgress(
|
||||
`[migrate-tier] ${table}: ${tableTotal.toString()}/${sourceCount.toString()} rows written`,
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
const errMsg = err instanceof Error ? err.message : String(err);
|
||||
throw new Error(
|
||||
`[migrate-tier] Failed on table "${table}" after ${tableTotal.toString()} rows ` +
|
||||
`(last id: ${lastSuccessfulId ?? 'none'}). Error: ${errMsg}\n` +
|
||||
`Remediation: Re-run with --allow-non-empty to resume (upsert is idempotent).`,
|
||||
);
|
||||
}
|
||||
|
||||
results.push({ table, rowsMigrated: tableTotal, skipped: false });
|
||||
totalRows += tableTotal;
|
||||
onProgress(`[migrate-tier] ${table}: done (${tableTotal.toString()} rows).`);
|
||||
}
|
||||
|
||||
// Add skipped table records.
|
||||
for (const skipped of SKIP_TABLES) {
|
||||
results.push({
|
||||
table: skipped,
|
||||
rowsMigrated: 0,
|
||||
skipped: true,
|
||||
skipReason: 'ephemeral or environment-specific — re-issue on target',
|
||||
});
|
||||
}
|
||||
|
||||
onProgress(`[migrate-tier] Complete. ${totalRows.toString()} total rows migrated.`);
|
||||
return { tables: results, totalRows, dryRun: false };
|
||||
}
|
||||
@@ -1,546 +0,0 @@
|
||||
/**
|
||||
* Unit tests for tier-detection.ts.
|
||||
*
|
||||
* All external I/O (postgres, ioredis) is mocked — no live services required.
|
||||
*
|
||||
* Note on hoisting: vi.mock() factories are hoisted above all imports by vitest.
|
||||
* Variables referenced inside factory callbacks must be declared via vi.hoisted()
|
||||
* so they are available at hoist time.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Hoist shared mock state so factories can reference it */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const mocks = vi.hoisted(() => {
|
||||
const mockSqlFn = vi.fn();
|
||||
const mockEnd = vi.fn().mockResolvedValue(undefined);
|
||||
const mockPostgresConstructor = vi.fn(() => {
|
||||
const sql = mockSqlFn as ReturnType<typeof mockSqlFn>;
|
||||
(sql as unknown as Record<string, unknown>)['end'] = mockEnd;
|
||||
return sql;
|
||||
});
|
||||
|
||||
const mockRedisConnect = vi.fn().mockResolvedValue(undefined);
|
||||
const mockRedisPing = vi.fn().mockResolvedValue('PONG');
|
||||
const mockRedisDisconnect = vi.fn();
|
||||
const MockRedis = vi.fn().mockImplementation(() => ({
|
||||
connect: mockRedisConnect,
|
||||
ping: mockRedisPing,
|
||||
disconnect: mockRedisDisconnect,
|
||||
}));
|
||||
|
||||
return {
|
||||
mockSqlFn,
|
||||
mockEnd,
|
||||
mockPostgresConstructor,
|
||||
mockRedisConnect,
|
||||
mockRedisPing,
|
||||
mockRedisDisconnect,
|
||||
MockRedis,
|
||||
};
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Module mocks (registered at hoist time) */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Replace the real `postgres` default export with the hoisted constructor mock.
vi.mock('postgres', () => ({
  default: mocks.mockPostgresConstructor,
}));

// Replace the real ioredis `Redis` class with the hoisted mock constructor.
vi.mock('ioredis', () => ({
  Redis: mocks.MockRedis,
}));
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Import SUT after mocks are registered */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
import { detectAndAssertTier, probeServiceHealth, TierDetectionError } from './tier-detection.js';
|
||||
import type { TierConfig } from './tier-detection.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Config fixtures */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Tier 'local': embedded PGlite storage + in-process queue — the suite
// asserts that no postgres/ioredis probe is made for this config.
const LOCAL_CONFIG: TierConfig = {
  tier: 'local',
  storage: { type: 'pglite', dataDir: '.mosaic/pglite' },
  queue: { type: 'local' },
};

// Tier 'standalone': external Postgres + bullmq queue, no vector support.
// Host/port in the URL are asserted on thrown TierDetectionErrors.
const STANDALONE_CONFIG: TierConfig = {
  tier: 'standalone',
  storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@db-host:5432/mosaic' },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};

// Tier 'federated': external Postgres with enableVector (triggers the extra
// pgvector probe) + bullmq queue.
const FEDERATED_CONFIG: TierConfig = {
  tier: 'federated',
  storage: {
    type: 'postgres',
    url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
    enableVector: true,
  },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Contract under test: detectAndAssertTier resolves when every service the
// configured tier needs is reachable, and otherwise throws a structured
// TierDetectionError identifying the failing service plus a remediation hint.
describe('detectAndAssertTier', () => {
  beforeEach(() => {
    vi.clearAllMocks();

    // Default: all probes succeed.
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockEnd.mockResolvedValue(undefined);
    mocks.mockRedisConnect.mockResolvedValue(undefined);
    mocks.mockRedisPing.mockResolvedValue('PONG');

    // Re-wire constructor to return a fresh sql-like object each time.
    // (clearAllMocks wipes the implementations installed at hoist time.)
    mocks.mockPostgresConstructor.mockImplementation(() => {
      const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
      (sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
      return sql;
    });
    mocks.MockRedis.mockImplementation(() => ({
      connect: mocks.mockRedisConnect,
      ping: mocks.mockRedisPing,
      disconnect: mocks.mockRedisDisconnect,
    }));
  });

  /* ---------------------------------------------------------------- */
  /* 1. local — no-op                                                 */
  /* ---------------------------------------------------------------- */

  it('resolves immediately for tier=local without touching postgres or ioredis', async () => {
    await expect(detectAndAssertTier(LOCAL_CONFIG)).resolves.toBeUndefined();
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 2. standalone — happy path                                       */
  /* ---------------------------------------------------------------- */

  it('resolves for tier=standalone when postgres and valkey are reachable', async () => {
    await expect(detectAndAssertTier(STANDALONE_CONFIG)).resolves.toBeUndefined();

    // Postgres was probed (SELECT 1 only — no pgvector check).
    expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(1);
    expect(mocks.mockSqlFn).toHaveBeenCalledTimes(1);
    // Valkey was probed.
    expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
    expect(mocks.mockRedisPing).toHaveBeenCalledTimes(1);
  });

  /* ---------------------------------------------------------------- */
  /* 3. standalone — postgres unreachable                             */
  /* ---------------------------------------------------------------- */

  it('throws TierDetectionError with service=postgres when postgres query rejects', async () => {
    mocks.mockSqlFn.mockRejectedValueOnce(new Error('connection refused'));

    const promise = detectAndAssertTier(STANDALONE_CONFIG);
    await expect(promise).rejects.toBeInstanceOf(TierDetectionError);

    // Confirm no valkey probe happened (fail fast on first error).
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  it('sets service=postgres on the error when postgres fails', async () => {
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));

    try {
      await detectAndAssertTier(STANDALONE_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('postgres');
      expect(typed.remediation).toContain('docker compose');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 4. standalone — valkey unreachable                               */
  /* ---------------------------------------------------------------- */

  it('throws TierDetectionError with service=valkey when ping fails', async () => {
    // Postgres probe succeeds; valkey connect fails.
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));

    try {
      await detectAndAssertTier(STANDALONE_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('valkey');
      expect(typed.message).toContain('valkey');
      expect(typed.remediation).toContain('valkey-federated');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 5. federated — happy path                                        */
  /* ---------------------------------------------------------------- */

  it('resolves for tier=federated when all three checks pass', async () => {
    // SELECT 1 and CREATE EXTENSION both succeed.
    mocks.mockSqlFn.mockResolvedValue([]);

    await expect(detectAndAssertTier(FEDERATED_CONFIG)).resolves.toBeUndefined();

    // postgres probe (SELECT 1) + pgvector probe (CREATE EXTENSION) = 2 postgres constructors.
    expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(2);
    expect(mocks.mockSqlFn).toHaveBeenCalledTimes(2);
    // Valkey probed once.
    expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
  });

  /* ---------------------------------------------------------------- */
  /* 6. federated — pgvector not installable                          */
  /* ---------------------------------------------------------------- */

  it('throws TierDetectionError with service=pgvector when CREATE EXTENSION fails', async () => {
    // SELECT 1 succeeds (first call), CREATE EXTENSION fails (second call).
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // SELECT 1
      .mockRejectedValueOnce(new Error('extension "vector" is not available'));

    try {
      await detectAndAssertTier(FEDERATED_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('pgvector');
      expect(typed.message).toContain('pgvector');
      expect(typed.remediation).toContain('pgvector/pgvector');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 7. probeValkey honors connectTimeout and lazyConnect             */
  /* ---------------------------------------------------------------- */

  it('constructs the ioredis Redis client with connectTimeout: 5000', async () => {
    await detectAndAssertTier(STANDALONE_CONFIG);

    expect(mocks.MockRedis).toHaveBeenCalledOnce();
    expect(mocks.MockRedis).toHaveBeenCalledWith(
      expect.any(String),
      expect.objectContaining({ connectTimeout: 5000, lazyConnect: true }),
    );
  });

  /* ---------------------------------------------------------------- */
  /* 8. probePgvector — library-not-installed remediation             */
  /* ---------------------------------------------------------------- */

  it('includes pgvector/pgvector:pg17 in remediation when pgvector library is missing', async () => {
    // SELECT 1 succeeds; CREATE EXTENSION fails with the canonical library-missing message.
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
      .mockRejectedValueOnce(new Error('extension "vector" is not available')); // probePgvector

    try {
      await detectAndAssertTier(FEDERATED_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('pgvector');
      expect(typed.remediation).toContain('pgvector/pgvector:pg17');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 9. probePgvector — permission / other error remediation          */
  /* ---------------------------------------------------------------- */

  it('mentions CREATE permission or superuser in remediation for a generic pgvector error', async () => {
    // SELECT 1 succeeds; CREATE EXTENSION fails with a permission error.
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
      .mockRejectedValueOnce(new Error('permission denied to create extension'));

    try {
      await detectAndAssertTier(FEDERATED_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('pgvector');
      // Must NOT point to the image fix — that's only for the library-missing case.
      expect(typed.remediation).not.toContain('pgvector/pgvector:pg17');
      // Must mention permissions or superuser.
      expect(typed.remediation).toMatch(/CREATE|superuser/i);
    }
  });

  /* ---------------------------------------------------------------- */
  /* 10. federated tier rejects non-bullmq queue.type                 */
  /* ---------------------------------------------------------------- */

  it('throws TierDetectionError with service=config for federated tier with queue.type !== bullmq', async () => {
    const badConfig: TierConfig = {
      tier: 'federated',
      storage: {
        type: 'postgres',
        url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
        enableVector: true,
      },
      queue: { type: 'local' },
    };

    try {
      await detectAndAssertTier(badConfig);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('config');
      expect(typed.remediation).toContain('bullmq');
    }

    // No network probes should have been attempted.
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 11. Error fields populated                                       */
  /* ---------------------------------------------------------------- */

  it('populates host, port, and remediation on a thrown TierDetectionError', async () => {
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));

    let caught: TierDetectionError | undefined;
    try {
      await detectAndAssertTier(STANDALONE_CONFIG);
    } catch (err) {
      caught = err as TierDetectionError;
    }

    expect(caught).toBeInstanceOf(TierDetectionError);
    expect(caught!.service).toBe('postgres');
    // Host and port are extracted from the Postgres URL in STANDALONE_CONFIG.
    expect(caught!.host).toBe('db-host');
    expect(caught!.port).toBe(5432);
    expect(caught!.remediation).toMatch(/docker compose/i);
    expect(caught!.message).toContain('db-host:5432');
  });
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* probeServiceHealth tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Contract under test: probeServiceHealth never throws — it returns a
// TierHealthReport with per-service status plus an overall traffic light.
describe('probeServiceHealth', () => {
  beforeEach(() => {
    vi.clearAllMocks();

    // Default: all probes succeed.
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockEnd.mockResolvedValue(undefined);
    mocks.mockRedisConnect.mockResolvedValue(undefined);
    mocks.mockRedisPing.mockResolvedValue('PONG');

    // Re-install implementations wiped by clearAllMocks.
    mocks.mockPostgresConstructor.mockImplementation(() => {
      const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
      (sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
      return sql;
    });
    mocks.MockRedis.mockImplementation(() => ({
      connect: mocks.mockRedisConnect,
      ping: mocks.mockRedisPing,
      disconnect: mocks.mockRedisDisconnect,
    }));
  });

  /* ---------------------------------------------------------------- */
  /* 12. local tier — all skipped, green                              */
  /* ---------------------------------------------------------------- */

  it('returns all services as skipped and overall green for local tier', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);

    expect(report.tier).toBe('local');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);
    for (const svc of report.services) {
      expect(svc.status).toBe('skipped');
    }
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 13. postgres fails, valkey ok → red                              */
  /* ---------------------------------------------------------------- */

  it('returns red overall with postgres fail and valkey ok for standalone when postgres fails', async () => {
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    expect(report.overall).toBe('red');

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('fail');
    expect(pgCheck?.error).toBeDefined();
    expect(pgCheck?.error?.remediation).toContain('docker compose');

    // Unlike detectAndAssertTier, health probing continues past a failure.
    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('ok');
  });

  /* ---------------------------------------------------------------- */
  /* 14. federated all green → 3 services ok                          */
  /* ---------------------------------------------------------------- */

  it('returns green overall with all 3 services ok for federated when all pass', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    expect(report.tier).toBe('federated');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);

    for (const svc of report.services) {
      expect(svc.status).toBe('ok');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 15. durationMs is a non-negative number for every service check  */
  /* ---------------------------------------------------------------- */

  it('sets durationMs as a non-negative number for every service check', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });

  it('sets durationMs >= 0 even for skipped services (local tier)', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);

    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });

  /* ---------------------------------------------------------------- */
  /* 16. configPath is passed through to the report                   */
  /* ---------------------------------------------------------------- */

  it('includes configPath in the report when provided', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG, '/etc/mosaic/mosaic.config.json');
    expect(report.configPath).toBe('/etc/mosaic/mosaic.config.json');
  });

  /* ---------------------------------------------------------------- */
  /* 17. standalone — valkey fails, postgres ok → red                 */
  /* ---------------------------------------------------------------- */

  it('returns red with valkey fail and postgres ok for standalone when valkey fails', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    expect(report.overall).toBe('red');

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('ok');

    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('fail');
    expect(vkCheck?.error).toBeDefined();
  });

  /* ---------------------------------------------------------------- */
  /* 18. federated — pgvector fails → red with remediation            */
  /* ---------------------------------------------------------------- */

  it('returns red with pgvector fail for federated when pgvector probe fails', async () => {
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // postgres SELECT 1
      .mockRejectedValueOnce(new Error('extension "vector" is not available')); // pgvector

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    expect(report.overall).toBe('red');

    const pvCheck = report.services.find((s) => s.name === 'pgvector');
    expect(pvCheck?.status).toBe('fail');
    expect(pvCheck?.error?.remediation).toContain('pgvector/pgvector:pg17');
  });

  /* ---------------------------------------------------------------- */
  /* 19. federated — non-bullmq queue → red config error, no network  */
  /* ---------------------------------------------------------------- */

  it('returns red overall with config error when federated tier has non-bullmq queue (no network call)', async () => {
    const federatedBadQueueConfig: TierConfig = {
      tier: 'federated',
      storage: {
        type: 'postgres',
        url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
        enableVector: true,
      },
      queue: { type: 'local' },
    };

    const report = await probeServiceHealth(federatedBadQueueConfig);

    expect(report.overall).toBe('red');

    const valkey = report.services.find((s) => s.name === 'valkey');
    expect(valkey?.status).toBe('fail');
    expect(valkey?.error?.remediation).toMatch(/bullmq/i);

    // Critically: no network call was made — MockRedis constructor must NOT have been called.
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 20. durationMs actually measures real elapsed time               */
  /* ---------------------------------------------------------------- */

  it('measures real elapsed time for service probes', async () => {
    const DELAY_MS = 25;

    // Make the postgres mock introduce a real wall-clock delay.
    mocks.mockSqlFn.mockImplementation(
      () =>
        new Promise((resolve) =>
          setTimeout(() => {
            resolve([]);
          }, DELAY_MS),
        ),
    );

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck).toBeDefined();
    // Must be >= 20ms (small slack for jitter). Would be 0 if timer were stubbed.
    expect(pgCheck!.durationMs).toBeGreaterThanOrEqual(20);
  });
});
|
||||
@@ -1,555 +0,0 @@
|
||||
/**
|
||||
* Tier Detection — pre-flight service reachability probes.
|
||||
*
|
||||
* Lifted from apps/gateway/src/bootstrap/tier-detector.ts so both the gateway
|
||||
* and the mosaic CLI can share the same probe logic without duplicating code or
|
||||
* creating circular workspace dependencies.
|
||||
*
|
||||
* Library choices:
|
||||
* - Postgres: `postgres` npm package (already a dep via @mosaicstack/db / drizzle-orm).
|
||||
* - Valkey: `ioredis` (compatible with Valkey; same URL convention used by bullmq).
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { Redis } from 'ioredis';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Local structural type — avoids circular dependency */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Minimal structural shape required for tier detection.
|
||||
* Mirrors the relevant fields of MosaicConfig (from @mosaicstack/config) without
|
||||
* creating a dependency cycle (config depends on storage for StorageConfig).
|
||||
* Any object that satisfies MosaicConfig also satisfies this type.
|
||||
*/
|
||||
export interface TierConfig {
  // Deployment tier; drives which probes run (local tier skips all of them).
  tier: 'local' | 'standalone' | 'federated';
  // Storage backend. Only the 'postgres' variant carries a connection URL;
  // `enableVector` additionally requests the pgvector probe.
  storage:
    | { type: 'pglite'; dataDir?: string }
    | { type: 'postgres'; url: string; enableVector?: boolean }
    | { type: 'files'; dataDir: string; format?: 'json' | 'md' };
  // Queue backend; `url` is the Valkey/Redis URL when type is 'bullmq'.
  queue: { type: string; url?: string };
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Public types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Outcome of probing one external service for a health report. */
export interface ServiceCheck {
  name: 'postgres' | 'valkey' | 'pgvector';
  // 'skipped' means the configured tier does not require this service.
  status: 'ok' | 'fail' | 'skipped';
  host?: string;
  port?: number;
  // Wall-clock duration of the probe in milliseconds (0 or more for skips).
  durationMs: number;
  // Present only when status is 'fail'; remediation is an actionable hint.
  error?: { message: string; remediation: string };
}

/** Aggregate health report across all services required by a tier. */
export interface TierHealthReport {
  tier: 'local' | 'standalone' | 'federated';
  // Path of the config file the report was built from, when known.
  configPath?: string;
  // Traffic-light summary derived from the individual service checks.
  overall: 'green' | 'yellow' | 'red';
  services: ServiceCheck[];
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Structured error type */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export class TierDetectionError extends Error {
|
||||
public readonly service: 'postgres' | 'valkey' | 'pgvector' | 'config';
|
||||
public readonly host: string;
|
||||
public readonly port: number;
|
||||
public readonly remediation: string;
|
||||
|
||||
constructor(opts: {
|
||||
service: 'postgres' | 'valkey' | 'pgvector' | 'config';
|
||||
host: string;
|
||||
port: number;
|
||||
remediation: string;
|
||||
cause?: unknown;
|
||||
}) {
|
||||
const message =
|
||||
`[tier-detector] ${opts.service} unreachable or unusable at ` +
|
||||
`${opts.host}:${opts.port} — ${opts.remediation}`;
|
||||
super(message, { cause: opts.cause });
|
||||
this.name = 'TierDetectionError';
|
||||
this.service = opts.service;
|
||||
this.host = opts.host;
|
||||
this.port = opts.port;
|
||||
this.remediation = opts.remediation;
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* URL helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Extract host and port from a URL string, returning safe fallbacks on parse failure. */
|
||||
function parseHostPort(url: string, defaultPort: number): { host: string; port: number } {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
const host = parsed.hostname || 'unknown';
|
||||
const port = parsed.port ? parseInt(parsed.port, 10) : defaultPort;
|
||||
return { host, port };
|
||||
} catch {
|
||||
return { host: 'unknown', port: defaultPort };
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Internal probe results */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Internal result of one measured probe; `error` is present only on failure. */
interface ProbeResult {
  host: string;
  port: number;
  // Wall-clock duration of the probe in milliseconds.
  durationMs: number;
  error?: { message: string; remediation: string };
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Postgres probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
async function probePostgres(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
// Run a trivial query to confirm connectivity.
|
||||
await sql`SELECT 1`;
|
||||
} catch (cause) {
|
||||
throw new TierDetectionError({
|
||||
service: 'postgres',
|
||||
host,
|
||||
port,
|
||||
remediation:
|
||||
'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {
|
||||
// Ignore cleanup errors — we already have what we need.
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function probePostgresMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
const start = Date.now();
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
await sql`SELECT 1`;
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: {
|
||||
message: cause instanceof Error ? cause.message : String(cause),
|
||||
remediation:
|
||||
'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* pgvector probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
async function probePgvector(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
// This succeeds whether the extension is already installed or freshly created.
|
||||
// It errors only if the pgvector shared library is missing from the Postgres binary.
|
||||
await sql`CREATE EXTENSION IF NOT EXISTS vector`;
|
||||
} catch (cause) {
|
||||
const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
|
||||
const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
|
||||
const remediation = isLibraryMissing
|
||||
? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
|
||||
: 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
|
||||
throw new TierDetectionError({
|
||||
service: 'pgvector',
|
||||
host,
|
||||
port,
|
||||
remediation,
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {
|
||||
// Ignore cleanup errors.
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function probePgvectorMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
const start = Date.now();
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
await sql`CREATE EXTENSION IF NOT EXISTS vector`;
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
|
||||
const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
|
||||
const remediation = isLibraryMissing
|
||||
? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
|
||||
: 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: { message: cause instanceof Error ? cause.message : String(cause), remediation },
|
||||
};
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Valkey probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Fallback Valkey endpoint used when config.queue.url is not set.
const DEFAULT_VALKEY_URL = 'redis://localhost:6380';
|
||||
|
||||
async function probeValkey(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 6380);
|
||||
const client = new Redis(url, {
|
||||
enableReadyCheck: false,
|
||||
maxRetriesPerRequest: 0,
|
||||
retryStrategy: () => null, // no retries — fail fast
|
||||
lazyConnect: true,
|
||||
connectTimeout: 5000, // fail-fast: 5-second hard cap on connection attempt
|
||||
});
|
||||
|
||||
try {
|
||||
await client.connect();
|
||||
const pong = await client.ping();
|
||||
if (pong !== 'PONG') {
|
||||
throw new Error(`Unexpected PING response: ${pong}`);
|
||||
}
|
||||
} catch (cause) {
|
||||
throw new TierDetectionError({
|
||||
service: 'valkey',
|
||||
host,
|
||||
port,
|
||||
remediation:
|
||||
'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
client.disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
async function probeValkeyMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 6380);
|
||||
const start = Date.now();
|
||||
const client = new Redis(url, {
|
||||
enableReadyCheck: false,
|
||||
maxRetriesPerRequest: 0,
|
||||
retryStrategy: () => null,
|
||||
lazyConnect: true,
|
||||
connectTimeout: 5000,
|
||||
});
|
||||
try {
|
||||
await client.connect();
|
||||
const pong = await client.ping();
|
||||
if (pong !== 'PONG') {
|
||||
throw new Error(`Unexpected PING response: ${pong}`);
|
||||
}
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: {
|
||||
message: cause instanceof Error ? cause.message : String(cause),
|
||||
remediation:
|
||||
'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
client.disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Public entry points */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Assert that all services required by `config.tier` are reachable.
|
||||
*
|
||||
* - `local` — no-op (PGlite is in-process; no external services).
|
||||
* - `standalone` — assert Postgres + Valkey (if queue.type === 'bullmq').
|
||||
* - `federated` — assert Postgres + Valkey + pgvector installability.
|
||||
*
|
||||
* Throws `TierDetectionError` on the first failure with host:port and
|
||||
* a remediation hint.
|
||||
*/
|
||||
export async function detectAndAssertTier(config: TierConfig): Promise<void> {
|
||||
if (config.tier === 'local') {
|
||||
// PGlite runs in-process — nothing to probe.
|
||||
return;
|
||||
}
|
||||
|
||||
const pgUrl =
|
||||
config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
|
||||
|
||||
const valkeyUrl =
|
||||
config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
|
||||
|
||||
if (config.tier === 'standalone') {
|
||||
await probePostgres(pgUrl);
|
||||
if (valkeyUrl) {
|
||||
await probeValkey(valkeyUrl);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// tier === 'federated'
|
||||
// Reject misconfigured queue upfront — federated requires bullmq + a Valkey URL.
|
||||
if (config.queue.type !== 'bullmq') {
|
||||
throw new TierDetectionError({
|
||||
service: 'config',
|
||||
host: 'localhost',
|
||||
port: 0,
|
||||
remediation:
|
||||
"Federated tier requires queue.type === 'bullmq'. " +
|
||||
"Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
|
||||
});
|
||||
}
|
||||
const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
|
||||
await probePostgres(pgUrl);
|
||||
await probeValkey(federatedValkeyUrl);
|
||||
await probePgvector(pgUrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Non-throwing variant for `mosaic gateway doctor`.
|
||||
*
|
||||
* Probes ALL required services even if some fail, returning a structured report.
|
||||
* Services not required for the current tier are reported as `skipped`.
|
||||
*
|
||||
* Overall status:
|
||||
* - `green` — all required services OK
|
||||
* - `yellow` — all required services OK, but a non-critical check failed
|
||||
* (currently unused — reserved for future optional probes)
|
||||
* - `red` — at least one required service failed
|
||||
*/
|
||||
export async function probeServiceHealth(
|
||||
config: TierConfig,
|
||||
configPath?: string,
|
||||
): Promise<TierHealthReport> {
|
||||
const tier = config.tier;
|
||||
|
||||
// local tier: PGlite is in-process, no external services needed.
|
||||
if (tier === 'local') {
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: 'green',
|
||||
services: [
|
||||
{ name: 'postgres', status: 'skipped', durationMs: 0 },
|
||||
{ name: 'valkey', status: 'skipped', durationMs: 0 },
|
||||
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
const pgUrl =
|
||||
config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
|
||||
|
||||
const valkeyUrl =
|
||||
config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
|
||||
|
||||
const services: ServiceCheck[] = [];
|
||||
let hasFailure = false;
|
||||
|
||||
if (tier === 'standalone') {
|
||||
// Postgres — required
|
||||
const pgResult = await probePostgresMeasured(pgUrl);
|
||||
if (pgResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'fail',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
error: pgResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'ok',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
// Valkey — required if bullmq
|
||||
if (valkeyUrl) {
|
||||
const vkResult = await probeValkeyMeasured(valkeyUrl);
|
||||
if (vkResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
error: vkResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'ok',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
services.push({ name: 'valkey', status: 'skipped', durationMs: 0 });
|
||||
}
|
||||
|
||||
// pgvector — not required for standalone
|
||||
services.push({ name: 'pgvector', status: 'skipped', durationMs: 0 });
|
||||
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: hasFailure ? 'red' : 'green',
|
||||
services,
|
||||
};
|
||||
}
|
||||
|
||||
// tier === 'federated'
|
||||
// Postgres — required
|
||||
const pgResult = await probePostgresMeasured(pgUrl);
|
||||
if (pgResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'fail',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
error: pgResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'ok',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
// Valkey — required for federated (queue.type must be bullmq)
|
||||
if (config.queue.type !== 'bullmq') {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: 'localhost',
|
||||
port: 0,
|
||||
durationMs: 0,
|
||||
error: {
|
||||
message: "Federated tier requires queue.type === 'bullmq'",
|
||||
remediation:
|
||||
"Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
|
||||
},
|
||||
});
|
||||
} else {
|
||||
const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
|
||||
const vkResult = await probeValkeyMeasured(federatedValkeyUrl);
|
||||
if (vkResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
error: vkResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'ok',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// pgvector — required for federated
|
||||
const pvResult = await probePgvectorMeasured(pgUrl);
|
||||
if (pvResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'pgvector',
|
||||
status: 'fail',
|
||||
host: pvResult.host,
|
||||
port: pvResult.port,
|
||||
durationMs: pvResult.durationMs,
|
||||
error: pvResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'pgvector',
|
||||
status: 'ok',
|
||||
host: pvResult.host,
|
||||
port: pvResult.port,
|
||||
durationMs: pvResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: hasFailure ? 'red' : 'green',
|
||||
services,
|
||||
};
|
||||
}
|
||||
@@ -38,6 +38,6 @@ export interface StorageAdapter {
|
||||
}
|
||||
|
||||
export type StorageConfig =
|
||||
| { type: 'postgres'; url: string; enableVector?: boolean }
|
||||
| { type: 'postgres'; url: string }
|
||||
| { type: 'pglite'; dataDir?: string }
|
||||
| { type: 'files'; dataDir: string; format?: 'json' | 'md' };
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
import { defineConfig } from 'vitest/config';
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
globals: true,
|
||||
environment: 'node',
|
||||
},
|
||||
});
|
||||
12
pnpm-lock.yaml
generated
12
pnpm-lock.yaml
generated
@@ -152,18 +152,12 @@ importers:
|
||||
fastify:
|
||||
specifier: ^5.0.0
|
||||
version: 5.8.2
|
||||
ioredis:
|
||||
specifier: ^5.10.0
|
||||
version: 5.10.0
|
||||
node-cron:
|
||||
specifier: ^4.2.1
|
||||
version: 4.2.1
|
||||
openai:
|
||||
specifier: ^6.32.0
|
||||
version: 6.32.0(ws@8.20.0)(zod@4.3.6)
|
||||
postgres:
|
||||
specifier: ^3.4.8
|
||||
version: 3.4.8
|
||||
reflect-metadata:
|
||||
specifier: ^0.2.0
|
||||
version: 0.2.2
|
||||
@@ -648,12 +642,6 @@ importers:
|
||||
commander:
|
||||
specifier: ^13.0.0
|
||||
version: 13.1.0
|
||||
ioredis:
|
||||
specifier: ^5.10.0
|
||||
version: 5.10.0
|
||||
postgres:
|
||||
specifier: ^3.4.8
|
||||
version: 3.4.8
|
||||
devDependencies:
|
||||
typescript:
|
||||
specifier: ^5.8.0
|
||||
|
||||
Reference in New Issue
Block a user