Compare commits
1 Commits
feat/feder
...
release/mo
| Author | SHA1 | Date | |
|---|---|---|---|
| f73cc59b6d |
@@ -1,60 +0,0 @@
|
||||
# docker-compose.federated.yml — Federated tier overlay
|
||||
#
|
||||
# USAGE:
|
||||
# docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
#
|
||||
# This file is a standalone overlay for the Mosaic federated tier.
|
||||
# It is NOT an extension of docker-compose.yml — it defines its own services
|
||||
# and named volumes so it can run independently of the base dev stack.
|
||||
#
|
||||
# IMPORTANT — HOST PORT CONFLICTS:
|
||||
# The federated services bind the same host ports as the base dev stack
|
||||
# (5433 for Postgres, 6380 for Valkey). You must stop the base dev stack
|
||||
# before starting the federated stack on the same machine:
|
||||
# docker compose down
|
||||
# docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
#
|
||||
# pgvector extension:
|
||||
# The vector extension is created automatically at first boot via
|
||||
# ./infra/pg-init/01-extensions.sql (CREATE EXTENSION IF NOT EXISTS vector).
|
||||
#
|
||||
# Tier configuration:
|
||||
# Used by `mosaic` instances configured with `tier: federated`.
|
||||
# DEFAULT_FEDERATED_CONFIG points at:
|
||||
# postgresql://mosaic:mosaic@localhost:5433/mosaic
|
||||
|
||||
services:
|
||||
postgres-federated:
|
||||
image: pgvector/pgvector:pg17
|
||||
profiles: [federated]
|
||||
ports:
|
||||
- '${PG_FEDERATED_HOST_PORT:-5433}:5432'
|
||||
environment:
|
||||
POSTGRES_USER: mosaic
|
||||
POSTGRES_PASSWORD: mosaic
|
||||
POSTGRES_DB: mosaic
|
||||
volumes:
|
||||
- pg_federated_data:/var/lib/postgresql/data
|
||||
- ./infra/pg-init:/docker-entrypoint-initdb.d:ro
|
||||
healthcheck:
|
||||
test: ['CMD-SHELL', 'pg_isready -U mosaic']
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
valkey-federated:
|
||||
image: valkey/valkey:8-alpine
|
||||
profiles: [federated]
|
||||
ports:
|
||||
- '${VALKEY_FEDERATED_HOST_PORT:-6380}:6379'
|
||||
volumes:
|
||||
- valkey_federated_data:/data
|
||||
healthcheck:
|
||||
test: ['CMD', 'valkey-cli', 'ping']
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
volumes:
|
||||
pg_federated_data:
|
||||
valkey_federated_data:
|
||||
@@ -1,116 +1,73 @@
|
||||
# Mission Manifest — MVP
|
||||
# Mission Manifest — Install UX v2
|
||||
|
||||
> Top-level rollup tracking Mosaic Stack MVP execution.
|
||||
> Workstreams have their own manifests; this document is the source of truth for MVP scope, status, and history.
|
||||
> Owner: Orchestrator (sole writer).
|
||||
> Persistent document tracking full mission scope, status, and session history.
|
||||
> Updated by the orchestrator at each phase transition and milestone completion.
|
||||
|
||||
## Mission
|
||||
|
||||
**ID:** mvp-20260312
|
||||
**Statement:** Ship a self-hosted, multi-user AI agent platform that consolidates the user's disparate jarvis-brain usage across home and USC workstations into a single coherent system reachable via three first-class surfaces — webUI, TUI, and CLI — with federation as the data-layer mechanism that makes cross-host agent sessions work in real time without copying user data across the boundary.
|
||||
**Phase:** Execution (workstream W1 in planning-complete state)
|
||||
**Current Workstream:** W1 — Federation v1
|
||||
**Progress:** 0 / 1 declared workstreams complete (more workstreams will be declared as scope is refined)
|
||||
**Status:** active (continuous since 2026-03-13)
|
||||
**Last Updated:** 2026-04-19 (manifest authored at the rollup level; install-ux-v2 archived; W1 federation planning landed via PR #468)
|
||||
**Source PRD:** [docs/PRD.md](./PRD.md) — Mosaic Stack v0.1.0
|
||||
**Scratchpad:** [docs/scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md) (active since 2026-03-13; 14 prior sessions of phase-based execution)
|
||||
**ID:** install-ux-v2-20260405
|
||||
**Statement:** The install-ux-hardening mission shipped the plumbing (uninstall, masked password, hooks consent, unified flow, headless path), but the first real end-to-end run surfaced a critical regression and a collection of UX failings that make the wizard feel neither quick nor intelligent. This mission closes the bootstrap regression as a hotfix, then rethinks the first-run experience around a provider-first, intent-driven flow with a drill-down main menu and a genuinely fast quick-start.
|
||||
**Phase:** Execution
|
||||
**Current Milestone:** IUV-M03
|
||||
**Progress:** 2 / 3 milestones
|
||||
**Status:** active
|
||||
**Last Updated:** 2026-04-05 (IUV-M02 complete — CORS/FQDN + skill installer rework)
|
||||
**Parent Mission:** [install-ux-hardening-20260405](./archive/missions/install-ux-hardening-20260405/MISSION-MANIFEST.md) (complete — `mosaic-v0.0.25`)
|
||||
|
||||
## Context
|
||||
|
||||
Jarvis (v0.2.0) was a single-host Python/Next.js assistant. The user runs sessions across 3–4 workstations split between home and USC. Today every session reaches back to a single jarvis-brain checkout, which is brittle (offline-hostile, no consolidation, no shared state beyond a single repo). A prior OpenBrain attempt punished offline use, introduced cache/latency/opacity pain, and tightly coupled every session to a remote service.
|
||||
Real-run testing of `@mosaicstack/mosaic@0.0.25` uncovered:
|
||||
|
||||
The MVP solution: keep each user's home gateway as the source of truth, connect gateways gateway-to-gateway over mTLS with scoped read-only data exposure, and expose the unified experience through three coherent surfaces:
|
||||
|
||||
- **webUI** — the primary visual control plane (Next.js + React 19, `apps/web`)
|
||||
- **TUI** — the terminal-native interface for agent work (`packages/mosaic` wizard + Pi TUI)
|
||||
- **CLI** — `mosaic` command for scripted/headless workflows
|
||||
|
||||
Federation is required NOW because it unblocks cross-host consolidation; it is necessary but not sufficient for MVP. Additional workstreams will be declared as their scope solidifies.
|
||||
|
||||
## Prior Execution (March 13 → April 5)
|
||||
|
||||
This manifest was authored on 2026-04-19 to roll up work that began 2026-03-13. Before this date, MVP work was tracked via phase-based Gitea milestones and the scratchpad — there was no rollup manifest at the `docs/MISSION-MANIFEST.md` path (the slot was occupied by sub-mission manifests for `install-ux-hardening` and then `install-ux-v2`).
|
||||
|
||||
Prior execution outline (full detail in [scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md)):
|
||||
|
||||
- **Phases 0 → 7** (Gitea milestones `ms-157` → `ms-164`, issues #1–#59): foundation, core API, agent layer, web dashboard, memory, remote control, CLI/tools, polish/beta. Substantially shipped by Session 13.
|
||||
- **Phase 8** (Gitea milestone `ms-165`, issues #160–#172): platform architecture extension — teams, workspaces, `/provider` OAuth, preferences, etc. Wave-based execution plan defined at Session 14.
|
||||
- **Sub-missions** during the gap: `install-ux-hardening` (complete, `mosaic-v0.0.25`), `install-ux-v2` (complete on 2026-04-19, `0.0.27` → `0.0.29`). Both archived under `docs/archive/missions/`.
|
||||
|
||||
Going forward, MVP execution is tracked through the **Workstreams** table below. Phase-based issue numbering is preserved on Gitea but is no longer the primary control plane.
|
||||
|
||||
## Cross-Cutting MVP Requirements
|
||||
|
||||
These apply to every workstream and every milestone. A workstream cannot ship if it breaks any of them.
|
||||
|
||||
| # | Requirement |
|
||||
| ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| MVP-X1 | Three-surface parity: every user-facing capability is reachable via webUI **and** TUI **and** CLI (read paths at minimum; mutating paths where applicable to the surface). |
|
||||
| MVP-X2 | Multi-tenant isolation is enforced at every boundary; no cross-user leakage under any circumstance. |
|
||||
| MVP-X3 | Auth via BetterAuth (existing); SSO adapters per PRD; admin bootstrap remains a one-shot. |
|
||||
| MVP-X4 | Three quality gates green before push: `pnpm typecheck`, `pnpm lint`, `pnpm format:check`. |
|
||||
| MVP-X5 | Federated tier (PG + pgvector + Valkey) is the canonical MVP deployment topology; local/standalone tiers continue to work for non-federated installs but are not the MVP target. |
|
||||
| MVP-X6 | OTEL tracing on every request path; `traceparent` propagated across the federation boundary in both directions. |
|
||||
| MVP-X7 | Trunk merge strategy: branch from `main`, squash-merge via PR, never push to `main` directly. |
|
||||
1. **Critical:** admin bootstrap fails with HTTP 400 `property email should not exist` — `bootstrap.controller.ts` uses `import type { BootstrapSetupDto }`, erasing the class at runtime. Nest's `@Body()` falls back to plain `Object` metatype, and ValidationPipe with `forbidNonWhitelisted` rejects every property. One-character fix (drop the `type` keyword), but it blocks the happy path of the release that just shipped.
|
||||
2. The wizard reports `✔ Wizard complete` and `✔ Done` _after_ the bootstrap 400 — failure only propagates in headless mode (`wizard.ts:147`).
|
||||
3. The gateway port prompt does not prefill `14242` in the input buffer.
|
||||
4. `"What is Mosaic?"` intro copy does not mention Pi SDK (the actual agent runtime behind Claude/Codex/OpenCode).
|
||||
5. CORS origin prompt is confusing — the user should be able to supply an FQDN/hostname and have the system derive the CORS value.
|
||||
6. Skill / additional feature install section is unusable in practice.
|
||||
7. Quick-start asks far too many questions to be meaningfully "quick".
|
||||
8. No drill-down main menu — everything is a linear interrogation.
|
||||
9. Provider setup happens late and without intelligence. An OpenClaw-style provider-first flow would let the user describe what they want in natural language, have the agent expound on it, and have the agent choose its own name based on that intent.
|
||||
|
||||
## Success Criteria
|
||||
|
||||
The MVP is complete when ALL declared workstreams are complete AND every cross-cutting requirement is verifiable on a live two-host deployment (woltje.com ↔ uscllc.com).
|
||||
- [x] AC-1: Admin bootstrap completes successfully end-to-end on a fresh install (DTO value import, no forbidNonWhitelisted regression); covered by an integration or e2e test that exercises the real DTO binding. _(PR #440)_
|
||||
- [x] AC-2: Wizard fails loudly (non-zero exit, clear error) when the bootstrap stage returns `completed: false`, in both interactive and headless modes. No more silent `✔ Wizard complete` after a 400. _(PR #440)_
|
||||
- [x] AC-3: Gateway port prompt prefills `14242` in the input field (user can press Enter to accept). _(PR #440)_
|
||||
- [x] AC-4: `"What is Mosaic?"` intro copy mentions Pi SDK as the underlying agent runtime. _(PR #440)_
|
||||
- [x] AC-5: Release `mosaic-v0.0.26` tagged and published to the Gitea npm registry, unblocking the 0.0.25 happy path. _(tag: mosaic-v0.0.26, registry: 0.0.26 live)_
|
||||
- [ ] AC-6: CORS origin prompt replaced with FQDN/hostname input; CORS string is derived from that.
|
||||
- [ ] AC-7: Skill / additional feature install section is reworked until it is actually usable end-to-end (worker defines the concrete failure modes during diagnosis).
|
||||
- [ ] AC-8: First-run flow has a drill-down main menu with at least `Plugins` (Recommended / Custom), `Providers`, and the other top-level configuration groups. Linear interrogation is gone.
|
||||
- [ ] AC-9: `Quick Start` path completes with a minimal, curated set of questions (target: under 90 seconds for a returning user; define the exact baseline during design).
|
||||
- [ ] AC-10: Provider setup happens first, driven by a natural-language intake prompt. The agent expounds on the user's intent and chooses its own name based on that intent (OpenClaw-style). Naming is confirmable / overridable.
|
||||
- [ ] AC-11: All milestones ship as merged PRs with green CI and closed issues.
|
||||
|
||||
- [ ] AC-MVP-1: All declared workstreams reach `complete` status with merged PRs and green CI
|
||||
- [ ] AC-MVP-2: A user session on the home gateway can transparently query work-gateway data subject to scope, with no data persisted across the boundary
|
||||
- [ ] AC-MVP-3: The same user-facing capability is reachable from webUI, TUI, and CLI (per MVP-X1)
|
||||
- [ ] AC-MVP-4: Two-gateway production deployment (woltje.com ↔ uscllc.com) operational ≥7 days without incident
|
||||
- [ ] AC-MVP-5: All cross-cutting requirements (MVP-X1 → MVP-X7) verified with evidence
|
||||
- [ ] AC-MVP-6: PRD `docs/PRD.md` "In Scope (v0.1.0 Beta)" list mapped to evidence (each item: shipped / explicitly deferred with rationale)
|
||||
## Milestones
|
||||
|
||||
## Workstreams
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------- | ------------------------------------------------------------ | ----------- | ---------------------- | ----- | ---------- | ---------- |
|
||||
| 1 | IUV-M01 | Hotfix: bootstrap DTO + wizard failure + port prefill + copy | complete | fix/bootstrap-hotfix | #436 | 2026-04-05 | 2026-04-05 |
|
||||
| 2 | IUV-M02 | UX polish: CORS/FQDN, skill installer rework | complete | feat/install-ux-polish | #437 | 2026-04-05 | 2026-04-05 |
|
||||
| 3 | IUV-M03 | Provider-first intelligent flow + drill-down main menu | not-started | feat/install-ux-intent | #438 | — | — |
|
||||
|
||||
| # | ID | Name | Status | Manifest | Notes |
|
||||
| --- | --- | ------------------------------------------- | ----------------- | ----------------------------------------------------------------------- | --------------------------------------------------- |
|
||||
| W1 | FED | Federation v1 | planning-complete | [docs/federation/MISSION-MANIFEST.md](./federation/MISSION-MANIFEST.md) | 7 milestones, ~175K tokens, issues #460–#466 filed |
|
||||
| W2+ | TBD | (additional workstreams declared as scoped) | — | — | Scope creep is expected and explicitly accommodated |
|
||||
## Subagent Delegation Plan
|
||||
|
||||
### Likely Additional Workstreams (Not Yet Declared)
|
||||
|
||||
These are anticipated based on the PRD `In Scope` list but are NOT counted toward MVP completion until they have their own manifest, milestones, and tracking issues. Listed here so the orchestrator knows what's likely coming.
|
||||
|
||||
- Web dashboard parity with PRD scope (chat, tasks, projects, missions, agent status surfaces)
|
||||
- Pi TUI integration for terminal-native agent work
|
||||
- CLI completeness for headless / scripted workflows that mirror webUI capability
|
||||
- Remote control plugins (Discord priority, then Telegram)
|
||||
- Multi-user / SSO finishing (BetterAuth + Authentik/WorkOS/Keycloak adapters per PRD)
|
||||
- LLM provider expansion (Anthropic, Codex, Z.ai, Ollama, LM Studio, llama.cpp) + routing matrix
|
||||
- MCP server/client capability + skill import interface
|
||||
- Brain (`@mosaicstack/brain`) as the structured data layer on PG + vector
|
||||
|
||||
When any of these solidify into a real workstream, add a row to the Workstreams table, create a workstream-level manifest under `docs/{workstream}/MISSION-MANIFEST.md`, and file tracking issues.
|
||||
| Milestone | Recommended Tier | Rationale |
|
||||
| --------- | ---------------- | --------------------------------------------------------------------- |
|
||||
| IUV-M01 | sonnet | Tight bug cluster with known fix sites + small release cycle |
|
||||
| IUV-M02 | sonnet | UX rework, moderate surface, diagnostic-heavy for the skill installer |
|
||||
| IUV-M03 | opus | Architectural redesign of first-run flow, state machine + LLM intake |
|
||||
|
||||
## Risks
|
||||
|
||||
- **Scope creep is the named risk.** Workstreams will be added; the rule is that each must have its own manifest + milestones + acceptance criteria before it consumes execution capacity.
|
||||
- **Federation urgency vs. surface parity** — federation is being built first because it unblocks the user, but webUI/TUI/CLI parity (MVP-X1) cannot slip indefinitely. Track surface coverage explicitly when each workstream lands.
|
||||
- **Three-surface fan-out** — the same capability exposed three ways multiplies test surface and design effort. Default to a shared API/contract layer, then thin surface adapters; resist surface-specific business logic.
|
||||
- **Federated-tier dependency** — MVP requires PG + pgvector + Valkey; users on local/standalone tier cannot federate. This is intentional but must be communicated clearly in the wizard.
|
||||
- **Hotfix regression surface** — the `import type` → `import` fix on the DTO class is one character but needs an integration test that binds the real DTO, not just a controller unit test, to prevent the same class-erasure regression from sneaking back in.
|
||||
- **LLM-driven intake latency / offline** — M03's provider-first intent flow assumes an available LLM call to expound on user input and choose a name. Offline installs need a deterministic fallback.
|
||||
- **Menu vs. linear back-compat** — M03 changes the top-level flow shape; existing `tools/install.sh --yes` + env-var headless path must continue to work.
|
||||
- **Scope creep in M03** — "redesign the wizard" can absorb arbitrary work. Keep it bounded with explicit non-goals.
|
||||
|
||||
## Out of Scope (MVP)
|
||||
## Out of Scope
|
||||
|
||||
- SaaS / multi-tenant revenue model — personal/family/team tool only
|
||||
- Mobile native apps — responsive web only
|
||||
- Public npm registry publishing — Gitea registry only
|
||||
- Voice / video agent interaction
|
||||
- Full OpenClaw feature parity — inspiration only
|
||||
- Calendar / GLPI / Woodpecker tooling integrations (deferred to post-MVP)
|
||||
|
||||
## Session History
|
||||
|
||||
For sessions 1–14 (phase-based execution, 2026-03-13 → 2026-03-15), see [scratchpads/mvp-20260312.md](./scratchpads/mvp-20260312.md). Sessions below are tracked at the rollup level.
|
||||
|
||||
| Session | Date | Runtime | Outcome |
|
||||
| ------- | ---------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| S15 | 2026-04-19 | claude | MVP rollup manifest authored. Install-ux-v2 archived (IUV-M03 retroactively closed — shipped via PR #446 + releases 0.0.27 → 0.0.29). Federation v1 planning landed via PR #468. W1 manifest reachable at `docs/federation/MISSION-MANIFEST.md`. Next: kick off FED-M1. |
|
||||
|
||||
## Next Step
|
||||
|
||||
Begin W1 / FED-M1 — federated tier infrastructure. Task breakdown lives at [docs/federation/TASKS.md](./federation/TASKS.md).
|
||||
- Migrating the wizard to a GUI / web UI (still terminal-first)
|
||||
- Replacing the Gitea registry or the Woodpecker publish pipeline
|
||||
- Multi-tenant / multi-user onboarding (still single-admin bootstrap)
|
||||
- Reworking `mosaic uninstall` (M01 of the parent mission — stable)
|
||||
|
||||
@@ -1,40 +1,39 @@
|
||||
# Tasks — MVP (Top-Level Rollup)
|
||||
# Tasks — Install UX v2
|
||||
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **Mission:** mvp-20260312
|
||||
> **Manifest:** [docs/MISSION-MANIFEST.md](./MISSION-MANIFEST.md)
|
||||
>
|
||||
> This file is a **rollup**. Per-workstream task breakdowns live in workstream task files
|
||||
> (e.g. `docs/federation/TASKS.md`). Workers operating inside a workstream should treat
|
||||
> the workstream file as their primary task source; this file exists for orchestrator-level
|
||||
> visibility into MVP-wide state.
|
||||
>
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed`
|
||||
> **Mission:** install-ux-v2-20260405
|
||||
> **Schema:** `| id | status | description | issue | agent | branch | depends_on | estimate | notes |`
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed` | `needs-qa`
|
||||
> **Agent values:** `codex` | `sonnet` | `haiku` | `opus` | `—` (auto)
|
||||
|
||||
## Workstream Rollup
|
||||
## Milestone 1 — Hotfix: bootstrap DTO + wizard failure + port prefill + copy (IUV-M01)
|
||||
|
||||
| id | status | workstream | progress | tasks file | notes |
|
||||
| --- | ----------------- | ------------------- | ---------------- | ------------------------------------------------- | --------------------------------------------------------------- |
|
||||
| W1 | planning-complete | Federation v1 (FED) | 0 / 7 milestones | [docs/federation/TASKS.md](./federation/TASKS.md) | M1 task breakdown populated; M2–M7 deferred to mission planning |
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | ------ | -------------------- | ---------- | -------- | --------------------------------------------------------------------------------------- |
|
||||
| IUV-01-01 | done | Fix `apps/gateway/src/admin/bootstrap.controller.ts:16` — switch `import type { BootstrapSetupDto }` to a value import so Nest's `@Body()` binds the real class | #436 | sonnet | fix/bootstrap-hotfix | — | 3K | PR #440 merged `0ae932ab` |
|
||||
| IUV-01-02 | done | Add integration / e2e test that POSTs `/api/bootstrap/setup` with `{name,email,password}` against a real Nest app instance and asserts 201 — NOT a mocked controller unit test | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-01 | 10K | `apps/gateway/src/admin/bootstrap.e2e.spec.ts` — 4 tests; unplugin-swc added for vitest |
|
||||
| IUV-01-03 | done | `packages/mosaic/src/wizard.ts:147` — propagate `!bootstrapResult.completed` as a wizard failure in **interactive** mode too (not only headless); non-zero exit + no `✔ Wizard complete` line | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-02 | 5K | removed `&& headlessRun` guard |
|
||||
| IUV-01-04 | done | Gateway port prompt prefills `14242` in the input buffer — investigate why `promptPort`'s `defaultValue` isn't reaching the user-visible input | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-03 | 5K | added `initialValue` through prompter interface → clack |
|
||||
| IUV-01-05 | done | `"What is Mosaic?"` intro copy updated to mention Pi SDK as the underlying agent runtime (alongside Claude Code / Codex / OpenCode) | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-04 | 2K | `packages/mosaic/src/stages/welcome.ts` |
|
||||
| IUV-01-06 | done | Tests + code review + PR merge + tag `mosaic-v0.0.26` + Gitea release + npm registry republish | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-05 | 10K | PRs #440/#441/#442 merged; tag `mosaic-v0.0.26`; registry latest=0.0.26 ✓ |
|
||||
|
||||
## Cross-Cutting Tracking
|
||||
## Milestone 2 — UX polish: CORS/FQDN, skill installer rework (IUV-M02)
|
||||
|
||||
These are MVP-level checks that don't belong to any single workstream. Updated by the orchestrator at each session.
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------- | ---------- | -------- | ---------------------------------------------------------------------- |
|
||||
| IUV-02-01 | done | Replace CORS origin prompt with FQDN / hostname input; derive the CORS value internally; default to `localhost` with clear help text | #437 | sonnet | feat/install-ux-polish | — | 10K | `deriveCorsOrigin()` pure fn; MOSAIC_HOSTNAME headless var; PR #444 |
|
||||
| IUV-02-02 | done | Diagnose and document the concrete failure modes of the current skill / additional feature install section end-to-end | #437 | sonnet | feat/install-ux-polish | IUV-02-01 | 8K | selection→install gap, silent catch{}, no whitelist concept |
|
||||
| IUV-02-03 | done | Rework the skill installer so it is usable end-to-end (selection, install, verify, failure reporting) | #437 | sonnet | feat/install-ux-polish | IUV-02-02 | 20K | MOSAIC_INSTALL_SKILLS env var whitelist; SyncSkillsResult typed return |
|
||||
| IUV-02-04 | done | Tests + code review + PR merge | #437 | sonnet | feat/install-ux-polish | IUV-02-03 | 10K | 18 new tests (13 CORS + 5 skills); PR #444 merged `172bacb3` |
|
||||
|
||||
| id | status | description | notes |
|
||||
| ------- | ----------- | -------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
||||
| MVP-T01 | done | Author MVP-level manifest at `docs/MISSION-MANIFEST.md` | This session (2026-04-19); PR pending |
|
||||
| MVP-T02 | done | Archive install-ux-v2 mission state to `docs/archive/missions/install-ux-v2-20260405/` | IUV-M03 retroactively closed (shipped via PR #446 + releases 0.0.27→0.0.29) |
|
||||
| MVP-T03 | done | Land federation v1 planning artifacts on `main` | PR #468 merged 2026-04-19 (commit `66512550`) |
|
||||
| MVP-T04 | not-started | Sync `.mosaic/orchestrator/mission.json` MVP slot with this manifest (milestone enumeration, etc.) | Coord state file; consider whether to repopulate via `mosaic coord` or accept hand-edit |
|
||||
| MVP-T05 | in-progress | Kick off W1 / FED-M1 — federated tier infrastructure | Session 16 (2026-04-19): FED-M1-01 in-progress on `feat/federation-m1-tier-config` |
|
||||
| MVP-T06 | not-started | Declare additional workstreams (web dashboard, TUI/CLI parity, remote control, etc.) as scope solidifies | Track each new workstream by adding a row to the Workstream Rollup |
|
||||
## Milestone 3 — Provider-first intelligent flow + drill-down main menu (IUV-M03)
|
||||
|
||||
## Pointer to Active Workstream
|
||||
|
||||
Active workstream is **W1 — Federation v1**. Workers should:
|
||||
|
||||
1. Read [docs/federation/MISSION-MANIFEST.md](./federation/MISSION-MANIFEST.md) for workstream scope
|
||||
2. Read [docs/federation/TASKS.md](./federation/TASKS.md) for the next pending task
|
||||
3. Follow per-task agent + tier guidance from the workstream manifest
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ----- | ---------------------- | ---------- | -------- | ------------------------------------------------------------- |
|
||||
| IUV-03-01 | not-started | Design doc: new first-run state machine — main menu (Plugins / Providers / …), Quick Start vs Custom paths, provider-first flow, intent intake + naming loop | #438 | opus | feat/install-ux-intent | — | 15K | scratchpad + explicit non-goals |
|
||||
| IUV-03-02 | not-started | Implement drill-down main menu (Plugins: Recommended / Custom, Providers, …) as the top-level entry point of `mosaic wizard` | #438 | opus | feat/install-ux-intent | IUV-03-01 | 25K | |
|
||||
| IUV-03-03 | not-started | Quick Start path: curated minimum question set — define the exact baseline, delete everything else from the fast path | #438 | opus | feat/install-ux-intent | IUV-03-02 | 15K | |
|
||||
| IUV-03-04 | not-started | Provider-first natural-language intake: user describes intent → agent expounds → agent proposes a name (confirmable / overridable) — OpenClaw-style | #438 | opus | feat/install-ux-intent | IUV-03-03 | 25K | offline fallback required (deterministic default name + path) |
|
||||
| IUV-03-05 | not-started | Preserve backward-compat: headless path (`MOSAIC_ASSUME_YES=1` + env vars) still works end-to-end; `tools/install.sh --yes` unchanged | #438 | opus | feat/install-ux-intent | IUV-03-04 | 10K | |
|
||||
| IUV-03-06 | not-started | Tests + code review + PR merge + `mosaic-v0.0.27` release | #438 | opus | feat/install-ux-intent | IUV-03-05 | 15K | |
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
# Mission Manifest — Install UX v2
|
||||
|
||||
> Persistent document tracking full mission scope, status, and session history.
|
||||
> Updated by the orchestrator at each phase transition and milestone completion.
|
||||
|
||||
## Mission
|
||||
|
||||
**ID:** install-ux-v2-20260405
|
||||
**Statement:** The install-ux-hardening mission shipped the plumbing (uninstall, masked password, hooks consent, unified flow, headless path), but the first real end-to-end run surfaced a critical regression and a collection of UX failings that make the wizard feel neither quick nor intelligent. This mission closes the bootstrap regression as a hotfix, then rethinks the first-run experience around a provider-first, intent-driven flow with a drill-down main menu and a genuinely fast quick-start.
|
||||
**Phase:** Closed
|
||||
**Current Milestone:** —
|
||||
**Progress:** 3 / 3 milestones
|
||||
**Status:** complete
|
||||
**Last Updated:** 2026-04-19 (archived during MVP manifest authoring; IUV-M03 substantively shipped via PR #446 — drill-down menu + provider-first flow + quick start; releases 0.0.27 → 0.0.29)
|
||||
**Archived to:** `docs/archive/missions/install-ux-v2-20260405/`
|
||||
**Parent Mission:** [install-ux-hardening-20260405](./archive/missions/install-ux-hardening-20260405/MISSION-MANIFEST.md) (complete — `mosaic-v0.0.25`)
|
||||
|
||||
## Context
|
||||
|
||||
Real-run testing of `@mosaicstack/mosaic@0.0.25` uncovered:
|
||||
|
||||
1. **Critical:** admin bootstrap fails with HTTP 400 `property email should not exist` — `bootstrap.controller.ts` uses `import type { BootstrapSetupDto }`, erasing the class at runtime. Nest's `@Body()` falls back to plain `Object` metatype, and ValidationPipe with `forbidNonWhitelisted` rejects every property. One-character fix (drop the `type` keyword), but it blocks the happy path of the release that just shipped.
|
||||
2. The wizard reports `✔ Wizard complete` and `✔ Done` _after_ the bootstrap 400 — failure only propagates in headless mode (`wizard.ts:147`).
|
||||
3. The gateway port prompt does not prefill `14242` in the input buffer.
|
||||
4. `"What is Mosaic?"` intro copy does not mention Pi SDK (the actual agent runtime behind Claude/Codex/OpenCode).
|
||||
5. CORS origin prompt is confusing — the user should be able to supply an FQDN/hostname and have the system derive the CORS value.
|
||||
6. Skill / additional feature install section is unusable in practice.
|
||||
7. Quick-start asks far too many questions to be meaningfully "quick".
|
||||
8. No drill-down main menu — everything is a linear interrogation.
|
||||
9. Provider setup happens late and without intelligence. An OpenClaw-style provider-first flow would let the user describe what they want in natural language, have the agent expound on it, and have the agent choose its own name based on that intent.
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [x] AC-1: Admin bootstrap completes successfully end-to-end on a fresh install (DTO value import, no forbidNonWhitelisted regression); covered by an integration or e2e test that exercises the real DTO binding. _(PR #440)_
|
||||
- [x] AC-2: Wizard fails loudly (non-zero exit, clear error) when the bootstrap stage returns `completed: false`, in both interactive and headless modes. No more silent `✔ Wizard complete` after a 400. _(PR #440)_
|
||||
- [x] AC-3: Gateway port prompt prefills `14242` in the input field (user can press Enter to accept). _(PR #440)_
|
||||
- [x] AC-4: `"What is Mosaic?"` intro copy mentions Pi SDK as the underlying agent runtime. _(PR #440)_
|
||||
- [x] AC-5: Release `mosaic-v0.0.26` tagged and published to the Gitea npm registry, unblocking the 0.0.25 happy path. _(tag: mosaic-v0.0.26, registry: 0.0.26 live)_
|
||||
- [ ] AC-6: CORS origin prompt replaced with FQDN/hostname input; CORS string is derived from that.
|
||||
- [ ] AC-7: Skill / additional feature install section is reworked until it is actually usable end-to-end (worker defines the concrete failure modes during diagnosis).
|
||||
- [ ] AC-8: First-run flow has a drill-down main menu with at least `Plugins` (Recommended / Custom), `Providers`, and the other top-level configuration groups. Linear interrogation is gone.
|
||||
- [ ] AC-9: `Quick Start` path completes with a minimal, curated set of questions (target: under 90 seconds for a returning user; define the exact baseline during design).
|
||||
- [ ] AC-10: Provider setup happens first, driven by a natural-language intake prompt. The agent expounds on the user's intent and chooses its own name based on that intent (OpenClaw-style). Naming is confirmable / overridable.
|
||||
- [ ] AC-11: All milestones ship as merged PRs with green CI and closed issues.
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------- | ------------------------------------------------------------ | -------- | ---------------------- | ----- | ---------- | ---------- |
|
||||
| 1 | IUV-M01 | Hotfix: bootstrap DTO + wizard failure + port prefill + copy | complete | fix/bootstrap-hotfix | #436 | 2026-04-05 | 2026-04-05 |
|
||||
| 2 | IUV-M02 | UX polish: CORS/FQDN, skill installer rework | complete | feat/install-ux-polish | #437 | 2026-04-05 | 2026-04-05 |
|
||||
| 3 | IUV-M03 | Provider-first intelligent flow + drill-down main menu | complete | feat/install-ux-intent | #438 | 2026-04-05 | 2026-04-19 |
|
||||
|
||||
## Subagent Delegation Plan
|
||||
|
||||
| Milestone | Recommended Tier | Rationale |
|
||||
| --------- | ---------------- | --------------------------------------------------------------------- |
|
||||
| IUV-M01 | sonnet | Tight bug cluster with known fix sites + small release cycle |
|
||||
| IUV-M02 | sonnet | UX rework, moderate surface, diagnostic-heavy for the skill installer |
|
||||
| IUV-M03 | opus | Architectural redesign of first-run flow, state machine + LLM intake |
|
||||
|
||||
## Risks
|
||||
|
||||
- **Hotfix regression surface** — the `import type` → `import` fix on the DTO class is one character but needs an integration test that binds the real DTO, not just a controller unit test, to prevent the same class-erasure regression from sneaking back in.
|
||||
- **LLM-driven intake latency / offline** — M03's provider-first intent flow assumes an available LLM call to expound on user input and choose a name. Offline installs need a deterministic fallback.
|
||||
- **Menu vs. linear back-compat** — M03 changes the top-level flow shape; existing `tools/install.sh --yes` + env-var headless path must continue to work.
|
||||
- **Scope creep in M03** — "redesign the wizard" can absorb arbitrary work. Keep it bounded with explicit non-goals.
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- Migrating the wizard to a GUI / web UI (still terminal-first)
|
||||
- Replacing the Gitea registry or the Woodpecker publish pipeline
|
||||
- Multi-tenant / multi-user onboarding (still single-admin bootstrap)
|
||||
- Reworking `mosaic uninstall` (M01 of the parent mission — stable)
|
||||
@@ -1,39 +0,0 @@
|
||||
# Tasks — Install UX v2
|
||||
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **Mission:** install-ux-v2-20260405
|
||||
> **Schema:** `| id | status | description | issue | agent | branch | depends_on | estimate | notes |`
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed` | `needs-qa`
|
||||
> **Agent values:** `codex` | `sonnet` | `haiku` | `opus` | `—` (auto)
|
||||
|
||||
## Milestone 1 — Hotfix: bootstrap DTO + wizard failure + port prefill + copy (IUV-M01)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | ------ | -------------------- | ---------- | -------- | --------------------------------------------------------------------------------------- |
|
||||
| IUV-01-01 | done | Fix `apps/gateway/src/admin/bootstrap.controller.ts:16` — switch `import type { BootstrapSetupDto }` to a value import so Nest's `@Body()` binds the real class | #436 | sonnet | fix/bootstrap-hotfix | — | 3K | PR #440 merged `0ae932ab` |
|
||||
| IUV-01-02 | done | Add integration / e2e test that POSTs `/api/bootstrap/setup` with `{name,email,password}` against a real Nest app instance and asserts 201 — NOT a mocked controller unit test | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-01 | 10K | `apps/gateway/src/admin/bootstrap.e2e.spec.ts` — 4 tests; unplugin-swc added for vitest |
|
||||
| IUV-01-03 | done | `packages/mosaic/src/wizard.ts:147` — propagate `!bootstrapResult.completed` as a wizard failure in **interactive** mode too (not only headless); non-zero exit + no `✔ Wizard complete` line | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-02 | 5K | removed `&& headlessRun` guard |
|
||||
| IUV-01-04 | done | Gateway port prompt prefills `14242` in the input buffer — investigate why `promptPort`'s `defaultValue` isn't reaching the user-visible input | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-03 | 5K | added `initialValue` through prompter interface → clack |
|
||||
| IUV-01-05 | done | `"What is Mosaic?"` intro copy updated to mention Pi SDK as the underlying agent runtime (alongside Claude Code / Codex / OpenCode) | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-04 | 2K | `packages/mosaic/src/stages/welcome.ts` |
|
||||
| IUV-01-06 | done | Tests + code review + PR merge + tag `mosaic-v0.0.26` + Gitea release + npm registry republish | #436 | sonnet | fix/bootstrap-hotfix | IUV-01-05 | 10K | PRs #440/#441/#442 merged; tag `mosaic-v0.0.26`; registry latest=0.0.26 ✓ |
|
||||
|
||||
## Milestone 2 — UX polish: CORS/FQDN, skill installer rework (IUV-M02)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------- | ---------- | -------- | ---------------------------------------------------------------------- |
|
||||
| IUV-02-01 | done | Replace CORS origin prompt with FQDN / hostname input; derive the CORS value internally; default to `localhost` with clear help text | #437 | sonnet | feat/install-ux-polish | — | 10K | `deriveCorsOrigin()` pure fn; MOSAIC_HOSTNAME headless var; PR #444 |
|
||||
| IUV-02-02 | done | Diagnose and document the concrete failure modes of the current skill / additional feature install section end-to-end | #437 | sonnet | feat/install-ux-polish | IUV-02-01 | 8K | selection→install gap, silent catch{}, no whitelist concept |
|
||||
| IUV-02-03 | done | Rework the skill installer so it is usable end-to-end (selection, install, verify, failure reporting) | #437 | sonnet | feat/install-ux-polish | IUV-02-02 | 20K | MOSAIC_INSTALL_SKILLS env var whitelist; SyncSkillsResult typed return |
|
||||
| IUV-02-04 | done | Tests + code review + PR merge | #437 | sonnet | feat/install-ux-polish | IUV-02-03 | 10K | 18 new tests (13 CORS + 5 skills); PR #444 merged `172bacb3` |
|
||||
|
||||
## Milestone 3 — Provider-first intelligent flow + drill-down main menu (IUV-M03)
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ----- | ---------------------- | ---------- | -------- | ------------------------------------------------------------- |
|
||||
| IUV-03-01 | not-started | Design doc: new first-run state machine — main menu (Plugins / Providers / …), Quick Start vs Custom paths, provider-first flow, intent intake + naming loop | #438 | opus | feat/install-ux-intent | — | 15K | scratchpad + explicit non-goals |
|
||||
| IUV-03-02 | not-started | Implement drill-down main menu (Plugins: Recommended / Custom, Providers, …) as the top-level entry point of `mosaic wizard` | #438 | opus | feat/install-ux-intent | IUV-03-01 | 25K | |
|
||||
| IUV-03-03 | not-started | Quick Start path: curated minimum question set — define the exact baseline, delete everything else from the fast path | #438 | opus | feat/install-ux-intent | IUV-03-02 | 15K | |
|
||||
| IUV-03-04 | not-started | Provider-first natural-language intake: user describes intent → agent expounds → agent proposes a name (confirmable / overridable) — OpenClaw-style | #438 | opus | feat/install-ux-intent | IUV-03-03 | 25K | offline fallback required (deterministic default name + path) |
|
||||
| IUV-03-05 | not-started | Preserve backward-compat: headless path (`MOSAIC_ASSUME_YES=1` + env vars) still works end-to-end; `tools/install.sh --yes` unchanged | #438 | opus | feat/install-ux-intent | IUV-03-04 | 10K | |
|
||||
| IUV-03-06 | not-started | Tests + code review + PR merge + `mosaic-v0.0.27` release | #438 | opus | feat/install-ux-intent | IUV-03-05 | 15K | |
|
||||
@@ -1,368 +0,0 @@
|
||||
# Mosaic Stack — Federation Implementation Milestones
|
||||
|
||||
**Companion to:** `PRD.md`
|
||||
**Approach:** Each milestone is a verifiable slice. A milestone is "done" only when its acceptance tests pass in CI against a real (not mocked) dependency stack.
|
||||
|
||||
---
|
||||
|
||||
## Milestone Dependency Graph
|
||||
|
||||
```
|
||||
M1 (federated tier infra)
|
||||
└── M2 (Step-CA + grant schema + CLI)
|
||||
└── M3 (mTLS handshake + list/get + scope enforcement)
|
||||
├── M4 (search + audit + rate limit)
|
||||
│ └── M5 (cache + offline degradation + OTEL)
|
||||
├── M6 (revocation + auto-renewal) ◄── can start after M3
|
||||
└── M7 (multi-user hardening + e2e suite) ◄── depends on M4+M5+M6
|
||||
```
|
||||
|
||||
M5 and M6 can run in parallel: M6 may begin as soon as M3 is merged, and M5 once M4 is merged.
|
||||
|
||||
---
|
||||
|
||||
## Test Strategy (applies to all milestones)
|
||||
|
||||
Three layers, all required before a milestone ships:
|
||||
|
||||
| Layer | Scope | Runtime |
|
||||
| ------------------ | --------------------------------------------- | ------------------------------------------------------------------------ |
|
||||
| **Unit** | Per-module logic, pure functions, adapters | Vitest, no I/O |
|
||||
| **Integration** | Single gateway against real PG/Valkey/Step-CA | Vitest + Docker Compose test profile |
|
||||
| **Federation E2E** | Two gateways on a Docker network, real mTLS | Playwright/custom harness (`tools/federation-harness/`) introduced in M3 |
|
||||
|
||||
Every milestone adds tests to these layers. A milestone cannot be claimed complete if the federation E2E harness fails (applies from M3 onward).
|
||||
|
||||
**Quality gates per milestone** (same as stack-wide):
|
||||
|
||||
- `pnpm typecheck` green
|
||||
- `pnpm lint` green
|
||||
- `pnpm test` green (unit + integration)
|
||||
- `pnpm test:federation` green (M3+)
|
||||
- Independent code review passed
|
||||
- Docs updated (`docs/federation/`)
|
||||
- Merged PR on `main`, CI terminal green, linked issue closed
|
||||
|
||||
---
|
||||
|
||||
## M1 — Federated Tier Infrastructure
|
||||
|
||||
**Goal:** A gateway can run in `federated` tier with containerized Postgres + Valkey + pgvector, with no federation logic active yet.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- Add `"tier": "federated"` to `mosaic.config.json` schema and validators
|
||||
- Docker Compose `federated` profile (`docker-compose.federated.yml`) adds: Postgres+pgvector (5433), Valkey (6380), dedicated volumes
|
||||
- Tier detector in gateway bootstrap: reads config, asserts required services reachable, refuses to start otherwise
|
||||
- `pgvector` extension installed + verified on startup
|
||||
- Migration logic: safe upgrade path from `local`/`standalone` → `federated` (data export/import script, one-way)
|
||||
- `mosaic doctor` reports tier + service health
|
||||
- Gateway continues to serve as a normal standalone instance (no federation yet)
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `mosaic.config.json` schema v2 (tier enum includes `federated`)
|
||||
- `apps/gateway/src/bootstrap/tier-detector.ts`
|
||||
- `docker-compose.federated.yml`
|
||||
- `scripts/migrate-to-federated.ts`
|
||||
- Updated `mosaic doctor` output
|
||||
- Updated `packages/storage/src/adapters/postgres.ts` with pgvector support
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| - | ---------------------------------------------------------------------------------------- | ----------- |
|
||||
| 1 | Gateway boots in `federated` tier with all services present | Integration |
|
||||
| 2 | Gateway refuses to boot in `federated` tier when Postgres unreachable (fail-fast, clear) | Integration |
|
||||
| 3 | `pgvector` extension available in target DB (`SELECT * FROM pg_extension WHERE extname='vector'`) | Integration |
|
||||
| 4 | Migration script moves a populated `local` (PGlite) instance to `federated` (Postgres) with no data loss | Integration |
|
||||
| 5 | `mosaic doctor` reports correct tier and all services green | Unit |
|
||||
| 6 | Existing standalone behavior regression: agent session works end-to-end, no federation references | E2E (single-gateway) |
|
||||
|
||||
**Estimated budget:** ~20K tokens (infra + config + migration script)
|
||||
**Risk notes:** Installing the `pgvector` extension on existing PG installs is occasionally finicky; test the migration path on a realistic DB snapshot.
|
||||
|
||||
---
|
||||
|
||||
## M2 — Step-CA + Grant Schema + Admin CLI
|
||||
|
||||
**Goal:** An admin can create a federation grant and its counterparty can enroll. No runtime traffic flows yet.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- Embed Step-CA as a Docker Compose sidecar with a persistent CA volume
|
||||
- Gateway exposes a short-lived enrollment endpoint (single-use token from the grant)
|
||||
- DB schema: `federation_grants`, `federation_peers`, `federation_audit_log` (table only, not yet written to)
|
||||
- Sealed storage for `client_key_pem` using the existing credential sealing key
|
||||
- Admin CLI:
|
||||
- `mosaic federation grant create --user <id> --peer <host> --scope <file>`
|
||||
- `mosaic federation grant list`
|
||||
- `mosaic federation grant show <id>`
|
||||
- `mosaic federation peer add <enrollment-url>`
|
||||
- `mosaic federation peer list`
|
||||
- Step-CA signs the cert with SAN OIDs for `grantId` + `subjectUserId`
|
||||
- Grant status transitions: `pending` → `active` on successful enrollment
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `packages/db` migration: three federation tables + enum types
|
||||
- `apps/gateway/src/federation/ca.service.ts` (Step-CA client)
|
||||
- `apps/gateway/src/federation/grants.service.ts`
|
||||
- `apps/gateway/src/federation/enrollment.controller.ts`
|
||||
- `packages/mosaic/src/commands/federation/` (grant + peer subcommands)
|
||||
- `docker-compose.federated.yml` adds Step-CA service
|
||||
- Scope JSON schema + validator
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| - | ---------------------------------------------------------------------------------------- | ----------- |
|
||||
| 1 | `grant create` writes a `pending` row with a scoped bundle | Integration |
|
||||
| 2 | Enrollment endpoint signs a CSR and returns a cert with expected SAN OIDs | Integration |
|
||||
| 3 | Enrollment token is single-use; second attempt returns 410 | Integration |
|
||||
| 4 | Cert `subjectUserId` OID matches the grant's `subject_user_id` | Unit |
|
||||
| 5 | `client_key_pem` is at-rest encrypted; raw DB read shows ciphertext, not PEM | Integration |
|
||||
| 6 | `peer add <url>` on Server A yields an `active` peer record with a valid cert + key | E2E (two gateways, no traffic) |
|
||||
| 7 | Scope JSON with unknown resource type rejected at `grant create` | Unit |
|
||||
| 8 | `grant list` and `peer list` render active / pending / revoked accurately | Unit |
|
||||
|
||||
**Estimated budget:** ~30K tokens (schema + CA integration + CLI + sealing)
|
||||
**Risk notes:** Step-CA's API surface is well-documented but the sealing integration with existing provider-credential encryption is a cross-module concern — walk that seam deliberately.
|
||||
|
||||
---
|
||||
|
||||
## M3 — mTLS Handshake + `list` + `get` with Scope Enforcement
|
||||
|
||||
**Goal:** Two federated gateways exchange real data over mTLS with scope intersecting native RBAC.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- `FederationClient` (outbound): picks cert from `federation_peers`, does mTLS call
|
||||
- `FederationServer` (inbound): NestJS guard validates client cert, extracts `grantId` + `subjectUserId`, loads grant
|
||||
- Scope enforcement pipeline:
|
||||
1. Resource allowlist / excluded-list check
|
||||
2. Native RBAC evaluation as the `subjectUserId`
|
||||
3. Scope filter intersection (`include_teams`, `include_personal`)
|
||||
4. `max_rows_per_query` cap
|
||||
- Verbs: `list`, `get`, `capabilities`
|
||||
- Gateway query layer accepts `source: "local" | "federated:<host>" | "all"`; fan-out for `"all"`
|
||||
- **Federation E2E harness** (`tools/federation-harness/`): docker-compose.two-gateways.yml, seed script, assertion helpers — this is its own deliverable
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `apps/gateway/src/federation/client/federation-client.service.ts`
|
||||
- `apps/gateway/src/federation/server/federation-auth.guard.ts`
|
||||
- `apps/gateway/src/federation/server/scope.service.ts`
|
||||
- `apps/gateway/src/federation/server/verbs/{list,get,capabilities}.controller.ts`
|
||||
- `apps/gateway/src/federation/client/query-source.service.ts` (fan-out/merge)
|
||||
- `tools/federation-harness/` (compose + seed + test helpers)
|
||||
- `packages/types` — federation request/response DTOs in `federation.dto.ts`
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| -- | -------------------------------------------------------------------------------------------------------- | ----- |
|
||||
| 1 | A→B `list tasks` returns subjectUser's tasks intersected with scope | E2E |
|
||||
| 2 | A→B `list tasks` with `include_teams: [T1]` excludes T2 tasks the user owns | E2E |
|
||||
| 3 | A→B `get credential <id>` returns 403 when `credentials` is in `excluded_resources` | E2E |
|
||||
| 4 | Client presenting cert for grant X cannot query subjectUser of grant Y (cross-user isolation) | E2E |
|
||||
| 5 | Cert signed by untrusted CA rejected at TLS layer (no NestJS handler reached) | E2E |
|
||||
| 6 | Malformed SAN OIDs → 401; cert valid but grant revoked in DB → 403 | Integration |
|
||||
| 7 | `max_rows_per_query` caps the response; requests for more rows are paginated | Integration |
|
||||
| 8 | `source: "all"` fan-out merges local + federated results, each tagged with `_source` | Integration |
|
||||
| 9 | Federation responses never persist: verify DB row count unchanged after `list` round-trip | E2E |
|
||||
| 10 | Scope cannot grant more than native RBAC: user without access to team T still gets [] even if scope allows T | E2E |
|
||||
|
||||
**Estimated budget:** ~40K tokens (largest milestone — core federation logic + harness)
|
||||
**Risk notes:** This is the critical trust boundary. Code review should focus on scope enforcement bypass and cert-SAN-spoofing paths. Every 403/401 path needs a test.
|
||||
|
||||
---
|
||||
|
||||
## M4 — `search` Verb + Audit Log + Rate Limit
|
||||
|
||||
**Goal:** Keyword search over allowed resources with full audit and per-grant rate limiting.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- `search` verb across `resources` allowlist (intersection of scope + native RBAC)
|
||||
- Keyword search (reuse existing `packages/memory/src/adapters/keyword.ts`); pgvector search stays out of v1 search verb
|
||||
- Every federated request (all verbs) writes to `federation_audit_log`: `grant_id`, `verb`, `resource`, `query_hash`, `outcome`, `bytes_out`, `latency_ms`
|
||||
- No request body captured; `query_hash` is SHA-256 of normalized query params
|
||||
- Token-bucket rate limit per grant (default 60/min, override per grant)
|
||||
- 429 response with `Retry-After` header and structured body
|
||||
- 90-day hot retention for audit log; cold-tier rollover deferred to M7
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `apps/gateway/src/federation/server/verbs/search.controller.ts`
|
||||
- `apps/gateway/src/federation/server/audit.service.ts` (async write, no blocking)
|
||||
- `apps/gateway/src/federation/server/rate-limit.guard.ts`
|
||||
- Tests in harness
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| - | ------------------------------------------------------------------------------------------------- | ----------- |
|
||||
| 1 | `search` returns ranked hits only from allowed resources | E2E |
|
||||
| 2 | `search` excluding `credentials` does not return a match even when keyword matches a credential name | E2E |
|
||||
| 3 | Every successful request appears in `federation_audit_log` within 1s | Integration |
|
||||
| 4 | Denied request (403) is also audited with `outcome='denied'` | Integration |
|
||||
| 5 | Audit row stores query hash but NOT query body | Unit |
|
||||
| 6 | 61st request in 60s window returns 429 with `Retry-After` | E2E |
|
||||
| 7 | Per-grant override (e.g., 600/min) takes effect without restart | Integration |
|
||||
| 8 | Audit writes are async: request latency unchanged when audit write slow (simulated) | Integration |
|
||||
|
||||
**Estimated budget:** ~20K tokens
|
||||
**Risk notes:** Ensure audit writes can't block or error-out the request path; use a bounded queue and drop-with-counter pattern rather than in-line writes.
|
||||
|
||||
---
|
||||
|
||||
## M5 — Cache + Offline Degradation + Observability
|
||||
|
||||
**Goal:** Sessions feel fast and stay useful when the peer is slow or down.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- In-memory response cache keyed by `(grant_id, verb, resource, query_hash)`, TTL 30s default
|
||||
- Cache NOT used for `search`; only `list` and `get`
|
||||
- Cache flushed on cert rotation and grant revocation
|
||||
- Circuit breaker per peer: after N failures, fast-fail for cooldown window
|
||||
- `_source` tagging extended with `_cached: true` when served from cache
|
||||
- Agent-visible "federation offline for `<peer>`" signal emitted once per session per peer
|
||||
- OTEL spans: `federation.request` with attrs `grant_id`, `peer`, `verb`, `resource`, `outcome`, `latency_ms`, `cached`
|
||||
- W3C `traceparent` propagated across the mTLS boundary (both directions)
|
||||
- `mosaic federation status` CLI subcommand
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `apps/gateway/src/federation/client/response-cache.service.ts`
|
||||
- `apps/gateway/src/federation/client/circuit-breaker.service.ts`
|
||||
- `apps/gateway/src/federation/observability/` (span helpers)
|
||||
- `packages/mosaic/src/commands/federation/status.ts`
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| - | --------------------------------------------------------------------------------------------- | ----- |
|
||||
| 1 | Two identical `list` calls within 30s: second served from cache, flagged `_cached` | Integration |
|
||||
| 2 | `search` is never cached: two identical searches both hit the peer | Integration |
|
||||
| 3 | After grant revocation, peer's cache is flushed immediately | Integration |
|
||||
| 4 | After N consecutive failures, circuit opens; subsequent requests fail-fast without network call | E2E |
|
||||
| 5 | Circuit closes after cooldown and next success | E2E |
|
||||
| 6 | With peer offline, session completes using local data, one "federation offline" signal surfaced | E2E |
|
||||
| 7 | OTEL traces show spans on both gateways correlated by `traceparent` | E2E |
|
||||
| 8 | `mosaic federation status` prints peer state, cert expiry, last success/failure, circuit state | Unit |
|
||||
|
||||
**Estimated budget:** ~20K tokens
|
||||
**Risk notes:** Caching correctness under revocation must be provable — write tests that intentionally race revocation against cached hits.
|
||||
|
||||
---
|
||||
|
||||
## M6 — Revocation, Auto-Renewal, CRL
|
||||
|
||||
**Goal:** Grant lifecycle works end-to-end: admin revoke, revoke-on-delete, automatic cert renewal, CRL distribution.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- `mosaic federation grant revoke <id>` → status `revoked`, CRL updated, audit entry
|
||||
- DB hook: deleting a user cascades `revoke-on-delete` on all grants where that user is subject
|
||||
- Step-CA CRL endpoint exposed; serving gateway enforces CRL check on every handshake (cached CRL, refresh interval 60s)
|
||||
- Client-side cert renewal job: at T-7 days, submit renewal CSR; rotate cert atomically; flush cache
|
||||
- On renewal failure, peer marked `degraded` and admin-visible alert emitted
|
||||
- Server A detects revocation on next request (TLS handshake fails with specific error) → peer marked `revoked`, user notified
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- `apps/gateway/src/federation/server/crl.service.ts` + endpoint
|
||||
- `apps/gateway/src/federation/server/revocation.service.ts`
|
||||
- DB cascade trigger or ORM hook for user deletion → grant revocation
|
||||
- `apps/gateway/src/federation/client/renewal.job.ts` (scheduled)
|
||||
- `packages/mosaic/src/commands/federation/grant.ts` gains `revoke` subcommand
|
||||
|
||||
**Acceptance tests:**
|
||||
| # | Test | Layer |
|
||||
| - | ----------------------------------------------------------------------------------------- | ----- |
|
||||
| 1 | Admin `grant revoke` → A's next request fails with TLS-level error | E2E |
|
||||
| 2 | Deleting subject user on B auto-revokes all grants where that user was the subject | Integration |
|
||||
| 3 | CRL endpoint serves correct list; revoked cert present | Integration |
|
||||
| 4 | Server rejects cert listed in CRL even if cert itself is still time-valid | E2E |
|
||||
| 5 | Cert at T-7 days triggers renewal job; new cert issued and installed without dropped requests | E2E |
|
||||
| 6 | Renewal failure marks peer `degraded` and surfaces alert | Integration |
|
||||
| 7 | A marks peer `revoked` after a revocation-caused handshake failure (not on transient network errors) | E2E |
|
||||
|
||||
**Estimated budget:** ~20K tokens
|
||||
**Risk notes:** The atomic cert swap during renewal is the sharpest edge here — any in-flight request mid-swap must either complete on old or retry on new, never fail mid-call.
|
||||
|
||||
---
|
||||
|
||||
## M7 — Multi-User RBAC Hardening + Team-Scoped Grants + Acceptance Suite
|
||||
|
||||
**Goal:** The full multi-tenant scenario from §4 user stories works end-to-end, with no cross-user leakage under any circumstance.
|
||||
|
||||
**Scope:**
|
||||
|
||||
- Three-user scenario on Server B (E1, E2, E3) each with their own Server A
|
||||
- Team-scoped grants exercised: each employee's team-data visible on their own A, but E1's personal data never visible on E2's A
|
||||
- User-facing UI surfaces on both gateways for: peer list, grant list, audit log viewer, scope editor
|
||||
- Negative-path test matrix (every denial path from PRD §8)
|
||||
- All PRD §15 acceptance criteria mapped to automated tests in the harness
|
||||
- Security review: cert-spoofing, scope-bypass, audit-bypass paths explicitly tested
|
||||
- Cold-storage rollover for audit log >90 days
|
||||
- Docs: operator runbook, onboarding guide, troubleshooting guide
|
||||
|
||||
**Deliverables:**
|
||||
|
||||
- Full federation acceptance suite in `tools/federation-harness/acceptance/`
|
||||
- `apps/web` surfaces for peer/grant/audit management
|
||||
- `docs/federation/RUNBOOK.md`, `docs/federation/ONBOARDING.md`, `docs/federation/TROUBLESHOOTING.md`
|
||||
- Audit cold-tier job (daily cron, moves rows >90d to separate table or object storage)
|
||||
|
||||
**Acceptance tests:**
|
||||
Every PRD §15 criterion must be automated and green. Additionally:
|
||||
|
||||
| # | Test | Layer |
|
||||
| --- | ----------------------------------------------------------------------------------------------------- | ---------------- |
|
||||
| 1 | 3-employee scenario: each A sees only its user's data from B | E2E |
|
||||
| 2 | Grant with team scope returns team data; same grant denied access to another employee's personal data | E2E |
|
||||
| 3 | Concurrent sessions from E1's and E2's Server A to B interleave without any leakage | E2E |
|
||||
| 4 | Audit log across 3-user test shows per-grant trails with no mis-attributed rows | E2E |
|
||||
| 5 | Scope editor UI round-trip: edit → save → next request uses new scope | E2E |
|
||||
| 6 | Attempt to use a revoked grant's cert against a different grant's endpoint: rejected | E2E |
|
||||
| 7 | 90-day-old audit rows moved to cold tier; queryable via explicit historical query | Integration |
|
||||
| 8 | Runbook steps validated: an operator following the runbook can onboard, rotate, and revoke | Manual checklist |
|
||||
|
||||
**Estimated budget:** ~25K tokens
|
||||
**Risk notes:** This is the security-critical milestone. Budget review time here is non-negotiable — plan for two independent code reviews (internal + security-focused) before merge.
|
||||
|
||||
---
|
||||
|
||||
## Total Budget & Timeline Sketch
|
||||
|
||||
| Milestone | Tokens (est.) | Can parallelize? |
|
||||
| --------- | ------------- | ---------------------- |
|
||||
| M1 | 20K | No (foundation) |
|
||||
| M2 | 30K | No (needs M1) |
|
||||
| M3 | 40K | No (needs M2) |
|
||||
| M4 | 20K | No (needs M3) |
|
||||
| M5 | 20K | Yes (with M6 after M4) |
|
||||
| M6 | 20K | Yes (with M5 after M3) |
|
||||
| M7 | 25K | No (needs all) |
|
||||
| **Total** | **~175K** | |
|
||||
|
||||
Parallelization of M5 and M6 after M4 saves one milestone's worth of serial time.
|
||||
|
||||
---
|
||||
|
||||
## Exit Criteria (federation feature complete)
|
||||
|
||||
All of the following must be green on `main`:
|
||||
|
||||
- Every PRD §15 acceptance criterion automated and passing
|
||||
- Every milestone's acceptance table green
|
||||
- Security review sign-off on M7
|
||||
- Runbook walk-through completed by operator (not author)
|
||||
- `mosaic doctor` recognizes federated tier and reports peer health accurately
|
||||
- Two-gateway production deployment (woltje.com ↔ uscllc.com) operational for ≥7 days without incident
|
||||
|
||||
---
|
||||
|
||||
## Next Step After This Doc Is Approved
|
||||
|
||||
1. File tracking issues on `git.mosaicstack.dev/mosaicstack/stack` — one per milestone, labeled `epic:federation`
|
||||
2. Populate `docs/TASKS.md` with M1's task breakdown (per-task agent assignment, budget, dependencies)
|
||||
3. Begin M1 implementation
|
||||
@@ -1,85 +0,0 @@
|
||||
# Mission Manifest — Federation v1
|
||||
|
||||
> Persistent document tracking full mission scope, status, and session history.
|
||||
> Updated by the orchestrator at each phase transition and milestone completion.
|
||||
|
||||
## Mission
|
||||
|
||||
**ID:** federation-v1-20260419
|
||||
**Statement:** Jarvis operates across 3–4 workstations in two physical locations (home, USC). The user currently reaches back to a single jarvis-brain checkout from every session; a prior OpenBrain attempt caused cache, latency, and opacity pain. This mission builds asymmetric federation between Mosaic Stack gateways so that a session on a user's home gateway can query their work gateway in real time without data ever persisting across the boundary, with full multi-tenant isolation and standard-PKI (X.509 / Step-CA) trust management.
|
||||
**Phase:** Planning complete — M1 implementation not started
|
||||
**Current Milestone:** FED-M1
|
||||
**Progress:** 0 / 7 milestones
|
||||
**Status:** active
|
||||
**Last Updated:** 2026-04-19 (PRD + MILESTONES + tracking issues filed)
|
||||
**Parent Mission:** None — new mission
|
||||
|
||||
## Context
|
||||
|
||||
Federation is the solution to what originally drove OpenBrain. The prior attempt coupled every agent session to a remote service, introduced cache/latency/opacity pain, and created a hard dependency that punished offline use. This redesign:
|
||||
|
||||
1. Makes federation **gateway-to-gateway**, not agent-to-service
|
||||
2. Keeps each user's home instance as source of truth for their data
|
||||
3. Exposes scoped, read-only data on demand without persisting across the boundary
|
||||
4. Uses X.509 mTLS via Step-CA so rotation/revocation/CRL/OCSP are standard
|
||||
5. Supports multi-tenant serving sides (employees on uscllc.com each federating back to their own home gateway) with no cross-user leakage
|
||||
6. Requires federation-tier instances on both sides (PG + pgvector + Valkey) — local/standalone tiers cannot federate
|
||||
7. Works over public HTTPS (no VPN required); Tailscale is an optional overlay
|
||||
|
||||
Key design references:
|
||||
|
||||
- `docs/federation/PRD.md` — 16-section product requirements
|
||||
- `docs/federation/MILESTONES.md` — 7-milestone decomposition with per-milestone acceptance tests
|
||||
- `docs/federation/TASKS.md` — per-task breakdown (M1 populated; M2-M7 deferred to mission planning)
|
||||
- `docs/research/mempalace-evaluation/` (in jarvis-brain) — why we didn't adopt MemPalace
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- [ ] AC-1: Two Mosaic Stack gateways on different hosts can establish a federation grant via CLI-driven onboarding
|
||||
- [ ] AC-2: Server A can query Server B for `tasks`, `notes`, `memory` respecting scope filters
|
||||
- [ ] AC-3: User on B with no grant cannot be queried by A, even if A has a valid grant for another user (cross-user isolation)
|
||||
- [ ] AC-4: Revoking a grant on B causes A's next request to fail with a clear error within one request cycle
|
||||
- [ ] AC-5: Cert rotation happens automatically at T-7 days; in-progress session survives rotation without user action
|
||||
- [ ] AC-6: Rate-limit enforcement returns 429 with `Retry-After`; client backs off
|
||||
- [ ] AC-7: With B unreachable, a session on A completes using local data and surfaces "federation offline for `<peer>`" once per session
|
||||
- [ ] AC-8: Every federated request appears in B's `federation_audit_log` within 1 second
|
||||
- [ ] AC-9: Scope excluding `credentials` means credentials are never returned — even via `search` with matching keywords
|
||||
- [ ] AC-10: `mosaic federation status` shows cert expiry, grant status, last success/failure per peer
|
||||
- [ ] AC-11: Full 3-employee multi-tenant scenario passes with no cross-user leakage
|
||||
- [ ] AC-12: Two-gateway production deployment (woltje.com ↔ uscllc.com) operational ≥7 days without incident
|
||||
- [ ] AC-13: All 7 milestones ship as merged PRs with green CI and closed issues
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------ | --------------------------------------------- | ----------- | ------ | ----- | ------- | --------- |
|
||||
| 1 | FED-M1 | Federated tier infrastructure | not-started | — | #460 | — | — |
|
||||
| 2 | FED-M2 | Step-CA + grant schema + admin CLI | not-started | — | #461 | — | — |
|
||||
| 3 | FED-M3 | mTLS handshake + list/get + scope enforcement | not-started | — | #462 | — | — |
|
||||
| 4 | FED-M4 | search verb + audit log + rate limit | not-started | — | #463 | — | — |
|
||||
| 5 | FED-M5 | Cache + offline degradation + OTEL | not-started | — | #464 | — | — |
|
||||
| 6 | FED-M6 | Revocation + auto-renewal + CRL | not-started | — | #465 | — | — |
|
||||
| 7 | FED-M7 | Multi-user RBAC hardening + acceptance suite | not-started | — | #466 | — | — |
|
||||
|
||||
## Budget
|
||||
|
||||
| Milestone | Est. tokens | Parallelizable? |
|
||||
| --------- | ----------- | ---------------------- |
|
||||
| FED-M1 | 20K | No (foundation) |
|
||||
| FED-M2 | 30K | No (needs M1) |
|
||||
| FED-M3 | 40K | No (needs M2) |
|
||||
| FED-M4 | 20K | No (needs M3) |
|
||||
| FED-M5 | 20K | Yes (with M6 after M4) |
|
||||
| FED-M6 | 20K | Yes (with M5 after M3) |
|
||||
| FED-M7 | 25K | No (needs all) |
|
||||
| **Total** | **~175K** | |
|
||||
|
||||
## Session History
|
||||
|
||||
| Session | Date | Runtime | Outcome |
|
||||
| ------- | ---------- | ------- | --------------------------------------------------- |
|
||||
| S1 | 2026-04-19 | claude | PRD authored, MILESTONES decomposed, 7 issues filed |
|
||||
|
||||
## Next Step
|
||||
|
||||
Begin FED-M1 implementation: federated tier infrastructure. Breakdown in `docs/federation/TASKS.md`.
|
||||
@@ -1,330 +0,0 @@
|
||||
# Mosaic Stack — Federation PRD
|
||||
|
||||
**Status:** Draft v1 (locked for implementation)
|
||||
**Owner:** Jason
|
||||
**Date:** 2026-04-19
|
||||
**Scope:** Enables cross-instance data federation between Mosaic Stack gateways with asymmetric trust, multi-tenant scoping, and no cross-boundary data persistence.
|
||||
|
||||
---
|
||||
|
||||
## 1. Problem Statement
|
||||
|
||||
Jarvis operates across 3–4 workstations in two physical locations (home, USC). The user currently reaches back to a single jarvis-brain checkout from every session, and has tried OpenBrain to solve cross-session state — with poor results (cache invalidation, latency, opacity, hard dependency on a remote service).
|
||||
|
||||
The goal is a federation model where each user's **home instance** remains the source of truth for their personal data, and **work/shared instances** expose scoped data to that user's home instance on demand — without persisting anything across the boundary.
|
||||
|
||||
## 2. Goals
|
||||
|
||||
1. A user logged into their **home gateway** (Server A) can query their **work gateway** (Server B) in real time during a session.
|
||||
2. Data returned from Server B is used in-session only; never written to Server A storage.
|
||||
3. Server B has multiple users, each with their own Server A. No user's data leaks to another user.
|
||||
4. Federation works over public HTTPS (no VPN required). Tailscale is a supported optional overlay.
|
||||
5. Sync latency target: seconds, or at the next data need of the agent.
|
||||
6. Graceful degradation: if the remote instance is unreachable, the local session continues with local data and a clear "federation offline" signal.
|
||||
7. Teams exist on both sides. A federation grant can share **team-owned** data without exposing other team members' personal data.
|
||||
8. Auth and revocation use standard PKI (X.509) so that certificate tooling (Step-CA, rotation, OCSP, CRL) is available out of the box.
|
||||
|
||||
## 3. Non-Goals (v1)
|
||||
|
||||
- Mesh federation (N-to-N). v1 is strictly A↔B pairs.
|
||||
- Cross-instance writes. All federation is **read-only** on the remote side.
|
||||
- Shared agent sessions across instances. Sessions live on one instance; federation is data-plane only.
|
||||
- Cross-instance SSO. Each instance owns its own BetterAuth identity store; federation is service-to-service, not user-to-user.
|
||||
- Realtime push from B→A. v1 is pull-only (A pulls from B during a session).
|
||||
- Global search index. Federation is query-by-query, not index replication.
|
||||
|
||||
## 4. User Stories
|
||||
|
||||
- **US-1 (Solo user at home):** As the sole user on Server A, I want my agent session on workstation-1 to see the same data it saw on workstation-2, without running OpenBrain.
|
||||
- **US-2 (Cross-location):** As a user with a home server and a work server, I want a session on my home laptop to transparently pull my USC-owned tasks/notes when I ask for them.
|
||||
- **US-3 (Work admin):** As the admin of mosaic.uscllc.com, I want to grant each employee's home gateway scoped read access to only their own data plus explicitly-shared team data.
|
||||
- **US-4 (Privacy boundary):** As employee A on mosaic.uscllc.com, my data must never appear in a session on employee B's home gateway — even if both are federated with uscllc.com.
|
||||
- **US-5 (Revocation):** As a work admin, when I delete an employee, their home gateway loses access within one request cycle.
|
||||
- **US-6 (Offline):** As a user in a hotel with flaky wifi, my local session keeps working; federation calls fail fast and are reported as "offline," not hung.
|
||||
|
||||
## 5. Architecture Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐ mTLS / X.509 ┌─────────────────────────────────────┐
|
||||
│ Server A — mosaic.woltje.com │ ───────────────────────► │ Server B — mosaic.uscllc.com │
|
||||
│ (home, master for Jason) │ ◄── JSON over HTTPS │ (work, multi-tenant) │
|
||||
│ │ │ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ │ │ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ Gateway │ │ Postgres │ │ │ │ Gateway │ │ Postgres │ │
|
||||
│ │ (NestJS) │──│ (local SSOT)│ │ │ │ (NestJS) │──│ (tenant SSOT)│ │
|
||||
│ └──────┬───────┘ └──────────────┘ │ │ └──────┬───────┘ └──────────────┘ │
|
||||
│ │ │ │ │ │
|
||||
│ │ FederationClient │ │ │ FederationServer │
|
||||
│ │ (outbound, scoped query) │ │ │ (inbound, RBAC-gated) │
|
||||
│ └───────────────────────────┼──────────────────────────┼────────┘ │
|
||||
│ │ │ │
|
||||
│ Step-CA (issues A's client cert) │ │ Step-CA (issues B's server cert, │
|
||||
│ │ │ trusts A's CA root on grant)│
|
||||
└─────────────────────────────────────┘ └──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
- Federation is a **transport-layer** concern between two gateways, implemented as a new internal module on each gateway.
|
||||
- Both sides run the same code. Direction (client vs. server role) is per-request.
|
||||
- Nothing in the agent runtime changes — agents query the gateway; the gateway decides local vs. remote.
|
||||
|
||||
## 6. Transport & Authentication
|
||||
|
||||
**Transport:** HTTPS with mutual TLS (mTLS).
|
||||
|
||||
**Identity:** X.509 client certificates issued by Step-CA. Each federation grant materializes as a client cert on the requesting side and a trust-anchor entry (CA root or explicit cert) on the serving side.
|
||||
|
||||
**Why mTLS over HMAC bearer tokens:**
|
||||
|
||||
- Standard rotation/revocation semantics (renew, CRL, OCSP).
|
||||
- The cert subject carries identity claims (user, grant_id) that don't need a separate DB lookup to verify authenticity.
|
||||
- Client certs never transit request bodies, so they can't be logged by accident.
|
||||
- Transport is pinned at the TLS layer, not re-validated per-handler.
|
||||
|
||||
**Cert contents (SAN + subject):**
|
||||
|
||||
- `CN=grant-<uuid>`
|
||||
- `O=<requesting-server-hostname>` (e.g., `mosaic.woltje.com`)
|
||||
- Custom OIDs embedded in SAN otherName:
|
||||
- `mosaic.federation.grantId` (UUID)
|
||||
- `mosaic.federation.subjectUserId` (user on the **serving** side that this grant acts-as)
|
||||
- Default lifetime: **30 days**, with auto-renewal at T-7 days if the grant is still active.
|
||||
|
||||
**Step-CA topology (v1):** Each server runs its own Step-CA instance. During onboarding, the serving side imports the requesting side's CA root. A central/shared Step-CA is out of scope for v1.
|
||||
|
||||
**Handshake:**
|
||||
|
||||
1. Client (A) opens HTTPS to B with its grant cert.
|
||||
2. B validates cert chain against trusted CA roots for that grant.
|
||||
3. B extracts `grantId` and `subjectUserId` from the cert.
|
||||
4. B loads the grant record, checks it is `active`, not revoked, and not expired.
|
||||
5. B enforces scope and rate-limit for this grant.
|
||||
6. Request proceeds; response returned.
|
||||
|
||||
## 7. Data Model
|
||||
|
||||
All tables live on **each instance's own Postgres**. Federation grants are bilateral — each side has a record of the grant.
|
||||
|
||||
### 7.1 `federation_grants` (on serving side, Server B)
|
||||
|
||||
| Field | Type | Notes |
|
||||
| --------------------------- | ----------- | ------------------------------------------------- |
|
||||
| `id` | uuid PK | |
|
||||
| `subject_user_id` | uuid FK | Which local user this grant acts-as |
|
||||
| `requesting_server` | text | Hostname of requesting gateway (e.g., woltje.com) |
|
||||
| `requesting_ca_fingerprint` | text | SHA-256 of trusted CA root |
|
||||
| `active_cert_fingerprint` | text | SHA-256 of currently valid client cert |
|
||||
| `scope` | jsonb | See §8 |
|
||||
| `rate_limit_rpm` | int | Default 60 |
|
||||
| `status` | enum | `pending`, `active`, `suspended`, `revoked` |
|
||||
| `created_at` | timestamptz | |
|
||||
| `activated_at` | timestamptz | |
|
||||
| `revoked_at` | timestamptz | |
|
||||
| `last_used_at` | timestamptz | |
|
||||
| `notes` | text | Admin-visible description |
|
||||
|
||||
### 7.2 `federation_peers` (on requesting side, Server A)
|
||||
|
||||
| Field | Type | Notes |
|
||||
| --------------------- | ----------- | ------------------------------------------------ |
|
||||
| `id` | uuid PK | |
|
||||
| `peer_hostname` | text | e.g., `mosaic.uscllc.com` |
|
||||
| `peer_ca_fingerprint` | text | SHA-256 of peer's CA root |
|
||||
| `grant_id` | uuid | The grant ID assigned by the peer |
|
||||
| `local_user_id` | uuid FK | Who on Server A this federation belongs to |
|
||||
| `client_cert_pem` | text (enc) | Current client cert (PEM); rotated automatically |
|
||||
| `client_key_pem` | text (enc) | Private key (encrypted at rest) |
|
||||
| `cert_expires_at` | timestamptz | |
|
||||
| `status` | enum | `pending`, `active`, `degraded`, `revoked` |
|
||||
| `last_success_at` | timestamptz | |
|
||||
| `last_failure_at` | timestamptz | |
|
||||
| `notes` | text | |
|
||||
|
||||
### 7.3 `federation_audit_log` (on serving side, Server B)
|
||||
|
||||
| Field | Type | Notes |
|
||||
| ------------- | ----------- | ------------------------------------------------ |
|
||||
| `id` | uuid PK | |
|
||||
| `grant_id` | uuid FK | |
|
||||
| `occurred_at` | timestamptz | indexed |
|
||||
| `verb` | text | `query`, `handshake`, `rejected`, `rate_limited` |
|
||||
| `resource` | text | e.g., `tasks`, `notes`, `credentials` |
|
||||
| `query_hash` | text | SHA-256 of normalized query (no payload stored) |
|
||||
| `outcome` | text | `ok`, `denied`, `error` |
|
||||
| `bytes_out` | int | |
|
||||
| `latency_ms` | int | |
|
||||
|
||||
**Audit policy:** Every federation request is logged on the serving side. Read-only requests only — no body capture. Retention: 90 days hot, then roll to cold storage.
|
||||
|
||||
## 8. RBAC & Scope
|
||||
|
||||
Every federation grant has a scope object that answers three questions for every inbound request:
|
||||
|
||||
1. **Who is acting?** — `subject_user_id` from the cert.
|
||||
2. **What resources?** — an allowlist of resource types (`tasks`, `notes`, `credentials`, `memory`, `teams/:id/tasks`, …).
|
||||
3. **Filter expression** — predicates applied on top of the subject's normal RBAC (see below).
|
||||
|
||||
### 8.1 Scope schema
|
||||
|
||||
```json
|
||||
{
|
||||
"resources": ["tasks", "notes", "memory"],
|
||||
"filters": {
|
||||
"tasks": { "include_teams": ["team_uuid_1", "team_uuid_2"], "include_personal": true },
|
||||
"notes": { "include_personal": true, "include_teams": [] },
|
||||
"memory": { "include_personal": true }
|
||||
},
|
||||
"excluded_resources": ["credentials", "api_keys"],
|
||||
"max_rows_per_query": 500
|
||||
}
|
||||
```
|
||||
|
||||
### 8.2 Access rule (enforced on serving side)
|
||||
|
||||
For every inbound federated query on resource R:
|
||||
|
||||
1. Resolve effective identity → `subject_user_id`.
|
||||
2. Check R is in `scope.resources` and NOT in `scope.excluded_resources`. Otherwise 403.
|
||||
3. Evaluate the user's **normal RBAC** (what would they see if they logged into Server B directly?).
|
||||
4. Intersect with the scope filter (e.g., only team X, only personal).
|
||||
5. Apply `max_rows_per_query`.
|
||||
6. Return; log to audit.
|
||||
|
||||
### 8.3 Team boundary guarantees
|
||||
|
||||
- Scope filters only narrow the native RBAC; they never widen it. A grant cannot confer access the user would not have had themselves.
|
||||
- `include_teams` means "only these teams," not "these teams in addition to all teams."
|
||||
- `include_personal: false` hides the user's personal data entirely from federation, even if they own it — useful for work-only accounts.
|
||||
|
||||
### 8.4 No cross-user leakage
|
||||
|
||||
When Server B has multiple users (employees) all federating back to their own Server A:
|
||||
|
||||
- Each employee has their own grant with their own `subject_user_id`.
|
||||
- The cert is bound to a specific grant; there is no mechanism by which one grant's cert can be used to impersonate another.
|
||||
- Audit log is per-grant.
|
||||
|
||||
## 9. Query Model
|
||||
|
||||
Federation exposes a **narrow read API**, not arbitrary SQL.
|
||||
|
||||
### 9.1 Supported verbs (v1)
|
||||
|
||||
| Verb | Purpose | Returns |
|
||||
| -------------- | ------------------------------------------ | ------------------------------- |
|
||||
| `list` | Paginated list of a resource type | Array of resources |
|
||||
| `get` | Fetch a single resource by id | One resource or 404 |
|
||||
| `search` | Keyword search within allowed resources | Ranked list of hits |
|
||||
| `capabilities` | What this grant is allowed to do right now | Scope object + rate-limit state |
|
||||
|
||||
### 9.2 Not in v1
|
||||
|
||||
- Write verbs.
|
||||
- Aggregations / analytics.
|
||||
- Streaming / subscriptions (future: see §13).
|
||||
|
||||
### 9.3 Agent-facing integration
|
||||
|
||||
Agents never call federation directly. Instead:
|
||||
|
||||
- The gateway query layer accepts `source: "local" | "federated:<peer_hostname>" | "all"`.
|
||||
- `"all"` fans out in parallel, merges results, tags each with `_source`.
|
||||
- Federation results are in-memory only; the gateway does not persist them.
|
||||
|
||||
## 10. Caching
|
||||
|
||||
- **In-memory response cache** with short TTL (default 30s) for `list` and `get`. `search` is not cached.
|
||||
- Cache is keyed by `(grant_id, verb, resource, query_hash)`.
|
||||
- Cache is flushed on cert rotation and on grant revocation.
|
||||
- No disk cache. No cross-session cache.
|
||||
|
||||
## 11. Bootstrap & Onboarding
|
||||
|
||||
### 11.1 Instance capability tiers
|
||||
|
||||
| Tier | Storage | Queue | Memory | Can federate? |
|
||||
| ------------ | -------- | ------- | -------- | --------------------- |
|
||||
| `local` | PGlite | in-proc | keyword | No |
|
||||
| `standalone` | Postgres | Valkey | keyword | No (can be client) |
|
||||
| `federated` | Postgres | Valkey | pgvector | Yes (server + client) |
|
||||
|
||||
Federation requires `federated` tier on **both** sides.
|
||||
|
||||
### 11.2 Onboarding flow (admin-driven)
|
||||
|
||||
1. Admin on Server B runs `mosaic federation grant create --user <user-id> --peer <peer-hostname> --scope-file scope.json`.
|
||||
2. Server B generates a `grant_id`, prints a one-time enrollment URL containing the grant ID + B's CA root fingerprint.
|
||||
3. Admin on Server A (or the user themselves, if allowed) runs `mosaic federation peer add <enrollment-url>`.
|
||||
4. Server A's Step-CA generates a CSR for the new grant. A submits the CSR to B over a short-lived enrollment endpoint (single-use token in the enrollment URL).
|
||||
5. B's Step-CA signs the cert (with grant ID embedded in SAN OIDs), returns it.
|
||||
6. A stores the signed cert + private key (encrypted) in `federation_peers`.
|
||||
7. Grant status flips from `pending` to `active` on both sides.
|
||||
8. Cert auto-renews at T-7 days using the standard Step-CA renewal flow as long as the grant remains active.
|
||||
|
||||
### 11.3 Revocation
|
||||
|
||||
- **Admin-initiated:** `mosaic federation grant revoke <grant-id>` on B flips status to `revoked`, adds the cert to B's CRL, and writes an audit entry.
|
||||
- **Revoke-on-delete:** Deleting a user on B automatically revokes all grants where that user is the subject.
|
||||
- Server A learns of revocation on the next request (TLS handshake fails) and flips the peer to `revoked`.
|
||||
|
||||
### 11.4 Rate limit
|
||||
|
||||
Default `60 req/min` per grant. Configurable per grant. Enforced at the serving side. A rate-limited request returns `429` with `Retry-After`.
|
||||
|
||||
## 12. Operational Concerns
|
||||
|
||||
- **Observability:** Each federation request emits an OTEL span with `grant_id`, `peer`, `verb`, `resource`, `outcome`, `latency_ms`. Traces correlate across both servers via W3C traceparent.
|
||||
- **Health check:** `mosaic federation status` on each side shows active grants, last-success times, cert expirations, and any CRL mismatches.
|
||||
- **Backpressure:** If the serving side is overloaded, it returns `503` with a structured body; the client marks the peer `degraded` and falls back to local-only until the next successful handshake.
|
||||
- **Secrets:** `client_key_pem` in `federation_peers` is encrypted with the gateway's key (sealed with the instance's master key — same mechanism as `provider_credentials`).
|
||||
- **Credentials never cross:** The `credentials` resource type is in the default excluded list. It must be explicitly added to scope (admin action, logged) and even then is per-grant and per-user.
|
||||
|
||||
## 13. Future (post-v1)
|
||||
|
||||
- B→A push (e.g., "notify A when a task assigned to subject changes") via Socket.IO over mTLS.
|
||||
- Mesh (N-to-N) federation.
|
||||
- Write verbs with conflict resolution.
|
||||
- Shared Step-CA (a "root of roots") so that onboarding doesn't require exchanging CA roots.
|
||||
- Federated memory search over vector indexes with homomorphic filtering.
|
||||
|
||||
## 14. Locked Decisions (was "Open Questions")
|
||||
|
||||
| # | Question | Decision |
|
||||
| --- | ------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 1 | What happens to a grant when its subject user is deleted? | **Revoke-on-delete.** All grants where the user is subject are auto-revoked and CRL'd. |
|
||||
| 2 | Do we audit read-only requests? | **Yes.** All federated reads are audited on the serving side. Bodies are not captured; query hash + metadata only. |
|
||||
| 3 | Default rate limit? | **60 requests per minute per grant,** override-able per grant. |
|
||||
| 4 | How do we verify the requesting-server's identity beyond the grant token? | **X.509 client cert tied to the user,** issued by Step-CA (per-server) or locally generated. Cert subject carries `grantId` + `subjectUserId`. |
|
||||
|
||||
### M1 decisions
|
||||
|
||||
- **Postgres deployment:** **Containerized** alongside the gateway in M1 (Docker Compose profile). Moving to a dedicated host is a M5+ operational concern, not a v1 feature.
|
||||
- **Instance signing key:** **Separate** from the Step-CA key. Step-CA signs federation certs; the instance master key seals at-rest secrets (client keys, provider credentials). Different blast-radius, different rotation cadences.
|
||||
|
||||
## 15. Acceptance Criteria
|
||||
|
||||
- [ ] Two Mosaic Stack gateways on different hosts can establish a federation grant via the CLI-driven onboarding flow.
|
||||
- [ ] Server A can query Server B for `tasks`, `notes`, `memory` respecting scope filters.
|
||||
- [ ] A user on B with no grant cannot be queried by A, even if A has a valid grant for another user.
|
||||
- [ ] Revoking a grant on B causes A's next request to fail with a clear error within one request cycle.
|
||||
- [ ] Cert rotation happens automatically at T-7 days; an in-progress session survives rotation without user action.
|
||||
- [ ] Rate-limit enforcement returns 429 with `Retry-After`; client backs off.
|
||||
- [ ] With B unreachable, a session on A completes using local data and surfaces a "federation offline for `<peer>`" signal once per session.
|
||||
- [ ] Every federated request appears in B's `federation_audit_log` within 1 second.
|
||||
- [ ] A scope excluding `credentials` means credentials are not returnable even via `search` with matching keywords.
|
||||
- [ ] `mosaic federation status` shows cert expiry, grant status, and last success/failure per peer.
|
||||
|
||||
## 16. Implementation Milestones (reference)
|
||||
|
||||
Milestones live in `docs/federation/MILESTONES.md` (to be authored next). High-level:
|
||||
|
||||
- **M1:** Server A runs `federated` tier standalone (Postgres + Valkey + pgvector, containerized). No peer yet.
|
||||
- **M2:** Step-CA embedded; `federation_grants` / `federation_peers` schema + admin CLI.
|
||||
- **M3:** Handshake + `list`/`get` verbs with scope enforcement.
|
||||
- **M4:** `search` verb, audit log, rate limits.
|
||||
- **M5:** Cache layer, offline-degradation UX, observability surfaces.
|
||||
- **M6:** Revocation flows (admin + revoke-on-delete), cert auto-renewal.
|
||||
- **M7:** Multi-user RBAC hardening on B, team-scoped grants end-to-end, acceptance suite green.
|
||||
|
||||
---
|
||||
|
||||
**Next step after PRD sign-off:** author `docs/federation/MILESTONES.md` with per-milestone acceptance tests and estimated token budget, then file tracking issues on `git.mosaicstack.dev/mosaicstack/stack`.
|
||||
@@ -1,76 +0,0 @@
|
||||
# Tasks — Federation v1
|
||||
|
||||
> Single-writer: orchestrator only. Workers read but never modify.
|
||||
>
|
||||
> **Mission:** federation-v1-20260419
|
||||
> **Schema:** `| id | status | description | issue | agent | branch | depends_on | estimate | notes |`
|
||||
> **Status values:** `not-started` | `in-progress` | `done` | `blocked` | `failed` | `needs-qa`
|
||||
> **Agent values:** `codex` | `glm-5.1` | `haiku` | `sonnet` | `opus` | `—` (auto)
|
||||
>
|
||||
> **Scope of this file:** M1 is fully decomposed below. M2–M7 are placeholders pending each milestone's entry into active planning — the orchestrator expands them one milestone at a time to avoid speculative decomposition of work whose shape will depend on what M1 surfaces.
|
||||
|
||||
---
|
||||
|
||||
## Milestone 1 — Federated tier infrastructure (FED-M1)
|
||||
|
||||
Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No federation logic yet. Existing standalone behavior does not regress.
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | --------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M1-01 | done | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Shipped in PR #470. Renamed `team` → `standalone`; added `team` deprecation alias; added `DEFAULT_FEDERATED_CONFIG`. |
|
||||
| FED-M1-02 | in-progress | Author `docker-compose.federated.yml` as an overlay profile: Postgres 17 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | sonnet | feat/federation-m1-compose | FED-M1-01 | 5K | Bumped PG16→PG17 to match base compose. Overlay defines distinct `postgres-federated`/`valkey-federated` services, profile-gated. |
|
||||
| FED-M1-03 | not-started | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | codex | feat/federation-m1-pgvector | FED-M1-02 | 8K | Extension create is idempotent `CREATE EXTENSION IF NOT EXISTS vector`. Gate on tier = federated. |
|
||||
| FED-M1-04 | not-started | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | codex | feat/federation-m1-detector | FED-M1-03 | 8K | Structured error type with remediation hints. Logs which service failed, with host:port attempted. |
|
||||
| FED-M1-05 | not-started | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | codex | feat/federation-m1-migrate | FED-M1-04 | 10K | Do NOT run automatically. CLI subcommand `mosaic migrate tier --to federated --dry-run`. Safety rails. |
|
||||
| FED-M1-06 | not-started | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. |
|
||||
| FED-M1-07 | not-started | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Vitest + docker-compose test profile. One test file per assertion; real services, no mocks. |
|
||||
| FED-M1-08 | not-started | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Runs against docker-compose federated profile; uses temp PGlite file; deterministic seed. |
|
||||
| FED-M1-09 | not-started | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | haiku | feat/federation-m1-regression | FED-M1-07 | 4K | Reuse existing e2e harness; just re-point at the federation branch build. Canary that we didn't break it. |
|
||||
| FED-M1-10 | not-started | Code review pass: security-focused on the migration script (data-at-rest during migration) + tier detector (error-message sensitivity leakage). Independent reviewer, not authors of tasks 01-09. | #460 | sonnet | — | FED-M1-09 | 8K | Use `feature-dev:code-reviewer` agent. Specifically: no secrets in error messages; no partial-migration footguns. |
|
||||
| FED-M1-11 | not-started | Docs update: `docs/federation/` operator notes for tier setup; README blurb on federated tier; `docs/guides/` entry for migration. Do NOT touch runbook yet (deferred to FED-M7). | #460 | haiku | feat/federation-m1-docs | FED-M1-10 | 4K | Short, actionable. Link from MISSION-MANIFEST. No decisions captured here — those belong in PRD. |
|
||||
| FED-M1-12 | not-started | PR, CI green, merge to main, close #460. | #460 | — | (aggregate) | FED-M1-11 | 3K | Queue-guard before push; wait for green; merge squashed; tea `issue-close` #460. |
|
||||
|
||||
**M1 total estimate:** ~74K tokens (over-budget vs 20K PRD estimate — explanation below)
|
||||
|
||||
**Why over-budget:** PRD's 20K estimate reflected implementation complexity only. The per-task breakdown includes tests, review, and docs as separate tasks per the delivery cycle, which catches the real cost. The final per-milestone budgets in MISSION-MANIFEST will be updated after M1 completes with actuals.
|
||||
|
||||
---
|
||||
|
||||
## Milestone 2 — Step-CA + grant schema + admin CLI (FED-M2)
|
||||
|
||||
_Deferred to mission planning when M1 is complete. Issue #461 tracks scope._
|
||||
|
||||
## Milestone 3 — mTLS handshake + list/get + scope enforcement (FED-M3)
|
||||
|
||||
_Deferred. Issue #462._
|
||||
|
||||
## Milestone 4 — search + audit + rate limit (FED-M4)
|
||||
|
||||
_Deferred. Issue #463._
|
||||
|
||||
## Milestone 5 — cache + offline + OTEL (FED-M5)
|
||||
|
||||
_Deferred. Issue #464._
|
||||
|
||||
## Milestone 6 — revocation + auto-renewal + CRL (FED-M6)
|
||||
|
||||
_Deferred. Issue #465._
|
||||
|
||||
## Milestone 7 — multi-user hardening + acceptance suite (FED-M7)
|
||||
|
||||
_Deferred. Issue #466._
|
||||
|
||||
---
|
||||
|
||||
## Execution Notes
|
||||
|
||||
**Agent assignment rationale:**
|
||||
|
||||
- `codex` for most implementation tasks (OpenAI credit pool preferred for feature code)
|
||||
- `sonnet` for tests (pattern-based, moderate complexity), `doctor` work (cross-cutting), and independent code review
|
||||
- `haiku` for docs and the standalone regression canary (cheapest tier for mechanical/verification work)
|
||||
- No `opus` in M1 — save for cross-cutting architecture decisions if they surface later
|
||||
|
||||
**Branch strategy:** Each task gets its own feature branch off `main`. Tasks within a milestone merge in dependency order. Final aggregate PR (FED-M1-12) isn't a branch of its own — it's the merge of the last upstream task that closes the issue.
|
||||
|
||||
**Queue guard:** Every push and every merge in this mission must run `~/.config/mosaic/tools/git/ci-queue-wait.sh --purpose push|merge` per Mosaic hard gate #6.
|
||||
@@ -266,80 +266,3 @@ Issues closed: #52, #55, #57, #58, #120-#134
|
||||
**P8-018 closed:** Spin-off stubs created (gatekeeper-service.md, task-queue-unification.md, chroot-sandboxing.md)
|
||||
|
||||
**Next:** Begin execution at Wave 1 — P8-007 (DB migrations) + P8-008 (Types) in parallel.
|
||||
|
||||
---
|
||||
|
||||
### Session 15 — 2026-04-19 — MVP Rollup Manifest Authored
|
||||
|
||||
| Session | Date | Milestone | Tasks Done | Outcome |
|
||||
| ------- | ---------- | -------------- | ------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| 15 | 2026-04-19 | (rollup-level) | MVP-T01 (manifest), MVP-T02 (archive iuv-v2), MVP-T03 (land FED planning) | Authored MVP rollup manifest at `docs/MISSION-MANIFEST.md`. Federation v1 planning merged to `main` (PR #468 / commit `66512550`). Install-ux-v2 archived as complete. |
|
||||
|
||||
**Gap context:** The MVP scratchpad was last updated at Session 14 (2026-03-15). In the intervening month, two sub-missions ran outside the MVP framework: `install-ux-hardening` (complete, `mosaic-v0.0.25`) and `install-ux-v2` (complete on 2026-04-19, `0.0.27` → `0.0.29`). Both archived under `docs/archive/missions/`. The phase-based execution from Sessions 1–14 (Phases 0–8, issues #1–#172) substantially shipped during this window via those sub-missions and standalone PRs — the MVP mission was nominally active but had no rollup manifest tracking it.
|
||||
|
||||
**User reframe (this session):**
|
||||
|
||||
> There will be more in the MVP. This will inevitably become scope creep. I need a solution that works via webUI, TUI, CLI, and just works for MVP. Federation is required because I need it to work NOW, so my disparate jarvis-brain usage can be consolidated properly.
|
||||
|
||||
**Decisions:**
|
||||
|
||||
1. **MVP is the rollup mission**, not a single-purpose mission. Federation v1 is one workstream of MVP, not MVP itself. Phase 0–8 work is preserved as historical context but is no longer the primary control plane.
|
||||
2. **Three-surface parity (webUI / TUI / CLI) is a cross-cutting MVP requirement** (MVP-X1), not a workstream. Encoded explicitly so it can't be silently dropped.
|
||||
3. **Scope creep is named and accommodated.** Manifest has explicit "Likely Additional Workstreams" section listing PRD-derived candidates without committing execution capacity to them.
|
||||
4. **Workstream isolation** — each workstream gets its own manifest under `docs/{workstream}/MISSION-MANIFEST.md`. MVP manifest is rollup only.
|
||||
5. **Archive-don't-delete** — install-ux-v2 manifest moved to `docs/archive/missions/install-ux-v2-20260405/` with status corrected to `complete` (IUV-M03 closeout note added pointing at PR #446 + releases 0.0.27 → 0.0.29).
|
||||
6. **Federation planning landed first** — PR #468 merged before MVP manifest authored, so the manifest references real on-`main` artifacts.
|
||||
|
||||
**Open items:**
|
||||
|
||||
- `.mosaic/orchestrator/mission.json` MVP slot remains empty (zero milestones). Tracked as MVP-T04. Defer until next session — does not block W1 kickoff. Open question: hand-edit vs. `mosaic coord init` reinit.
|
||||
- Additional workstreams (web dashboard parity, TUI/CLI completion, remote control, multi-user/SSO, LLM provider expansion, MCP, brain) anticipated per PRD but not declared. Pre-staged in manifest's "Likely Additional Workstreams" list.
|
||||
|
||||
**Artifacts this session:**
|
||||
|
||||
| Artifact | Status |
|
||||
| -------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ |
|
||||
| PR #468 (`docs(federation): PRD, milestones, mission manifest, and M1 task breakdown`) | merged 2026-04-19 → `main` (commit `66512550`) |
|
||||
| `docs/MISSION-MANIFEST.md` (MVP rollup, replaces install-ux-v2 manifest) | authored on `docs/mvp-mission-manifest` branch |
|
||||
| `docs/TASKS.md` (MVP rollup, points at workstream task files) | authored |
|
||||
| Install-ux-v2 manifest + tasks + scratchpad + iuv-m03-design | moved to `docs/archive/missions/install-ux-v2-20260405/` with status corrected to complete |
|
||||
|
||||
**Next:** PR `docs/mvp-mission-manifest` → merge to `main` → next session begins W1 / FED-M1 from clean state.
|
||||
|
||||
---
|
||||
|
||||
## Session 16 — 2026-04-19 — claude
|
||||
|
||||
**Mode:** Delivery (W1 / FED-M1 execution)
|
||||
**Branch:** `feat/federation-m1-tier-config`
|
||||
**Context budget:** 200K, currently ~45% used (compaction-aware)
|
||||
|
||||
**Goal:** FED-M1-01 — extend `mosaic.config.json` schema: add `"federated"` to tier enum.
|
||||
|
||||
**Critical reconciliation surfaced during pre-flight:**
|
||||
|
||||
The federation PRD (`docs/federation/PRD.md` line 247) defines three tiers: `local | standalone | federated`.
|
||||
The existing code (`packages/config/src/mosaic-config.ts`, `packages/mosaic/src/types.ts`, `packages/mosaic/src/stages/gateway-config.ts`) uses `local | team`.
|
||||
|
||||
`team` is the same conceptual tier as PRD `standalone` (Postgres + Valkey, no pgvector). Rather than carrying a confusing alias forever, FED-M1-01 will rename `team` → `standalone` and add `federated` as a third value, so all downstream federation work has a coherent vocabulary.
|
||||
|
||||
Affected files (storage-tier semantics only — Team/workspace usages unaffected):
|
||||
|
||||
- `packages/config/src/mosaic-config.ts` (StorageTier type, validator enum, defaults)
|
||||
- `packages/mosaic/src/types.ts` (GatewayStorageTier)
|
||||
- `packages/mosaic/src/stages/gateway-config.ts` (~10 references)
|
||||
- `packages/mosaic/src/stages/gateway-config.spec.ts` (test references)
|
||||
- Possibly `tools/e2e-install-test.sh` (referenced grep) and headless env hint string
|
||||
|
||||
**Worker plan:**
|
||||
|
||||
1. Spawn sonnet subagent with explicit task spec + the reconciliation context above.
|
||||
2. Worker delivers diff; orchestrator runs `pnpm typecheck && pnpm lint && pnpm format:check`.
|
||||
3. Independent `feature-dev:code-reviewer` subagent reviews diff.
|
||||
4. Second independent verification subagent (general-purpose, sonnet) verifies reviewer's claims and confirms all `'team'` storage-tier references migrated, no `Team`/workspace bleed.
|
||||
5. Open PR via tea CLI; wait for CI; queue-guard; squash merge; record actuals.
|
||||
|
||||
**Open items:**
|
||||
|
||||
- `MVP-T04` (sync `.mosaic/orchestrator/mission.json`) still deferred.
|
||||
- `team` tier rename touches install wizard headless env vars (`MOSAIC_STORAGE_TIER=team`); will need 0.0.x deprecation note in scratchpad if release notes are written this milestone.
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
# Hotfix Scratchpad — `install.sh` does not seed `TOOLS.md`
|
||||
|
||||
- **Issue:** mosaicstack/stack#457
|
||||
- **Branch:** `fix/tools-md-seeding`
|
||||
- **Type:** Out-of-mission hotfix (not part of Install UX v2 mission)
|
||||
- **Started:** 2026-04-11
|
||||
- **Ships in:** `@mosaicstack/mosaic` 0.0.30
|
||||
|
||||
## Objective
|
||||
|
||||
Ensure `~/.config/mosaic/TOOLS.md` is created on every supported install path so the mandatory AGENTS.md load order actually resolves. The load order lists `TOOLS.md` at position 5 but the bash installer never seeds it.
|
||||
|
||||
## Root cause
|
||||
|
||||
`packages/mosaic/framework/install.sh:228-236` — the post-sync "Seed defaults" loop explicitly lists `AGENTS.md STANDARDS.md`:
|
||||
|
||||
```bash
|
||||
DEFAULTS_DIR="$TARGET_DIR/defaults"
|
||||
if [[ -d "$DEFAULTS_DIR" ]]; then
|
||||
for default_file in AGENTS.md STANDARDS.md; do # ← missing TOOLS.md
|
||||
if [[ -f "$DEFAULTS_DIR/$default_file" ]] && [[ ! -f "$TARGET_DIR/$default_file" ]]; then
|
||||
cp "$DEFAULTS_DIR/$default_file" "$TARGET_DIR/$default_file"
|
||||
ok "Seeded $default_file from defaults"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
```
|
||||
|
||||
`TOOLS.md` is listed in `PRESERVE_PATHS` (line 24) but never created in the first place. A fresh bootstrap install via `tools/install.sh → framework/install.sh` leaves `~/.config/mosaic/TOOLS.md` absent, and the agent load order then points at a missing file.
|
||||
|
||||
### Secondary: TypeScript `syncFramework` is too greedy
|
||||
|
||||
`packages/mosaic/src/config/file-adapter.ts:133-160` — `FileConfigAdapter.syncFramework` correctly seeds TOOLS.md, but it does so by iterating _every_ file in `framework/defaults/`:
|
||||
|
||||
```ts
|
||||
for (const entry of readdirSync(defaultsDir)) {
|
||||
const dest = join(this.mosaicHome, entry);
|
||||
if (!existsSync(dest)) {
|
||||
copyFileSync(join(defaultsDir, entry), dest);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`framework/defaults/` contains:
|
||||
|
||||
```
|
||||
AGENTS.md
|
||||
AUDIT-2026-02-17-framework-consistency.md
|
||||
README.md
|
||||
SOUL.md ← hardcoded "Jarvis"
|
||||
STANDARDS.md
|
||||
TOOLS.md
|
||||
USER.md
|
||||
```
|
||||
|
||||
So on a fresh install the TS wizard would silently copy the `Jarvis`-flavored `SOUL.md` + placeholder `USER.md` + internal `AUDIT-*.md` and `README.md` into the user's mosaic home before `mosaic init` ever prompts them. That's a latent identity bug as well as a root-clutter bug — the wizard's own stages are responsible for generating `SOUL.md`/`USER.md` via templates.
|
||||
|
||||
### Tertiary: stale `TOOLS.md.template`
|
||||
|
||||
`packages/mosaic/framework/templates/TOOLS.md.template` still references `~/.config/mosaic/rails/git/…` and `~/.config/mosaic/rails/codex/…`. The `rails/` tree was renamed to `tools/` in the v1→v2 migration (see `run_migrations` in `install.sh`, which removes the old `rails/` symlink). Any user who does run `mosaic init` ends up with a `TOOLS.md` that points to paths that no longer exist.
|
||||
|
||||
## Scope of this fix
|
||||
|
||||
1. **`packages/mosaic/framework/install.sh`** — extend the explicit seed list to include `TOOLS.md`.
|
||||
2. **`packages/mosaic/src/config/file-adapter.ts`** — restrict `syncFramework` defaults-seeding to an explicit whitelist (`AGENTS.md`, `STANDARDS.md`, `TOOLS.md`) so the TS wizard never accidentally seeds `SOUL.md`/`USER.md`/`README.md`/`AUDIT-*.md` into the mosaic home.
|
||||
3. **`packages/mosaic/framework/templates/TOOLS.md.template`** — replace `rails/` with `tools/` in the wrapper-path examples (minimal surgical fix; full template modernization is out of scope for a 0.0.30 hotfix).
|
||||
4. **Regression test** — unit test around `FileConfigAdapter.syncFramework` that runs against a tmpdir fixture asserting:
|
||||
- `TOOLS.md` is seeded when absent
|
||||
- `AGENTS.md` / `STANDARDS.md` are still seeded when absent
|
||||
- `SOUL.md` / `USER.md` are **not** seeded from `defaults/` (the wizard stages own those)
|
||||
- Existing root files are not clobbered.
|
||||
|
||||
Out of scope (tracked separately / future work):
|
||||
|
||||
- Regenerating `defaults/SOUL.md` and `defaults/USER.md` so they no longer contain Jarvis-specific content.
|
||||
- Fully modernizing `TOOLS.md.template` to match the rich canonical `defaults/TOOLS.md` reference.
|
||||
- `issue-create.sh` / `pr-create.sh` `eval` bugs (already captured to OpenBrain from the prior hotfix).
|
||||
|
||||
## Plan / checklist
|
||||
|
||||
- [ ] Branch `fix/tools-md-seeding` from `main` (at `b2cbf89`)
|
||||
- [ ] File Gitea issue (direct API; wrappers broken for bodies with backticks)
|
||||
- [ ] Scratchpad created (this file)
|
||||
- [ ] `install.sh` seed loop extended to `AGENTS.md STANDARDS.md TOOLS.md`
|
||||
- [ ] `file-adapter.ts` seeding restricted to explicit whitelist
|
||||
- [ ] `TOOLS.md.template` `rails/` → `tools/`
|
||||
- [ ] Regression test added (`file-adapter.test.ts`) — failing first, then green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic run typecheck` green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic run lint` green
|
||||
- [ ] `pnpm --filter @mosaicstack/mosaic exec vitest run` — new test green, no new failures beyond the known pre-existing `uninstall.spec.ts:138`
|
||||
- [ ] Repo baselines: `pnpm typecheck` / `pnpm lint` / `pnpm format:check`
|
||||
- [ ] Independent code review (`feature-dev:code-reviewer`, sonnet tier)
|
||||
- [ ] Commit + push
|
||||
- [ ] PR opened via Gitea API
|
||||
- [ ] CI queue guard cleared (bypass local `ci-queue-wait.sh` if stale origin URL breaks it; query Gitea API directly)
|
||||
- [ ] CI green on PR
|
||||
- [ ] PR merged (squash)
|
||||
- [ ] CI green on main
|
||||
- [ ] Issue closed with link to merge commit
|
||||
- [ ] `chore/release-mosaic-0.0.30` branch bumps `packages/mosaic/package.json` 0.0.29 → 0.0.30
|
||||
- [ ] Release PR opened + merged
|
||||
- [ ] `.woodpecker/publish.yml` auto-publishes to Gitea npm registry
|
||||
- [ ] Publish verified (`npm view @mosaicstack/mosaic version` or registry check)
|
||||
|
||||
## Risks / blockers
|
||||
|
||||
- `ci-queue-wait.sh` wrapper may still crash on stale `origin` URL (captured in OpenBrain from prior hotfix). Workaround: query Gitea API directly for running/queued pipelines.
|
||||
- `issue-create.sh` / `pr-create.sh` `eval` bugs. Workaround: Gitea API direct call.
|
||||
- `uninstall.spec.ts:138` is a pre-existing failure on main; not this change's problem.
|
||||
- Publish flow is fire-and-forget on main push — if `publish.yml` fails, rollback means republishing a follow-up patch, not reverting the version bump.
|
||||
@@ -1,114 +0,0 @@
|
||||
# Hotfix Scratchpad — `mosaic yolo <runtime>` passes runtime name as initial user message
|
||||
|
||||
- **Issue:** mosaicstack/stack#454
|
||||
- **Branch:** `fix/yolo-runtime-initial-arg`
|
||||
- **Type:** Out-of-mission hotfix (not part of Install UX v2 mission)
|
||||
- **Started:** 2026-04-11
|
||||
|
||||
## Objective
|
||||
|
||||
Stop `mosaic yolo <runtime>` from passing the runtime name (`claude`, `codex`, etc.) as the initial user message to the underlying CLI. Restore the mission-auto-prompt path for yolo launches.
|
||||
|
||||
## Root cause (confirmed)
|
||||
|
||||
`packages/mosaic/src/commands/launch.ts:779` — the `yolo <runtime>` action handler:
|
||||
|
||||
```ts
|
||||
.action((runtime: string, _opts: unknown, cmd: Command) => {
|
||||
// ... validate runtime ...
|
||||
launchRuntime(runtime as RuntimeName, cmd.args, true);
|
||||
});
|
||||
```
|
||||
|
||||
Commander.js includes declared positional arguments in `cmd.args`. For `mosaic yolo claude`:
|
||||
|
||||
- `runtime` (destructured) = `"claude"`
|
||||
- `cmd.args` = `["claude"]` — the same value
|
||||
|
||||
`launchRuntime` treats `["claude"]` as excess positional args, and for the `claude` case that becomes the initial user message. As a secondary consequence, `hasMissionNoArgs` evaluates false, so the mission-auto-prompt path is bypassed too.
|
||||
|
||||
## Live reproduction (intercepted claude binary)
|
||||
|
||||
```
|
||||
$ PATH=/tmp/fake-claude-bin:$PATH mosaic yolo claude
|
||||
[mosaic] Launching Claude Code in YOLO mode...
|
||||
argv[1]: --dangerously-skip-permissions
|
||||
argv[2]: --append-system-prompt
|
||||
argv[3] (len=25601): # ACTIVE MISSION — HARD GATE ...
|
||||
argv[4]: claude ← the bug
|
||||
```
|
||||
|
||||
Non-yolo variant `mosaic claude` is clean:
|
||||
|
||||
```
|
||||
argv[1]: --append-system-prompt
|
||||
argv[2]: <prompt>
|
||||
argv[3]: Active mission detected: MVP. Read the mission state files and report status.
|
||||
```
|
||||
|
||||
## Plan
|
||||
|
||||
1. Refactor `launch.ts`: extract `registerRuntimeLaunchers(program, handler)` with an injectable handler so commander wiring is testable without spawning subprocesses. `registerLaunchCommands` delegates to it with `launchRuntime` as the handler.
|
||||
2. Fix: in the `yolo <runtime>` action, pass `cmd.args.slice(1)` instead of `cmd.args`.
|
||||
3. Add `packages/mosaic/src/commands/launch.spec.ts`:
|
||||
- Failing-first reproducer: parse `['node','x','yolo','claude']` and assert handler receives `extraArgs=[]` and `yolo=true`.
|
||||
- Regression test: parse `['node','x','claude']` asserts handler receives `extraArgs=[]` and `yolo=false`.
|
||||
- Excess args: parse `['node','x','yolo','claude','--print','hi']` asserts handler receives `extraArgs=['--print','hi']` (with `--print` kept because `allowUnknownOption` is true).
|
||||
- Excess args non-yolo: parse `['node','x','claude','--print','hi']` asserts `extraArgs=['--print','hi']`.
|
||||
- Reject unknown runtime under yolo.
|
||||
4. Run typecheck, lint, format:check, vitest for `@mosaicstack/mosaic`.
|
||||
5. Independent code review (feature-dev:code-reviewer subagent, sonnet tier).
|
||||
6. Commit → push → PR via wrappers → merge → CI green → close issue #454.
|
||||
7. Release decision (`mosaic-v0.0.30`) deferred to Jason after merge.
|
||||
|
||||
## Framework compliance sub-findings (out-of-scope; to capture in OpenBrain after)
|
||||
|
||||
- `~/.config/mosaic/tools/git/issue-create.sh` uses `eval` on `$BODY`; arbitrary bodies with backticks, `$`, or parens break catastrophically.
|
||||
- `gitea_issue_create_api` fallback uses `curl -fsS` without `-L`; after the `mosaicstack/mosaic-stack → mosaicstack/stack` rename, the API redirect is not followed and the fallback silently fails.
|
||||
- Local repo `origin` remote still points at old `mosaic/mosaic-stack.git` slug. Not touched here per git-config safety rule.
|
||||
- `~/.config/mosaic/TOOLS.md` referenced by the global load order but does not exist on disk.
|
||||
|
||||
These will be captured to OpenBrain after the hotfix merges so they don't get lost, and filed as separate tracking items.
|
||||
|
||||
## Progress checkpoints
|
||||
|
||||
- [x] Branch created (`fix/yolo-runtime-initial-arg`)
|
||||
- [x] Issue #454 opened
|
||||
- [x] Scratchpad scaffolded
|
||||
- [x] Failing test added (red)
|
||||
- [x] Refactor + fix applied
|
||||
- [x] Tests green (launch.spec.ts 11/11)
|
||||
- [x] Baselines green (typecheck, lint, format:check, vitest — pre-existing `uninstall.spec.ts:138` failure on branch main acknowledged, not caused by this change)
|
||||
- [x] Code review pass (feature-dev:code-reviewer, sonnet — no blockers)
|
||||
- [x] Commit + push (commit 1dd4f59)
|
||||
- [x] PR opened (mosaicstack/stack#455)
|
||||
- [x] CI queue guard cleared (no pending pipelines pre-push or pre-merge)
|
||||
- [x] PR merged (squash merge commit b2cec8c6bac29336a6cdcdb4f19806f7b5fa0054)
|
||||
- [x] CI green on main (`ci/woodpecker/push/ci` + `ci/woodpecker/push/publish` both success on merge commit)
|
||||
- [x] Issue #454 closed
|
||||
- [x] Scratchpad final evidence entry
|
||||
|
||||
## Tests run
|
||||
|
||||
- `pnpm --filter @mosaicstack/mosaic run typecheck` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic run lint` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic exec prettier --check "src/**/*.ts"` → green
|
||||
- `pnpm --filter @mosaicstack/mosaic exec vitest run src/commands/launch.spec.ts` → 11/11 pass
|
||||
- `pnpm --filter @mosaicstack/mosaic exec vitest run` → 270/271 pass (1 pre-existing `uninstall.spec.ts:138` EACCES failure, confirmed on the branch before this change)
|
||||
- `pnpm typecheck` (repo) → green
|
||||
- `pnpm lint` (repo) → green
|
||||
- `pnpm format:check` (repo) → green (after prettier-writing the scratchpad)
|
||||
|
||||
## Risks / blockers
|
||||
|
||||
None expected. Refactor is small and the Commander API is stable. Test needs `exitOverride()` to prevent `process.exit` on invalid runtime.
|
||||
|
||||
## Final verification evidence
|
||||
|
||||
- PR: mosaicstack/stack#455 — state `closed`, merged.
|
||||
- Merge commit: `b2cec8c6bac29336a6cdcdb4f19806f7b5fa0054` (squash to `main`).
|
||||
- Post-merge CI (main @ b2cec8c6): `ci/woodpecker/push/ci` = success, `ci/woodpecker/push/publish` = success. (`ci/woodpecker/tag/publish` was last observed as a pre-existing failure on the prior release tag and is unrelated to this change.)
|
||||
- Issue mosaicstack/stack#454 closed with a comment linking the merge commit.
|
||||
- Launch regression suite: `launch.spec.ts` 11/11 pass on main.
|
||||
- Baselines on main after merge are inherited from the PR CI run.
|
||||
- Release decision (`mosaicstack/mosaic` 0.0.30) intentionally deferred to the user — the fix is now sitting on main awaiting a release cut.
|
||||
@@ -1,9 +1,7 @@
|
||||
export type { MosaicConfig, StorageTier, MemoryConfigRef } from './mosaic-config.js';
|
||||
export {
|
||||
DEFAULT_LOCAL_CONFIG,
|
||||
DEFAULT_STANDALONE_CONFIG,
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
DEFAULT_TEAM_CONFIG,
|
||||
loadConfig,
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
} from './mosaic-config.js';
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
DEFAULT_LOCAL_CONFIG,
|
||||
DEFAULT_STANDALONE_CONFIG,
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
} from './mosaic-config.js';
|
||||
|
||||
describe('validateConfig — tier enum', () => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
let stderrSpy: any;
|
||||
|
||||
beforeEach(() => {
|
||||
stderrSpy = vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
stderrSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('accepts tier="local"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'local',
|
||||
storage: { type: 'pglite', dataDir: '.mosaic/storage-pglite' },
|
||||
queue: { type: 'local', dataDir: '.mosaic/queue' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('local');
|
||||
});
|
||||
|
||||
it('accepts tier="standalone"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'standalone',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('accepts tier="federated"', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'federated',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'pgvector' },
|
||||
});
|
||||
expect(result.tier).toBe('federated');
|
||||
});
|
||||
|
||||
it('accepts deprecated tier="team" as alias for "standalone" and emits a deprecation warning', () => {
|
||||
const result = validateConfig({
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
expect(result.tier).toBe('standalone');
|
||||
expect(stderrSpy).toHaveBeenCalledWith(expect.stringContaining('DEPRECATED'));
|
||||
});
|
||||
|
||||
it('rejects an invalid tier with an error listing all three valid values', () => {
|
||||
expect(() =>
|
||||
validateConfig({
|
||||
tier: 'invalid',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
}),
|
||||
).toThrow(/local.*standalone.*federated|federated.*standalone.*local/);
|
||||
});
|
||||
|
||||
it('error message for invalid tier mentions all three valid values', () => {
|
||||
let message = '';
|
||||
try {
|
||||
validateConfig({
|
||||
tier: 'invalid',
|
||||
storage: { type: 'postgres', url: 'postgresql://...' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
});
|
||||
} catch (err) {
|
||||
message = err instanceof Error ? err.message : String(err);
|
||||
}
|
||||
expect(message).toContain('"local"');
|
||||
expect(message).toContain('"standalone"');
|
||||
expect(message).toContain('"federated"');
|
||||
});
|
||||
});
|
||||
|
||||
describe('DEFAULT_* config constants', () => {
|
||||
it('DEFAULT_LOCAL_CONFIG has tier="local"', () => {
|
||||
expect(DEFAULT_LOCAL_CONFIG.tier).toBe('local');
|
||||
});
|
||||
|
||||
it('DEFAULT_STANDALONE_CONFIG has tier="standalone"', () => {
|
||||
expect(DEFAULT_STANDALONE_CONFIG.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG has tier="federated" and pgvector memory', () => {
|
||||
expect(DEFAULT_FEDERATED_CONFIG.tier).toBe('federated');
|
||||
expect(DEFAULT_FEDERATED_CONFIG.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG uses port 5433 (distinct from standalone 5432)', () => {
|
||||
const url = (DEFAULT_FEDERATED_CONFIG.storage as { url: string }).url;
|
||||
expect(url).toContain('5433');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG has enableVector=true on storage', () => {
|
||||
const storage = DEFAULT_FEDERATED_CONFIG.storage as {
|
||||
type: string;
|
||||
url: string;
|
||||
enableVector?: boolean;
|
||||
};
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('detectFromEnv — tier env-var routing', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
// Work on a fresh copy so individual tests can set/delete keys freely.
|
||||
process.env = { ...originalEnv };
|
||||
delete process.env['MOSAIC_STORAGE_TIER'];
|
||||
delete process.env['DATABASE_URL'];
|
||||
delete process.env['VALKEY_URL'];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('no env vars → returns local config', () => {
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('local');
|
||||
expect(config.storage.type).toBe('pglite');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated alone → returns federated config with enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
const storage = config.storage as { type: string; enableVector?: boolean };
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated + DATABASE_URL → uses the URL and still has enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
process.env['DATABASE_URL'] = 'postgresql://custom:pass@db.example.com:5432/mydb';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
const storage = config.storage as { type: string; url: string; enableVector?: boolean };
|
||||
expect(storage.url).toBe('postgresql://custom:pass@db.example.com:5432/mydb');
|
||||
expect(storage.enableVector).toBe(true);
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=standalone alone → returns standalone-shaped config (not local)', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('standalone');
|
||||
expect(config.storage.type).toBe('postgres');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
});
|
||||
@@ -7,7 +7,7 @@ import type { QueueAdapterConfig as QueueConfig } from '@mosaicstack/queue';
|
||||
/* Types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export type StorageTier = 'local' | 'standalone' | 'federated';
|
||||
export type StorageTier = 'local' | 'team';
|
||||
|
||||
export interface MemoryConfigRef {
|
||||
type: 'pgvector' | 'sqlite-vec' | 'keyword';
|
||||
@@ -31,21 +31,10 @@ export const DEFAULT_LOCAL_CONFIG: MosaicConfig = {
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
export const DEFAULT_STANDALONE_CONFIG: MosaicConfig = {
|
||||
tier: 'standalone',
|
||||
export const DEFAULT_TEAM_CONFIG: MosaicConfig = {
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
|
||||
tier: 'federated',
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
enableVector: true,
|
||||
},
|
||||
queue: { type: 'bullmq' },
|
||||
memory: { type: 'pgvector' },
|
||||
};
|
||||
|
||||
@@ -53,7 +42,7 @@ export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
|
||||
/* Validation */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const VALID_TIERS = new Set<string>(['local', 'standalone', 'federated']);
|
||||
const VALID_TIERS = new Set<string>(['local', 'team']);
|
||||
const VALID_STORAGE_TYPES = new Set<string>(['postgres', 'pglite', 'files']);
|
||||
const VALID_QUEUE_TYPES = new Set<string>(['bullmq', 'local']);
|
||||
const VALID_MEMORY_TYPES = new Set<string>(['pgvector', 'sqlite-vec', 'keyword']);
|
||||
@@ -66,19 +55,9 @@ export function validateConfig(raw: unknown): MosaicConfig {
|
||||
const obj = raw as Record<string, unknown>;
|
||||
|
||||
// tier
|
||||
let tier = obj['tier'];
|
||||
// Deprecated alias: 'team' → 'standalone' (kept for backward-compat with 0.0.x installs)
|
||||
if (tier === 'team') {
|
||||
process.stderr.write(
|
||||
'[mosaic] DEPRECATED: tier="team" is deprecated — use "standalone" instead. ' +
|
||||
'Update your mosaic.config.json.\n',
|
||||
);
|
||||
tier = 'standalone';
|
||||
}
|
||||
const tier = obj['tier'];
|
||||
if (typeof tier !== 'string' || !VALID_TIERS.has(tier)) {
|
||||
throw new Error(
|
||||
`Invalid tier "${String(tier)}" — expected "local", "standalone", or "federated"`,
|
||||
);
|
||||
throw new Error(`Invalid tier "${String(tier)}" — expected "local" or "team"`);
|
||||
}
|
||||
|
||||
// storage
|
||||
@@ -123,33 +102,10 @@ export function validateConfig(raw: unknown): MosaicConfig {
|
||||
/* Loader */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export function detectFromEnv(): MosaicConfig {
|
||||
const tier = process.env['MOSAIC_STORAGE_TIER'];
|
||||
|
||||
if (tier === 'federated') {
|
||||
function detectFromEnv(): MosaicConfig {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_FEDERATED_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
enableVector: true,
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=federated without DATABASE_URL — use the default
|
||||
// federated config (port 5433, enableVector: true, pgvector memory).
|
||||
return DEFAULT_FEDERATED_CONFIG;
|
||||
}
|
||||
|
||||
if (tier === 'standalone') {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
...DEFAULT_TEAM_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
@@ -160,26 +116,6 @@ export function detectFromEnv(): MosaicConfig {
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=standalone without DATABASE_URL — use the default
|
||||
// standalone config instead of silently falling back to local.
|
||||
return DEFAULT_STANDALONE_CONFIG;
|
||||
}
|
||||
|
||||
// Legacy: DATABASE_URL set without MOSAIC_STORAGE_TIER — treat as standalone.
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return DEFAULT_LOCAL_CONFIG;
|
||||
}
|
||||
|
||||
|
||||
@@ -372,11 +372,7 @@ export const messages = pgTable(
|
||||
|
||||
// ─── pgvector custom type ───────────────────────────────────────────────────
|
||||
|
||||
export const vector = customType<{
|
||||
data: number[];
|
||||
driverParam: string;
|
||||
config: { dimensions: number };
|
||||
}>({
|
||||
const vector = customType<{ data: number[]; driverParam: string; config: { dimensions: number } }>({
|
||||
dataType(config) {
|
||||
return `vector(${config?.dimensions ?? 1536})`;
|
||||
},
|
||||
|
||||
@@ -222,17 +222,12 @@ sync_framework
|
||||
mkdir -p "$TARGET_DIR/memory"
|
||||
mkdir -p "$TARGET_DIR/credentials"
|
||||
|
||||
# Seed defaults — copy framework contract files from defaults/ to framework
|
||||
# root if not already present. These ship with sensible defaults but must
|
||||
# Seed defaults — copy from defaults/ to framework root if not already present.
|
||||
# These are user-editable files that ship with sensible defaults but should
|
||||
# never be overwritten once the user has customized them.
|
||||
#
|
||||
# This list must match the framework-contract whitelist in
|
||||
# packages/mosaic/src/config/file-adapter.ts (FileConfigAdapter.syncFramework).
|
||||
# SOUL.md and USER.md are intentionally NOT seeded here — they are generated
|
||||
# by `mosaic init` from templates with user-supplied values.
|
||||
DEFAULTS_DIR="$TARGET_DIR/defaults"
|
||||
if [[ -d "$DEFAULTS_DIR" ]]; then
|
||||
for default_file in AGENTS.md STANDARDS.md TOOLS.md; do
|
||||
for default_file in AGENTS.md STANDARDS.md; do
|
||||
if [[ -f "$DEFAULTS_DIR/$default_file" ]] && [[ ! -f "$TARGET_DIR/$default_file" ]]; then
|
||||
cp "$DEFAULTS_DIR/$default_file" "$TARGET_DIR/$default_file"
|
||||
ok "Seeded $default_file from defaults"
|
||||
|
||||
@@ -5,32 +5,32 @@ Project-specific tooling belongs in the project's `AGENTS.md`, not here.
|
||||
|
||||
## Mosaic Git Wrappers (Use First)
|
||||
|
||||
Mosaic wrappers at `~/.config/mosaic/tools/git/*.sh` handle platform detection and edge cases. Always use these before raw CLI commands.
|
||||
Mosaic wrappers at `~/.config/mosaic/rails/git/*.sh` handle platform detection and edge cases. Always use these before raw CLI commands.
|
||||
|
||||
```bash
|
||||
# Issues
|
||||
~/.config/mosaic/tools/git/issue-create.sh
|
||||
~/.config/mosaic/tools/git/issue-close.sh
|
||||
~/.config/mosaic/rails/git/issue-create.sh
|
||||
~/.config/mosaic/rails/git/issue-close.sh
|
||||
|
||||
# PRs
|
||||
~/.config/mosaic/tools/git/pr-create.sh
|
||||
~/.config/mosaic/tools/git/pr-merge.sh
|
||||
~/.config/mosaic/rails/git/pr-create.sh
|
||||
~/.config/mosaic/rails/git/pr-merge.sh
|
||||
|
||||
# Milestones
|
||||
~/.config/mosaic/tools/git/milestone-create.sh
|
||||
~/.config/mosaic/rails/git/milestone-create.sh
|
||||
|
||||
# CI queue guard (required before push/merge)
|
||||
~/.config/mosaic/tools/git/ci-queue-wait.sh --purpose push|merge
|
||||
~/.config/mosaic/rails/git/ci-queue-wait.sh --purpose push|merge
|
||||
```
|
||||
|
||||
## Code Review (Codex)
|
||||
|
||||
```bash
|
||||
# Code quality review
|
||||
~/.config/mosaic/tools/codex/codex-code-review.sh --uncommitted
|
||||
~/.config/mosaic/rails/codex/codex-code-review.sh --uncommitted
|
||||
|
||||
# Security review
|
||||
~/.config/mosaic/tools/codex/codex-security-review.sh --uncommitted
|
||||
~/.config/mosaic/rails/codex/codex-security-review.sh --uncommitted
|
||||
```
|
||||
|
||||
## Git Providers
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@mosaicstack/mosaic",
|
||||
"version": "0.0.30",
|
||||
"version": "0.0.29",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://git.mosaicstack.dev/mosaicstack/stack.git",
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach, type MockInstance } from 'vitest';
|
||||
import { Command } from 'commander';
|
||||
import { registerRuntimeLaunchers, type RuntimeLaunchHandler } from './launch.js';
|
||||
|
||||
/**
|
||||
* Tests for the commander wiring between `mosaic <runtime>` / `mosaic yolo <runtime>`
|
||||
* subcommands and the internal `launchRuntime` dispatcher.
|
||||
*
|
||||
* Regression target: see mosaicstack/stack#454 — before the fix, `mosaic yolo claude`
|
||||
* passed the literal string "claude" as an excess positional argument to the
|
||||
* underlying CLI, which Claude Code then interpreted as the first user message.
|
||||
*
|
||||
* The bug existed because Commander.js includes declared positional arguments
|
||||
* (here `<runtime>`) in `cmd.args` alongside any true excess args. The action
|
||||
* handler must slice them off before forwarding.
|
||||
*/
|
||||
|
||||
function buildProgram(handler: RuntimeLaunchHandler): Command {
|
||||
const program = new Command();
|
||||
program.exitOverride(); // prevent process.exit on parse errors
|
||||
registerRuntimeLaunchers(program, handler);
|
||||
return program;
|
||||
}
|
||||
|
||||
// `process.exit` returns `never`, so vi.spyOn demands a replacement with the
|
||||
// same signature. We throw from the mock to short-circuit into test-land.
|
||||
const exitThrows = (): never => {
|
||||
throw new Error('process.exit called');
|
||||
};
|
||||
|
||||
describe('registerRuntimeLaunchers — non-yolo subcommands', () => {
|
||||
let mockExit: MockInstance<typeof process.exit>;
|
||||
|
||||
beforeEach(() => {
|
||||
// process.exit is called when the yolo action rejects an invalid runtime.
|
||||
// Stub it so the assertion catches the rejection instead of terminating
|
||||
// the test runner.
|
||||
mockExit = vi.spyOn(process, 'exit').mockImplementation(exitThrows);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mockExit.mockRestore();
|
||||
});
|
||||
|
||||
it.each(['claude', 'codex', 'opencode', 'pi'] as const)(
|
||||
'forwards %s with empty extraArgs and yolo=false',
|
||||
(runtime) => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', runtime]);
|
||||
|
||||
expect(handler).toHaveBeenCalledTimes(1);
|
||||
expect(handler).toHaveBeenCalledWith(runtime, [], false);
|
||||
},
|
||||
);
|
||||
|
||||
it('forwards excess args after a non-yolo runtime subcommand', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'claude', '--print', 'hello']);
|
||||
|
||||
expect(handler).toHaveBeenCalledWith('claude', ['--print', 'hello'], false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('registerRuntimeLaunchers — yolo <runtime>', () => {
|
||||
let mockExit: MockInstance<typeof process.exit>;
|
||||
let mockError: MockInstance<typeof console.error>;
|
||||
|
||||
beforeEach(() => {
|
||||
mockExit = vi.spyOn(process, 'exit').mockImplementation(exitThrows);
|
||||
mockError = vi.spyOn(console, 'error').mockImplementation(() => {});
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
mockExit.mockRestore();
|
||||
mockError.mockRestore();
|
||||
});
|
||||
|
||||
it.each(['claude', 'codex', 'opencode', 'pi'] as const)(
|
||||
'does NOT pass the runtime name as an extra arg (regression #454) for yolo %s',
|
||||
(runtime) => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'yolo', runtime]);
|
||||
|
||||
expect(handler).toHaveBeenCalledTimes(1);
|
||||
// The critical assertion: extraArgs must be empty, not [runtime].
|
||||
// Before the fix, cmd.args was [runtime] and the runtime name leaked
|
||||
// through to the underlying CLI as an initial positional argument.
|
||||
expect(handler).toHaveBeenCalledWith(runtime, [], true);
|
||||
},
|
||||
);
|
||||
|
||||
it('forwards true excess args after a yolo runtime', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
program.parse(['node', 'mosaic', 'yolo', 'claude', '--print', 'hi']);
|
||||
|
||||
expect(handler).toHaveBeenCalledWith('claude', ['--print', 'hi'], true);
|
||||
});
|
||||
|
||||
it('rejects an unknown runtime under yolo without invoking the handler', () => {
|
||||
const handler = vi.fn();
|
||||
const program = buildProgram(handler);
|
||||
|
||||
expect(() => program.parse(['node', 'mosaic', 'yolo', 'bogus'])).toThrow('process.exit called');
|
||||
expect(handler).not.toHaveBeenCalled();
|
||||
expect(mockExit).toHaveBeenCalledWith(1);
|
||||
});
|
||||
});
|
||||
@@ -757,23 +757,8 @@ function runUpgrade(args: string[]): never {
|
||||
|
||||
// ─── Commander registration ─────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Handler invoked when a runtime subcommand (`<runtime>` or `yolo <runtime>`)
|
||||
* is parsed. Exposed so tests can exercise the commander wiring without
|
||||
* spawning subprocesses.
|
||||
*/
|
||||
export type RuntimeLaunchHandler = (
|
||||
runtime: RuntimeName,
|
||||
extraArgs: string[],
|
||||
yolo: boolean,
|
||||
) => void;
|
||||
|
||||
/**
|
||||
* Wire `<runtime>` and `yolo <runtime>` subcommands onto `program` using a
|
||||
* pluggable launch handler. Separated from `registerLaunchCommands` so tests
|
||||
* can inject a spy and verify argument forwarding.
|
||||
*/
|
||||
export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunchHandler): void {
|
||||
export function registerLaunchCommands(program: Command): void {
|
||||
// Runtime launchers
|
||||
for (const runtime of ['claude', 'codex', 'opencode', 'pi'] as const) {
|
||||
program
|
||||
.command(runtime)
|
||||
@@ -781,10 +766,11 @@ export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunc
|
||||
.allowUnknownOption(true)
|
||||
.allowExcessArguments(true)
|
||||
.action((_opts: unknown, cmd: Command) => {
|
||||
handler(runtime, cmd.args, false);
|
||||
launchRuntime(runtime, cmd.args, false);
|
||||
});
|
||||
}
|
||||
|
||||
// Yolo mode
|
||||
program
|
||||
.command('yolo <runtime>')
|
||||
.description('Launch a runtime in dangerous-permissions mode (claude|codex|opencode|pi)')
|
||||
@@ -798,20 +784,7 @@ export function registerRuntimeLaunchers(program: Command, handler: RuntimeLaunc
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
// Commander includes declared positional arguments (`<runtime>`) in
|
||||
// `cmd.args` alongside any trailing excess args. Slice off the first
|
||||
// element so we forward only true excess args — otherwise the runtime
|
||||
// name leaks into the underlying CLI as an initial positional arg,
|
||||
// which Claude Code interprets as the first user message.
|
||||
// Regression test: launch.spec.ts, issue mosaicstack/stack#454.
|
||||
handler(runtime as RuntimeName, cmd.args.slice(1), true);
|
||||
});
|
||||
}
|
||||
|
||||
export function registerLaunchCommands(program: Command): void {
|
||||
// Runtime launchers + yolo mode wired to the real process-replacing launcher.
|
||||
registerRuntimeLaunchers(program, (runtime, extraArgs, yolo) => {
|
||||
launchRuntime(runtime, extraArgs, yolo);
|
||||
launchRuntime(runtime as RuntimeName, cmd.args, true);
|
||||
});
|
||||
|
||||
// Coord (mission orchestrator)
|
||||
|
||||
@@ -1,134 +0,0 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { mkdtempSync, mkdirSync, writeFileSync, rmSync, readFileSync, existsSync } from 'node:fs';
|
||||
import { tmpdir } from 'node:os';
|
||||
import { join } from 'node:path';
|
||||
import { FileConfigAdapter, DEFAULT_SEED_FILES } from './file-adapter.js';
|
||||
|
||||
/**
|
||||
* Regression tests for the `FileConfigAdapter.syncFramework` seed behavior.
|
||||
*
|
||||
* Background: the bash installer (`framework/install.sh`) and this TS wizard
|
||||
* path both seed framework-contract files from `framework/defaults/` into the
|
||||
* user's mosaic home on first install. Before this fix:
|
||||
*
|
||||
* - The bash installer only seeded `AGENTS.md` and `STANDARDS.md`, leaving
|
||||
* `TOOLS.md` missing despite it being listed as mandatory in the
|
||||
* AGENTS.md load order (position 5).
|
||||
* - The TS wizard iterated every file in `defaults/` and copied it to the
|
||||
* mosaic home root — including `defaults/SOUL.md` (hardcoded "Jarvis"),
|
||||
* `defaults/USER.md` (placeholder), and internal framework files like
|
||||
* `README.md` and `AUDIT-*.md`. That clobbered the identity flow on
|
||||
* fresh installs and leaked framework-internal clutter into the user's
|
||||
* home directory.
|
||||
*
|
||||
* This suite pins the whitelist and the preservation semantics so both
|
||||
* regressions stay fixed.
|
||||
*/
|
||||
|
||||
function makeFixture(): { sourceDir: string; mosaicHome: string; defaultsDir: string } {
|
||||
const root = mkdtempSync(join(tmpdir(), 'mosaic-file-adapter-'));
|
||||
const sourceDir = join(root, 'source');
|
||||
const mosaicHome = join(root, 'mosaic-home');
|
||||
const defaultsDir = join(sourceDir, 'defaults');
|
||||
|
||||
mkdirSync(defaultsDir, { recursive: true });
|
||||
mkdirSync(mosaicHome, { recursive: true });
|
||||
|
||||
// Framework-contract defaults we expect the wizard to seed.
|
||||
writeFileSync(join(defaultsDir, 'AGENTS.md'), '# AGENTS default\n');
|
||||
writeFileSync(join(defaultsDir, 'STANDARDS.md'), '# STANDARDS default\n');
|
||||
writeFileSync(join(defaultsDir, 'TOOLS.md'), '# TOOLS default\n');
|
||||
|
||||
// Non-contract files we must NOT seed on first install.
|
||||
writeFileSync(join(defaultsDir, 'SOUL.md'), '# SOUL default (should not be seeded)\n');
|
||||
writeFileSync(join(defaultsDir, 'USER.md'), '# USER default (should not be seeded)\n');
|
||||
writeFileSync(join(defaultsDir, 'README.md'), '# README (framework-internal)\n');
|
||||
writeFileSync(
|
||||
join(defaultsDir, 'AUDIT-2026-02-17-framework-consistency.md'),
|
||||
'# Audit snapshot\n',
|
||||
);
|
||||
|
||||
return { sourceDir, mosaicHome, defaultsDir };
|
||||
}
|
||||
|
||||
describe('FileConfigAdapter.syncFramework — defaults seeding', () => {
|
||||
let fixture: ReturnType<typeof makeFixture>;
|
||||
|
||||
beforeEach(() => {
|
||||
fixture = makeFixture();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
rmSync(join(fixture.sourceDir, '..'), { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('seeds the three framework-contract files on a fresh mosaic home', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
for (const name of DEFAULT_SEED_FILES) {
|
||||
expect(existsSync(join(fixture.mosaicHome, name))).toBe(true);
|
||||
}
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'TOOLS.md'), 'utf-8')).toContain(
|
||||
'# TOOLS default',
|
||||
);
|
||||
});
|
||||
|
||||
it('does NOT seed SOUL.md or USER.md from defaults/ (wizard stages own those)', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
// SOUL.md and USER.md live in defaults/ for historical reasons, but they
|
||||
// are template-rendered per-user by the wizard stages. Seeding them here
|
||||
// would clobber the identity flow and leak placeholder content.
|
||||
expect(existsSync(join(fixture.mosaicHome, 'SOUL.md'))).toBe(false);
|
||||
expect(existsSync(join(fixture.mosaicHome, 'USER.md'))).toBe(false);
|
||||
});
|
||||
|
||||
it('does NOT seed README.md or AUDIT-*.md from defaults/', async () => {
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
|
||||
await adapter.syncFramework('fresh');
|
||||
|
||||
expect(existsSync(join(fixture.mosaicHome, 'README.md'))).toBe(false);
|
||||
expect(existsSync(join(fixture.mosaicHome, 'AUDIT-2026-02-17-framework-consistency.md'))).toBe(
|
||||
false,
|
||||
);
|
||||
});
|
||||
|
||||
it('preserves existing contract files — never overwrites user customization', async () => {
|
||||
// Also plant a root-level AGENTS.md in sourceDir so that `syncDirectory`
|
||||
// itself (not just the seed loop) has something to try to overwrite.
|
||||
// Without this, the test would silently pass even if preserve semantics
|
||||
// were broken in syncDirectory.
|
||||
writeFileSync(join(fixture.sourceDir, 'AGENTS.md'), '# shipped AGENTS from source root\n');
|
||||
|
||||
writeFileSync(join(fixture.mosaicHome, 'TOOLS.md'), '# user-customized TOOLS\n');
|
||||
writeFileSync(join(fixture.mosaicHome, 'AGENTS.md'), '# user-customized AGENTS\n');
|
||||
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
await adapter.syncFramework('keep');
|
||||
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'TOOLS.md'), 'utf-8')).toBe(
|
||||
'# user-customized TOOLS\n',
|
||||
);
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'AGENTS.md'), 'utf-8')).toBe(
|
||||
'# user-customized AGENTS\n',
|
||||
);
|
||||
// And the missing contract file still gets seeded.
|
||||
expect(readFileSync(join(fixture.mosaicHome, 'STANDARDS.md'), 'utf-8')).toContain(
|
||||
'# STANDARDS default',
|
||||
);
|
||||
});
|
||||
|
||||
it('is a no-op for seeding when defaults/ dir does not exist', async () => {
|
||||
rmSync(fixture.defaultsDir, { recursive: true });
|
||||
|
||||
const adapter = new FileConfigAdapter(fixture.mosaicHome, fixture.sourceDir);
|
||||
await expect(adapter.syncFramework('fresh')).resolves.toBeUndefined();
|
||||
|
||||
expect(existsSync(join(fixture.mosaicHome, 'TOOLS.md'))).toBe(false);
|
||||
});
|
||||
});
|
||||
@@ -1,19 +1,5 @@
|
||||
import { readFileSync, existsSync, statSync, copyFileSync } from 'node:fs';
|
||||
import { readFileSync, existsSync, readdirSync, statSync, copyFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
|
||||
/**
|
||||
* Framework-contract files that `syncFramework` seeds from `framework/defaults/`
|
||||
* into the mosaic home root on first install. These are the only files the
|
||||
* wizard is allowed to touch as a one-time seed — SOUL.md and USER.md are
|
||||
* generated from templates by their respective wizard stages with
|
||||
* user-supplied values, and anything else under `defaults/` (README.md,
|
||||
* audit snapshots, etc.) is framework-internal and must not leak into the
|
||||
* user's mosaic home.
|
||||
*
|
||||
* This list must match the explicit seed loop in
|
||||
* packages/mosaic/framework/install.sh.
|
||||
*/
|
||||
export const DEFAULT_SEED_FILES = ['AGENTS.md', 'STANDARDS.md', 'TOOLS.md'] as const;
|
||||
import type { ConfigService, ConfigSection, ResolvedConfig } from './config-service.js';
|
||||
import type { SoulConfig, UserConfig, ToolsConfig, InstallAction } from '../types.js';
|
||||
import { soulSchema, userSchema, toolsSchema } from './schemas.js';
|
||||
@@ -145,24 +131,9 @@ export class FileConfigAdapter implements ConfigService {
|
||||
}
|
||||
|
||||
async syncFramework(action: InstallAction): Promise<void> {
|
||||
// Must match PRESERVE_PATHS in packages/mosaic/framework/install.sh so
|
||||
// the bash and TS install paths have the same upgrade-preservation
|
||||
// semantics. Contract files (AGENTS.md, STANDARDS.md, TOOLS.md) are
|
||||
// seeded from defaults/ on first install and preserved thereafter;
|
||||
// identity files (SOUL.md, USER.md) are generated by wizard stages and
|
||||
// must never be touched by the framework sync.
|
||||
const preservePaths =
|
||||
action === 'keep' || action === 'reconfigure'
|
||||
? [
|
||||
'AGENTS.md',
|
||||
'SOUL.md',
|
||||
'USER.md',
|
||||
'TOOLS.md',
|
||||
'STANDARDS.md',
|
||||
'memory',
|
||||
'sources',
|
||||
'credentials',
|
||||
]
|
||||
? ['SOUL.md', 'USER.md', 'TOOLS.md', 'memory']
|
||||
: [];
|
||||
|
||||
syncDirectory(this.sourceDir, this.mosaicHome, {
|
||||
@@ -170,26 +141,23 @@ export class FileConfigAdapter implements ConfigService {
|
||||
excludeGit: true,
|
||||
});
|
||||
|
||||
// Copy framework-contract files (AGENTS.md, STANDARDS.md, TOOLS.md)
|
||||
// from framework/defaults/ into the mosaic home root if they don't
|
||||
// exist yet. These are written on first install only and are never
|
||||
// overwritten afterwards — the user may have customized them.
|
||||
//
|
||||
// SOUL.md and USER.md are deliberately NOT seeded here. They are
|
||||
// generated from templates by the soul/user wizard stages with
|
||||
// user-supplied values; seeding them from defaults would clobber the
|
||||
// identity flow and leak placeholder content into the mosaic home.
|
||||
// Copy default root-level .md files (AGENTS.md, STANDARDS.md, etc.)
|
||||
// from framework/defaults/ into mosaicHome root if they don't exist yet.
|
||||
// These are framework contracts — only written on first install, never
|
||||
// overwritten (user may have customized them).
|
||||
const defaultsDir = join(this.sourceDir, 'defaults');
|
||||
if (existsSync(defaultsDir)) {
|
||||
for (const entry of DEFAULT_SEED_FILES) {
|
||||
const src = join(defaultsDir, entry);
|
||||
for (const entry of readdirSync(defaultsDir)) {
|
||||
const dest = join(this.mosaicHome, entry);
|
||||
if (existsSync(dest)) continue;
|
||||
if (!existsSync(src) || !statSync(src).isFile()) continue;
|
||||
if (!existsSync(dest)) {
|
||||
const src = join(defaultsDir, entry);
|
||||
if (statSync(src).isFile()) {
|
||||
copyFileSync(src, dest);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async readAll(): Promise<ResolvedConfig> {
|
||||
const [soul, user, tools] = await Promise.all([
|
||||
|
||||
@@ -216,30 +216,7 @@ describe('gatewayConfigStage', () => {
|
||||
expect(daemonState.startCalled).toBe(0);
|
||||
});
|
||||
|
||||
it('honors MOSAIC_STORAGE_TIER=standalone in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/db';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
|
||||
const p = buildPrompter();
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('standalone');
|
||||
const envContents = readFileSync(daemonState.envFile, 'utf-8');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/db');
|
||||
expect(envContents).toContain('VALKEY_URL=redis://test:6379');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('accepts deprecated MOSAIC_STORAGE_TIER=team as alias for standalone', async () => {
|
||||
it('honors MOSAIC_STORAGE_TIER=team in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'team';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/db';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
@@ -253,53 +230,13 @@ describe('gatewayConfigStage', () => {
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
// Deprecated alias 'team' maps to 'standalone'
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('standalone');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('standalone');
|
||||
});
|
||||
|
||||
it('honors MOSAIC_STORAGE_TIER=federated in headless path', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
process.env['MOSAIC_DATABASE_URL'] = 'postgresql://test/feddb';
|
||||
process.env['MOSAIC_VALKEY_URL'] = 'redis://test:6379';
|
||||
|
||||
const p = buildPrompter();
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
expect(result.ready).toBe(true);
|
||||
expect(state.gateway?.tier).toBe('federated');
|
||||
expect(state.gateway?.tier).toBe('team');
|
||||
const envContents = readFileSync(daemonState.envFile, 'utf-8');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/feddb');
|
||||
expect(envContents).toContain('DATABASE_URL=postgresql://test/db');
|
||||
expect(envContents).toContain('VALKEY_URL=redis://test:6379');
|
||||
const mosaicConfig = JSON.parse(readFileSync(daemonState.mosaicConfigFile, 'utf-8'));
|
||||
expect(mosaicConfig.tier).toBe('federated');
|
||||
expect(mosaicConfig.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('rejects an unknown MOSAIC_STORAGE_TIER value in headless mode with a descriptive warning', async () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federatd'; // deliberate typo
|
||||
|
||||
const warnFn = vi.fn();
|
||||
const p = buildPrompter({ warn: warnFn });
|
||||
const state = makeState('/home/user/.config/mosaic');
|
||||
|
||||
const result = await gatewayConfigStage(p, state, {
|
||||
host: 'localhost',
|
||||
defaultPort: 14242,
|
||||
skipInstall: true,
|
||||
});
|
||||
|
||||
// The stage surfaces validation errors as ready:false (warning is shown to the user).
|
||||
expect(result.ready).toBe(false);
|
||||
// The warning message must name all three valid values.
|
||||
expect(warnFn).toHaveBeenCalledWith(expect.stringMatching(/local.*standalone.*federated/i));
|
||||
expect(mosaicConfig.tier).toBe('team');
|
||||
});
|
||||
|
||||
it('regenerates config when portOverride differs from saved GATEWAY_PORT', async () => {
|
||||
|
||||
@@ -84,15 +84,10 @@ async function promptTier(p: WizardPrompter): Promise<GatewayStorageTier> {
|
||||
hint: 'embedded database, no dependencies',
|
||||
},
|
||||
{
|
||||
value: 'standalone',
|
||||
label: 'Standalone',
|
||||
value: 'team',
|
||||
label: 'Team',
|
||||
hint: 'PostgreSQL + Valkey required',
|
||||
},
|
||||
{
|
||||
value: 'federated',
|
||||
label: 'Federated',
|
||||
hint: 'PostgreSQL + Valkey + pgvector, federation server+client',
|
||||
},
|
||||
],
|
||||
});
|
||||
return tier;
|
||||
@@ -442,21 +437,7 @@ async function collectAndWriteConfig(
|
||||
p.log('Headless mode detected — reading configuration from environment variables.');
|
||||
|
||||
const storageTierEnv = process.env['MOSAIC_STORAGE_TIER'] ?? 'local';
|
||||
if (storageTierEnv === 'team') {
|
||||
// Deprecated alias — warn and treat as standalone
|
||||
process.stderr.write(
|
||||
'[mosaic] DEPRECATED: MOSAIC_STORAGE_TIER=team is deprecated — use "standalone" instead.\n',
|
||||
);
|
||||
tier = 'standalone';
|
||||
} else if (storageTierEnv === 'standalone' || storageTierEnv === 'federated') {
|
||||
tier = storageTierEnv;
|
||||
} else if (storageTierEnv !== '' && storageTierEnv !== 'local') {
|
||||
throw new GatewayConfigValidationError(
|
||||
`Invalid MOSAIC_STORAGE_TIER="${storageTierEnv}" — expected "local", "standalone", or "federated" (deprecated alias "team" also accepted)`,
|
||||
);
|
||||
} else {
|
||||
tier = 'local';
|
||||
}
|
||||
tier = storageTierEnv === 'team' ? 'team' : 'local';
|
||||
|
||||
const portEnv = process.env['MOSAIC_GATEWAY_PORT'];
|
||||
port = portEnv ? parseInt(portEnv, 10) : opts.defaultPort;
|
||||
@@ -472,13 +453,13 @@ async function collectAndWriteConfig(
|
||||
hostname = hostnameEnv;
|
||||
corsOrigin = corsOverride ?? deriveCorsOrigin(hostnameEnv, 3000);
|
||||
|
||||
if (tier === 'standalone' || tier === 'federated') {
|
||||
if (tier === 'team') {
|
||||
const missing: string[] = [];
|
||||
if (!databaseUrl) missing.push('MOSAIC_DATABASE_URL');
|
||||
if (!valkeyUrl) missing.push('MOSAIC_VALKEY_URL');
|
||||
if (missing.length > 0) {
|
||||
throw new GatewayConfigValidationError(
|
||||
`Headless install with tier=${tier} requires env vars: ` + missing.join(', '),
|
||||
'Headless install with tier=team requires env vars: ' + missing.join(', '),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -486,15 +467,11 @@ async function collectAndWriteConfig(
|
||||
tier = await promptTier(p);
|
||||
port = await promptPort(p, opts.defaultPort);
|
||||
|
||||
if (tier === 'standalone' || tier === 'federated') {
|
||||
const defaultDbUrl =
|
||||
tier === 'federated'
|
||||
? 'postgresql://mosaic:mosaic@localhost:5433/mosaic'
|
||||
: 'postgresql://mosaic:mosaic@localhost:5432/mosaic';
|
||||
if (tier === 'team') {
|
||||
databaseUrl = await p.text({
|
||||
message: 'DATABASE_URL',
|
||||
initialValue: defaultDbUrl,
|
||||
defaultValue: defaultDbUrl,
|
||||
initialValue: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
defaultValue: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
|
||||
});
|
||||
valkeyUrl = await p.text({
|
||||
message: 'VALKEY_URL',
|
||||
@@ -544,7 +521,7 @@ async function collectAndWriteConfig(
|
||||
`OTEL_SERVICE_NAME=mosaic-gateway`,
|
||||
];
|
||||
|
||||
if ((tier === 'standalone' || tier === 'federated') && databaseUrl && valkeyUrl) {
|
||||
if (tier === 'team' && databaseUrl && valkeyUrl) {
|
||||
envLines.push(`DATABASE_URL=${databaseUrl}`);
|
||||
envLines.push(`VALKEY_URL=${valkeyUrl}`);
|
||||
}
|
||||
@@ -568,18 +545,11 @@ async function collectAndWriteConfig(
|
||||
queue: { type: 'local', dataDir: join(opts.gatewayHome, 'queue') },
|
||||
memory: { type: 'keyword' },
|
||||
}
|
||||
: tier === 'federated'
|
||||
? {
|
||||
tier: 'federated',
|
||||
: {
|
||||
tier: 'team',
|
||||
storage: { type: 'postgres', url: databaseUrl },
|
||||
queue: { type: 'bullmq', url: valkeyUrl },
|
||||
memory: { type: 'pgvector' },
|
||||
}
|
||||
: {
|
||||
tier: 'standalone',
|
||||
storage: { type: 'postgres', url: databaseUrl },
|
||||
queue: { type: 'bullmq', url: valkeyUrl },
|
||||
memory: { type: 'keyword' },
|
||||
};
|
||||
|
||||
writeFileSync(opts.mosaicConfigFile, JSON.stringify(mosaicConfig, null, 2) + '\n', {
|
||||
|
||||
@@ -58,7 +58,7 @@ export interface HooksState {
|
||||
acceptedAt?: string;
|
||||
}
|
||||
|
||||
export type GatewayStorageTier = 'local' | 'standalone' | 'federated';
|
||||
export type GatewayStorageTier = 'local' | 'team';
|
||||
|
||||
export interface GatewayAdminState {
|
||||
name: string;
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import type { DbHandle } from '@mosaicstack/db';
|
||||
|
||||
// Mock @mosaicstack/db before importing the adapter
|
||||
vi.mock('@mosaicstack/db', async (importOriginal) => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const actual = await importOriginal<Record<string, any>>();
|
||||
return {
|
||||
...actual,
|
||||
createDb: vi.fn(),
|
||||
runMigrations: vi.fn().mockResolvedValue(undefined),
|
||||
};
|
||||
});
|
||||
|
||||
import { createDb, runMigrations } from '@mosaicstack/db';
|
||||
import { PostgresAdapter } from './postgres.js';
|
||||
|
||||
describe('PostgresAdapter — vector extension gating', () => {
|
||||
let mockExecute: ReturnType<typeof vi.fn>;
|
||||
let mockDb: { execute: ReturnType<typeof vi.fn> };
|
||||
let mockHandle: Pick<DbHandle, 'close'> & { db: typeof mockDb };
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockExecute = vi.fn().mockResolvedValue(undefined);
|
||||
mockDb = { execute: mockExecute };
|
||||
mockHandle = { db: mockDb, close: vi.fn().mockResolvedValue(undefined) };
|
||||
vi.mocked(createDb).mockReturnValue(mockHandle as unknown as DbHandle);
|
||||
});
|
||||
|
||||
it('calls db.execute with CREATE EXTENSION IF NOT EXISTS vector when enableVector=true', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
// Should have called execute
|
||||
expect(mockExecute).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Verify the SQL contains the extension creation statement.
|
||||
// Prefer Drizzle's public toSQL() API; fall back to queryChunks if unavailable.
|
||||
// NOTE: queryChunks is an undocumented Drizzle internal (drizzle-orm ^0.45.x).
|
||||
// toSQL() was not present on the raw sql`` result in this version — if a future
|
||||
// Drizzle upgrade adds it, remove the fallback path and delete this comment.
|
||||
const sqlObj = mockExecute.mock.calls[0]![0] as {
|
||||
toSQL?: () => { sql: string; params: unknown[] };
|
||||
queryChunks?: Array<{ value: string[] }>;
|
||||
};
|
||||
const sqlText = sqlObj.toSQL
|
||||
? sqlObj.toSQL().sql.toLowerCase()
|
||||
: (sqlObj.queryChunks ?? [])
|
||||
.flatMap((chunk) => chunk.value)
|
||||
.join('')
|
||||
.toLowerCase();
|
||||
expect(sqlText).toContain('create extension if not exists vector');
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is false', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: false,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is unset', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('calls runMigrations after the extension is created', async () => {
|
||||
const callOrder: string[] = [];
|
||||
mockExecute.mockImplementation(() => {
|
||||
callOrder.push('execute');
|
||||
return Promise.resolve(undefined);
|
||||
});
|
||||
vi.mocked(runMigrations).mockImplementation(() => {
|
||||
callOrder.push('runMigrations');
|
||||
return Promise.resolve();
|
||||
});
|
||||
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(callOrder).toEqual(['execute', 'runMigrations']);
|
||||
});
|
||||
});
|
||||
@@ -66,19 +66,13 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
private handle: DbHandle;
|
||||
private db: Db;
|
||||
private url: string;
|
||||
private enableVector: boolean;
|
||||
|
||||
constructor(config: Extract<StorageConfig, { type: 'postgres' }>) {
|
||||
this.url = config.url;
|
||||
this.enableVector = config.enableVector ?? false;
|
||||
this.handle = createDb(config.url);
|
||||
this.db = this.handle.db;
|
||||
}
|
||||
|
||||
private async ensureVectorExtension(): Promise<void> {
|
||||
await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS vector`);
|
||||
}
|
||||
|
||||
async create<T extends Record<string, unknown>>(
|
||||
collection: string,
|
||||
data: T,
|
||||
@@ -155,9 +149,6 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
}
|
||||
|
||||
async migrate(): Promise<void> {
|
||||
if (this.enableVector) {
|
||||
await this.ensureVectorExtension();
|
||||
}
|
||||
await runMigrations(this.url);
|
||||
}
|
||||
|
||||
|
||||
@@ -38,6 +38,6 @@ export interface StorageAdapter {
|
||||
}
|
||||
|
||||
export type StorageConfig =
|
||||
| { type: 'postgres'; url: string; enableVector?: boolean }
|
||||
| { type: 'postgres'; url: string }
|
||||
| { type: 'pglite'; dataDir?: string }
|
||||
| { type: 'files'; dataDir: string; format?: 'json' | 'md' };
|
||||
|
||||
Reference in New Issue
Block a user