Compare commits

..

1 Commits

Author SHA1 Message Date
Jarvis
86edb16947 feat(config): add federated tier and rename team→standalone (FED-M1-01)
All checks were successful
ci/woodpecker/push/ci Pipeline was successful
ci/woodpecker/pr/ci Pipeline was successful
Aligns mosaic.config tier vocabulary with the federation PRD:
- Adds `federated` to the tier enum (Postgres + Valkey + pgvector)
- Renames `team` → `standalone` in StorageTier / GatewayStorageTier
- Keeps `team` as a deprecated alias that warns to stderr and maps to standalone
- Adds DEFAULT_FEDERATED_CONFIG (port 5433, pgvector memory)
- Corrects DEFAULT_STANDALONE_CONFIG.memory to keyword (PRD line 247)
- Wizard interactive promptTier offers local/standalone/federated
- Wizard headless env path now hard-rejects unknown MOSAIC_STORAGE_TIER
  values with a message naming all three valid tiers
- Interactive DATABASE_URL pre-fill is tier-aware: standalone→5432, federated→5433
- 10 new unit tests for the validator + 3 new wizard headless tests

No new runtime behavior beyond the enum, defaults, and the wizard tier
validation described above — pgvector enforcement, tier-detector, and
docker-compose overlays land in FED-M1-02..04.

Refs #460

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
2026-04-19 18:06:37 -05:00
25 changed files with 19 additions and 3345 deletions

View File

@@ -63,10 +63,8 @@
"class-validator": "^0.15.1", "class-validator": "^0.15.1",
"dotenv": "^17.3.1", "dotenv": "^17.3.1",
"fastify": "^5.0.0", "fastify": "^5.0.0",
"ioredis": "^5.10.0",
"node-cron": "^4.2.1", "node-cron": "^4.2.1",
"openai": "^6.32.0", "openai": "^6.32.0",
"postgres": "^3.4.8",
"reflect-metadata": "^0.2.0", "reflect-metadata": "^0.2.0",
"rxjs": "^7.8.0", "rxjs": "^7.8.0",
"socket.io": "^4.8.0", "socket.io": "^4.8.0",

View File

@@ -20,12 +20,10 @@ import { Logger, ValidationPipe } from '@nestjs/common';
import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify'; import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify';
import helmet from '@fastify/helmet'; import helmet from '@fastify/helmet';
import { listSsoStartupWarnings } from '@mosaicstack/auth'; import { listSsoStartupWarnings } from '@mosaicstack/auth';
import { loadConfig } from '@mosaicstack/config';
import { AppModule } from './app.module.js'; import { AppModule } from './app.module.js';
import { mountAuthHandler } from './auth/auth.controller.js'; import { mountAuthHandler } from './auth/auth.controller.js';
import { mountMcpHandler } from './mcp/mcp.controller.js'; import { mountMcpHandler } from './mcp/mcp.controller.js';
import { McpService } from './mcp/mcp.service.js'; import { McpService } from './mcp/mcp.service.js';
import { detectAndAssertTier, TierDetectionError } from '@mosaicstack/storage';
async function bootstrap(): Promise<void> { async function bootstrap(): Promise<void> {
const logger = new Logger('Bootstrap'); const logger = new Logger('Bootstrap');
@@ -34,20 +32,6 @@ async function bootstrap(): Promise<void> {
throw new Error('BETTER_AUTH_SECRET is required'); throw new Error('BETTER_AUTH_SECRET is required');
} }
// Pre-flight: assert all external services required by the configured tier
// are reachable. Runs before NestFactory.create() so failures are visible
// immediately with actionable remediation hints.
const mosaicConfig = loadConfig();
try {
await detectAndAssertTier(mosaicConfig);
} catch (err) {
if (err instanceof TierDetectionError) {
logger.error(`Tier detection failed: ${err.message}`);
logger.error(`Remediation: ${err.remediation}`);
}
throw err;
}
for (const warning of listSsoStartupWarnings()) { for (const warning of listSsoStartupWarnings()) {
logger.warn(warning); logger.warn(warning);
} }

View File

@@ -1,60 +0,0 @@
# docker-compose.federated.yml — Federated tier overlay
#
# USAGE:
# docker compose -f docker-compose.federated.yml --profile federated up -d
#
# This file is a standalone overlay for the Mosaic federated tier.
# It is NOT an extension of docker-compose.yml — it defines its own services
# and named volumes so it can run independently of the base dev stack.
#
# IMPORTANT — HOST PORT CONFLICTS:
# The federated services bind the same host ports as the base dev stack
# (5433 for Postgres, 6380 for Valkey). You must stop the base dev stack
# before starting the federated stack on the same machine:
# docker compose down
# docker compose -f docker-compose.federated.yml --profile federated up -d
#
# pgvector extension:
# The vector extension is created automatically at first boot via
# ./infra/pg-init/01-extensions.sql (CREATE EXTENSION IF NOT EXISTS vector).
#
# Tier configuration:
# Used by `mosaic` instances configured with `tier: federated`.
# DEFAULT_FEDERATED_CONFIG points at:
# postgresql://mosaic:mosaic@localhost:5433/mosaic
services:
postgres-federated:
image: pgvector/pgvector:pg17
profiles: [federated]
ports:
- '${PG_FEDERATED_HOST_PORT:-5433}:5432'
environment:
POSTGRES_USER: mosaic
POSTGRES_PASSWORD: mosaic
POSTGRES_DB: mosaic
volumes:
- pg_federated_data:/var/lib/postgresql/data
- ./infra/pg-init:/docker-entrypoint-initdb.d:ro
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U mosaic']
interval: 5s
timeout: 3s
retries: 5
valkey-federated:
image: valkey/valkey:8-alpine
profiles: [federated]
ports:
- '${VALKEY_FEDERATED_HOST_PORT:-6380}:6379'
volumes:
- valkey_federated_data:/data
healthcheck:
test: ['CMD', 'valkey-cli', 'ping']
interval: 5s
timeout: 3s
retries: 5
volumes:
pg_federated_data:
valkey_federated_data:

View File

@@ -15,20 +15,20 @@
Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No federation logic yet. Existing standalone behavior does not regress. Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No federation logic yet. Existing standalone behavior does not regress.
| id | status | description | issue | agent | branch | depends_on | estimate | notes | | id | status | description | issue | agent | branch | depends_on | estimate | notes |
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------- | | --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | ----------------------------------------------------------------------------------------------------------------- |
| FED-M1-01 | done | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Shipped in PR #470. Renamed `team``standalone`; added `team` deprecation alias; added `DEFAULT_FEDERATED_CONFIG`. | | FED-M1-01 | in-progress | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Schema lives in `packages/types`; validator in gateway bootstrap. No behavior change yet — enum only. |
| FED-M1-02 | done | Author `docker-compose.federated.yml` as an overlay profile: Postgres 17 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | sonnet | feat/federation-m1-compose | FED-M1-01 | 5K | Shipped in PR #471. Overlay defines `postgres-federated`/`valkey-federated`, profile-gated, with pg-init for pgvector extension. | | FED-M1-02 | not-started | Author `docker-compose.federated.yml` as an overlay profile: Postgres 16 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | codex | feat/federation-m1-compose | FED-M1-01 | 5K | Overlay on existing `docker-compose.yml`; no changes to base file. Add `profile: federated` gating. |
| FED-M1-03 | done | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | sonnet | feat/federation-m1-pgvector | FED-M1-02 | 8K | Shipped in PR #472. `enableVector` flag on postgres StorageConfig; idempotent CREATE EXTENSION before migrations. | | FED-M1-03 | not-started | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | codex | feat/federation-m1-pgvector | FED-M1-02 | 8K | Extension create is idempotent `CREATE EXTENSION IF NOT EXISTS vector`. Gate on tier = federated. |
| FED-M1-04 | done | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | sonnet | feat/federation-m1-detector | FED-M1-03 | 8K | Shipped in PR #473. 12 tests; 5s timeouts on probes; pgvector library/permission discrimination; rejects non-bullmq for federated. | | FED-M1-04 | not-started | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | codex | feat/federation-m1-detector | FED-M1-03 | 8K | Structured error type with remediation hints. Logs which service failed, with host:port attempted. |
| FED-M1-05 | done | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | sonnet | feat/federation-m1-migrate | FED-M1-04 | 10K | Shipped in PR #474. `mosaic storage migrate-tier`; DrizzleMigrationSource (corrects P0 found in review); 32 tests; idempotent. | | FED-M1-05 | not-started | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | codex | feat/federation-m1-migrate | FED-M1-04 | 10K | Do NOT run automatically. CLI subcommand `mosaic migrate tier --to federated --dry-run`. Safety rails. |
| FED-M1-06 | in-progress | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. | | FED-M1-06 | not-started | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. |
| FED-M1-07 | not-started | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Vitest + docker-compose test profile. One test file per assertion; real services, no mocks. | | FED-M1-07 | not-started | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Vitest + docker-compose test profile. One test file per assertion; real services, no mocks. |
| FED-M1-08 | not-started | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Runs against docker-compose federated profile; uses temp PGlite file; deterministic seed. | | FED-M1-08 | not-started | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Runs against docker-compose federated profile; uses temp PGlite file; deterministic seed. |
| FED-M1-09 | not-started | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | haiku | feat/federation-m1-regression | FED-M1-07 | 4K | Reuse existing e2e harness; just re-point at the federation branch build. Canary that we didn't break it. | | FED-M1-09 | not-started | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | haiku | feat/federation-m1-regression | FED-M1-07 | 4K | Reuse existing e2e harness; just re-point at the federation branch build. Canary that we didn't break it. |
| FED-M1-10 | not-started | Code review pass: security-focused on the migration script (data-at-rest during migration) + tier detector (error-message sensitivity leakage). Independent reviewer, not authors of tasks 01-09. | #460 | sonnet | — | FED-M1-09 | 8K | Use `feature-dev:code-reviewer` agent. Specifically: no secrets in error messages; no partial-migration footguns. | | FED-M1-10 | not-started | Code review pass: security-focused on the migration script (data-at-rest during migration) + tier detector (error-message sensitivity leakage). Independent reviewer, not authors of tasks 01-09. | #460 | sonnet | — | FED-M1-09 | 8K | Use `feature-dev:code-reviewer` agent. Specifically: no secrets in error messages; no partial-migration footguns. |
| FED-M1-11 | not-started | Docs update: `docs/federation/` operator notes for tier setup; README blurb on federated tier; `docs/guides/` entry for migration. Do NOT touch runbook yet (deferred to FED-M7). | #460 | haiku | feat/federation-m1-docs | FED-M1-10 | 4K | Short, actionable. Link from MISSION-MANIFEST. No decisions captured here — those belong in PRD. | | FED-M1-11 | not-started | Docs update: `docs/federation/` operator notes for tier setup; README blurb on federated tier; `docs/guides/` entry for migration. Do NOT touch runbook yet (deferred to FED-M7). | #460 | haiku | feat/federation-m1-docs | FED-M1-10 | 4K | Short, actionable. Link from MISSION-MANIFEST. No decisions captured here — those belong in PRD. |
| FED-M1-12 | not-started | PR, CI green, merge to main, close #460. | #460 | — | (aggregate) | FED-M1-11 | 3K | Queue-guard before push; wait for green; merge squashed; tea `issue-close` #460. | | FED-M1-12 | not-started | PR, CI green, merge to main, close #460. | #460 | — | (aggregate) | FED-M1-11 | 3K | Queue-guard before push; wait for green; merge squashed; tea `issue-close` #460. |
**M1 total estimate:** ~74K tokens (over-budget vs 20K PRD estimate — explanation below) **M1 total estimate:** ~74K tokens (over-budget vs 20K PRD estimate — explanation below)

View File

@@ -343,39 +343,3 @@ Affected files (storage-tier semantics only — Team/workspace usages unaffected
- `MVP-T04` (sync `.mosaic/orchestrator/mission.json`) still deferred. - `MVP-T04` (sync `.mosaic/orchestrator/mission.json`) still deferred.
- `team` tier rename touches install wizard headless env vars (`MOSAIC_STORAGE_TIER=team`); will need 0.0.x deprecation note in scratchpad if release notes are written this milestone. - `team` tier rename touches install wizard headless env vars (`MOSAIC_STORAGE_TIER=team`); will need 0.0.x deprecation note in scratchpad if release notes are written this milestone.
---
## Session 17 — 2026-04-19 — claude
**Mode:** Delivery (W1 / FED-M1 execution; resumed after compaction)
**Branches landed this run:** `feat/federation-m1-tier-config` (PR #470), `feat/federation-m1-compose` (PR #471), `feat/federation-m1-pgvector` (PR #472)
**Branch active at end:** `feat/federation-m1-detector` (FED-M1-04, ready to push)
**Tasks closed:** FED-M1-01, FED-M1-02, FED-M1-03 (all merged to `main` via squash, CI green, issue #460 still open as milestone).
**FED-M1-04 — tier-detector:** Worker delivered `apps/gateway/src/bootstrap/tier-detector.ts` (~210 lines) + `tier-detector.spec.ts` (12 tests). Independent code review (sonnet) returned `changes-required` with 3 issues:
1. CRITICAL: `probeValkey` missing `connectTimeout: 5000` on the ioredis Redis client (defaulted to 10s, violated fail-fast spec).
2. IMPORTANT: `probePgvector` catch block did not discriminate "library not installed" (use `pgvector/pgvector:pg17`) from permission errors.
3. IMPORTANT: Federated tier silently skipped Valkey probe when `queue.type !== 'bullmq'` (computed Valkey URL conditionally).
Worker fix-up round addressed all three:
- L147: `connectTimeout: 5000` added to Redis options
- L113-117: catch block branches on `extension "vector" is not available` substring → distinct remediation per failure mode
- L206-215: federated branch fails fast with `service: 'config'` if `queue.type !== 'bullmq'`, then probes Valkey unconditionally
- 4 new tests (8 → 12 total) cover each fix specifically
Independent verifier (haiku) confirmed all 6 verification claims (line numbers, test presence, suite green: 12/12 PASS).
**Process note — review pipeline working as designed:**
Initial verifier (haiku) on the first delivery returned "OK to ship" but missed the 3 deeper issues that the sonnet code-reviewer caught. This validates the user's "always verify subagent claims independently with another subagent" rule — but specifically with the **right tier** for the task: code review needs sonnet-level reasoning, while haiku is fine for verifying surface claims (line counts, file existence) once review issues are known. Going forward: code review uses sonnet (`feature-dev:code-reviewer`), claim verification uses haiku.
**Followup tasks tracked but deferred:**
- #7: `tier=local` hardcoded in gateway-config resume branches (~262, ~317) — pre-existing bug, fix during M1-06 (doctor) or M1-09 (regression).
- #8: confirm `packages/config/dist` not git-tracked.
**Next:** PR for FED-M1-04 → CI wait → merge. Then FED-M1-05 (migration script, codex/sonnet, 10K).

View File

@@ -28,7 +28,6 @@ export default tseslint.config(
'apps/web/e2e/helpers/*.ts', 'apps/web/e2e/helpers/*.ts',
'apps/web/playwright.config.ts', 'apps/web/playwright.config.ts',
'apps/gateway/vitest.config.ts', 'apps/gateway/vitest.config.ts',
'packages/storage/vitest.config.ts',
'packages/mosaic/__tests__/*.ts', 'packages/mosaic/__tests__/*.ts',
], ],
}, },

View File

@@ -5,5 +5,4 @@ export {
DEFAULT_FEDERATED_CONFIG, DEFAULT_FEDERATED_CONFIG,
loadConfig, loadConfig,
validateConfig, validateConfig,
detectFromEnv,
} from './mosaic-config.js'; } from './mosaic-config.js';

View File

@@ -1,7 +1,6 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { import {
validateConfig, validateConfig,
detectFromEnv,
DEFAULT_LOCAL_CONFIG, DEFAULT_LOCAL_CONFIG,
DEFAULT_STANDALONE_CONFIG, DEFAULT_STANDALONE_CONFIG,
DEFAULT_FEDERATED_CONFIG, DEFAULT_FEDERATED_CONFIG,
@@ -107,64 +106,4 @@ describe('DEFAULT_* config constants', () => {
const url = (DEFAULT_FEDERATED_CONFIG.storage as { url: string }).url; const url = (DEFAULT_FEDERATED_CONFIG.storage as { url: string }).url;
expect(url).toContain('5433'); expect(url).toContain('5433');
}); });
it('DEFAULT_FEDERATED_CONFIG has enableVector=true on storage', () => {
const storage = DEFAULT_FEDERATED_CONFIG.storage as {
type: string;
url: string;
enableVector?: boolean;
};
expect(storage.enableVector).toBe(true);
});
});
describe('detectFromEnv — tier env-var routing', () => {
const originalEnv = process.env;
beforeEach(() => {
// Work on a fresh copy so individual tests can set/delete keys freely.
process.env = { ...originalEnv };
delete process.env['MOSAIC_STORAGE_TIER'];
delete process.env['DATABASE_URL'];
delete process.env['VALKEY_URL'];
});
afterEach(() => {
process.env = originalEnv;
});
it('no env vars → returns local config', () => {
const config = detectFromEnv();
expect(config.tier).toBe('local');
expect(config.storage.type).toBe('pglite');
expect(config.memory.type).toBe('keyword');
});
it('MOSAIC_STORAGE_TIER=federated alone → returns federated config with enableVector=true', () => {
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
const config = detectFromEnv();
expect(config.tier).toBe('federated');
expect(config.memory.type).toBe('pgvector');
const storage = config.storage as { type: string; enableVector?: boolean };
expect(storage.enableVector).toBe(true);
});
it('MOSAIC_STORAGE_TIER=federated + DATABASE_URL → uses the URL and still has enableVector=true', () => {
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
process.env['DATABASE_URL'] = 'postgresql://custom:pass@db.example.com:5432/mydb';
const config = detectFromEnv();
expect(config.tier).toBe('federated');
const storage = config.storage as { type: string; url: string; enableVector?: boolean };
expect(storage.url).toBe('postgresql://custom:pass@db.example.com:5432/mydb');
expect(storage.enableVector).toBe(true);
expect(config.memory.type).toBe('pgvector');
});
it('MOSAIC_STORAGE_TIER=standalone alone → returns standalone-shaped config (not local)', () => {
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
const config = detectFromEnv();
expect(config.tier).toBe('standalone');
expect(config.storage.type).toBe('postgres');
expect(config.memory.type).toBe('keyword');
});
}); });

View File

@@ -40,11 +40,7 @@ export const DEFAULT_STANDALONE_CONFIG: MosaicConfig = {
export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = { export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
tier: 'federated', tier: 'federated',
storage: { storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic' },
type: 'postgres',
url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
enableVector: true,
},
queue: { type: 'bullmq' }, queue: { type: 'bullmq' },
memory: { type: 'pgvector' }, memory: { type: 'pgvector' },
}; };
@@ -123,49 +119,7 @@ export function validateConfig(raw: unknown): MosaicConfig {
/* Loader */ /* Loader */
/* ------------------------------------------------------------------ */ /* ------------------------------------------------------------------ */
export function detectFromEnv(): MosaicConfig { function detectFromEnv(): MosaicConfig {
const tier = process.env['MOSAIC_STORAGE_TIER'];
if (tier === 'federated') {
if (process.env['DATABASE_URL']) {
return {
...DEFAULT_FEDERATED_CONFIG,
storage: {
type: 'postgres',
url: process.env['DATABASE_URL'],
enableVector: true,
},
queue: {
type: 'bullmq',
url: process.env['VALKEY_URL'],
},
};
}
// MOSAIC_STORAGE_TIER=federated without DATABASE_URL — use the default
// federated config (port 5433, enableVector: true, pgvector memory).
return DEFAULT_FEDERATED_CONFIG;
}
if (tier === 'standalone') {
if (process.env['DATABASE_URL']) {
return {
...DEFAULT_STANDALONE_CONFIG,
storage: {
type: 'postgres',
url: process.env['DATABASE_URL'],
},
queue: {
type: 'bullmq',
url: process.env['VALKEY_URL'],
},
};
}
// MOSAIC_STORAGE_TIER=standalone without DATABASE_URL — use the default
// standalone config instead of silently falling back to local.
return DEFAULT_STANDALONE_CONFIG;
}
// Legacy: DATABASE_URL set without MOSAIC_STORAGE_TIER — treat as standalone.
if (process.env['DATABASE_URL']) { if (process.env['DATABASE_URL']) {
return { return {
...DEFAULT_STANDALONE_CONFIG, ...DEFAULT_STANDALONE_CONFIG,
@@ -179,7 +133,6 @@ export function detectFromEnv(): MosaicConfig {
}, },
}; };
} }
return DEFAULT_LOCAL_CONFIG; return DEFAULT_LOCAL_CONFIG;
} }

View File

@@ -372,11 +372,7 @@ export const messages = pgTable(
// ─── pgvector custom type ─────────────────────────────────────────────────── // ─── pgvector custom type ───────────────────────────────────────────────────
export const vector = customType<{ const vector = customType<{ data: number[]; driverParam: string; config: { dimensions: number } }>({
data: number[];
driverParam: string;
config: { dimensions: number };
}>({
dataType(config) { dataType(config) {
return `vector(${config?.dimensions ?? 1536})`; return `vector(${config?.dimensions ?? 1536})`;
}, },

View File

@@ -1,294 +0,0 @@
/**
* Unit tests for gateway-doctor.ts (mosaic gateway doctor).
*
* All external I/O is mocked — no live services required.
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { TierHealthReport } from '@mosaicstack/storage';
/* ------------------------------------------------------------------ */
/* Shared mock state */
/* ------------------------------------------------------------------ */
const mocks = vi.hoisted(() => {
const mockLoadConfig = vi.fn();
const mockProbeServiceHealth = vi.fn();
const mockExistsSync = vi.fn();
return { mockLoadConfig, mockProbeServiceHealth, mockExistsSync };
});
/* ------------------------------------------------------------------ */
/* Module mocks */
/* ------------------------------------------------------------------ */
vi.mock('@mosaicstack/config', () => ({
loadConfig: mocks.mockLoadConfig,
}));
vi.mock('@mosaicstack/storage', () => ({
probeServiceHealth: mocks.mockProbeServiceHealth,
}));
vi.mock('node:fs', () => ({
existsSync: mocks.mockExistsSync,
}));
/* ------------------------------------------------------------------ */
/* Import SUT */
/* ------------------------------------------------------------------ */
import { runGatewayDoctor } from './gateway-doctor.js';
import type { MosaicConfig } from '@mosaicstack/config';
/* ------------------------------------------------------------------ */
/* Fixtures */
/* ------------------------------------------------------------------ */
const STANDALONE_CONFIG: MosaicConfig = {
tier: 'standalone',
storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
queue: { type: 'bullmq', url: 'redis://localhost:6380' },
memory: { type: 'keyword' },
};
const GREEN_REPORT: TierHealthReport = {
tier: 'standalone',
configPath: '/some/mosaic.config.json',
overall: 'green',
services: [
{ name: 'postgres', status: 'ok', host: 'localhost', port: 5432, durationMs: 42 },
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 10 },
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
],
};
const RED_REPORT: TierHealthReport = {
tier: 'standalone',
configPath: '/some/mosaic.config.json',
overall: 'red',
services: [
{
name: 'postgres',
status: 'fail',
host: 'localhost',
port: 5432,
durationMs: 5001,
error: {
message: 'connection refused',
remediation: 'Start Postgres: `docker compose ...`',
},
},
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 8 },
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
],
};
const FEDERATED_GREEN_REPORT: TierHealthReport = {
tier: 'federated',
configPath: '/some/mosaic.config.json',
overall: 'green',
services: [
{ name: 'postgres', status: 'ok', host: 'localhost', port: 5433, durationMs: 30 },
{ name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 5 },
{ name: 'pgvector', status: 'ok', host: 'localhost', port: 5433, durationMs: 25 },
],
};
/* ------------------------------------------------------------------ */
/* Process helpers */
/* ------------------------------------------------------------------ */
let stdoutCapture = '';
let exitCode: number | undefined;
function captureOutput(): void {
stdoutCapture = '';
exitCode = undefined;
vi.spyOn(process.stdout, 'write').mockImplementation((chunk) => {
stdoutCapture += typeof chunk === 'string' ? chunk : chunk.toString();
return true;
});
vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
vi.spyOn(process, 'exit').mockImplementation((code?: string | number | null) => {
exitCode = typeof code === 'number' ? code : code != null ? Number(code) : undefined;
throw new Error(`process.exit(${String(code)})`);
});
vi.spyOn(console, 'log').mockImplementation((...args: unknown[]) => {
stdoutCapture += args.join(' ') + '\n';
});
}
/* ------------------------------------------------------------------ */
/* Tests */
/* ------------------------------------------------------------------ */
describe('runGatewayDoctor', () => {
beforeEach(() => {
vi.clearAllMocks();
captureOutput();
// By default: no config file on disk (env-detection path)
mocks.mockExistsSync.mockReturnValue(false);
mocks.mockLoadConfig.mockReturnValue(STANDALONE_CONFIG);
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
});
afterEach(() => {
vi.restoreAllMocks();
});
/* ---------------------------------------------------------------- */
/* 1. JSON mode: parseable JSON matching the schema */
/* ---------------------------------------------------------------- */
it('JSON mode emits parseable JSON matching TierHealthReport schema', async () => {
mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
await runGatewayDoctor({ json: true });
const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
expect(parsed.tier).toBe('standalone');
expect(parsed.overall).toBe('green');
expect(Array.isArray(parsed.services)).toBe(true);
expect(parsed.services).toHaveLength(3);
// Validate shape of each service check
for (const svc of parsed.services) {
expect(['postgres', 'valkey', 'pgvector']).toContain(svc.name);
expect(['ok', 'fail', 'skipped']).toContain(svc.status);
expect(typeof svc.durationMs).toBe('number');
}
// JSON mode must be silent on console.log — output goes to process.stdout only.
expect(console.log).not.toHaveBeenCalled();
});
// Federated tier probes all three services; a healthy report must round-trip
// through JSON mode untouched.
it('JSON mode for federated with 3 ok services', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(FEDERATED_GREEN_REPORT);
  await runGatewayDoctor({ json: true });
  const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
  expect(parsed.tier).toBe('federated');
  expect(parsed.overall).toBe('green');
  expect(parsed.services.every((s) => s.status === 'ok')).toBe(true);
  // JSON mode must be silent on console.log — output goes to process.stdout only.
  expect(console.log).not.toHaveBeenCalled();
});
/* ---------------------------------------------------------------- */
/* 2. Plain text mode: service lines and overall verdict            */
/* ---------------------------------------------------------------- */
it('plain text mode includes service lines for each service', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
  await runGatewayDoctor({});
  expect(stdoutCapture).toContain('postgres');
  expect(stdoutCapture).toContain('valkey');
  expect(stdoutCapture).toContain('pgvector');
});
it('plain text mode includes Overall verdict', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
  await runGatewayDoctor({});
  expect(stdoutCapture).toContain('Overall: GREEN');
});
it('plain text mode shows tier and config path in header', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
  await runGatewayDoctor({});
  expect(stdoutCapture).toContain('Tier: standalone');
});
it('plain text mode shows remediation for failed services', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
  try {
    await runGatewayDoctor({});
  } catch {
    // process.exit throws in test
  }
  expect(stdoutCapture).toContain('Remediations:');
  expect(stdoutCapture).toContain('Start Postgres');
});
/* ---------------------------------------------------------------- */
/* 3. Exit codes                                                    */
/* ---------------------------------------------------------------- */
it('exits with code 1 when overall is red', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
  await expect(runGatewayDoctor({})).rejects.toThrow('process.exit(1)');
  expect(exitCode).toBe(1);
});
it('exits with code 0 (no exit call) when overall is green', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
  await runGatewayDoctor({});
  // process.exit should NOT have been called for green.
  expect(exitCode).toBeUndefined();
});
it('JSON mode exits with code 1 when overall is red', async () => {
  mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);
  await expect(runGatewayDoctor({ json: true })).rejects.toThrow('process.exit(1)');
  expect(exitCode).toBe(1);
});
/* ---------------------------------------------------------------- */
/* 4. --config path override is honored                             */
/* ---------------------------------------------------------------- */
it('passes --config path to loadConfig when provided', async () => {
  const customPath = '/custom/path/mosaic.config.json';
  await runGatewayDoctor({ config: customPath });
  // loadConfig should have been called with the resolved custom path.
  expect(mocks.mockLoadConfig).toHaveBeenCalledWith(
    expect.stringContaining('mosaic.config.json'),
  );
  // The exact call should include the custom path (resolved).
  const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
  expect(calledPath).toContain('custom/path/mosaic.config.json');
});
it('calls loadConfig without path when no --config and no file on disk', async () => {
  mocks.mockExistsSync.mockReturnValue(false);
  await runGatewayDoctor({});
  const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
  // When no file found, resolveConfigPath returns undefined, so loadConfig is called with undefined
  expect(calledPath).toBeUndefined();
});
it('finds config from cwd when mosaic.config.json exists there', async () => {
  // First candidate (cwd/mosaic.config.json) exists.
  mocks.mockExistsSync.mockImplementation((p: unknown) => {
    return typeof p === 'string' && p.endsWith('mosaic.config.json');
  });
  await runGatewayDoctor({});
  const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
  expect(calledPath).toBeDefined();
  expect(typeof calledPath).toBe('string');
  expect(calledPath!.endsWith('mosaic.config.json')).toBe(true);
});
});

View File

@@ -1,143 +0,0 @@
/**
* gateway-doctor.ts — `mosaic gateway doctor` implementation.
*
* Reports current tier and per-service health (PG, Valkey, pgvector) for the
* Mosaic gateway. Supports machine-readable JSON output for CI.
*
* Exit codes:
* 0 — overall green or yellow
* 1 — overall red (at least one required service failed)
*/
import { existsSync } from 'node:fs';
import { resolve, join } from 'node:path';
import { homedir } from 'node:os';
import { loadConfig } from '@mosaicstack/config';
import { probeServiceHealth } from '@mosaicstack/storage';
import type { TierHealthReport, ServiceCheck } from '@mosaicstack/storage';
/* ------------------------------------------------------------------ */
/* Config resolution */
/* ------------------------------------------------------------------ */
// Config file locations, highest priority first: cwd, then the user's
// ~/.mosaic directory.
const CONFIG_CANDIDATES = [
  resolve(process.cwd(), 'mosaic.config.json'),
  join(homedir(), '.mosaic', 'mosaic.config.json'),
];

/**
 * Determine which config file path to surface in the health report.
 *
 * Priority:
 * 1. Explicit `--config <path>` flag (resolved to an absolute path)
 * 2. `./mosaic.config.json` (cwd)
 * 3. `~/.mosaic/mosaic.config.json`
 * 4. undefined — `loadConfig()` falls back to env-var detection
 *
 * `loadConfig()` itself already handles priority 1-3 when passed an explicit
 * path, and falls back to env-detection when none exists. We resolve here
 * only so we can surface the path in the health report.
 */
function resolveConfigPath(explicit?: string): string | undefined {
  if (explicit) {
    return resolve(explicit);
  }
  // First existing candidate wins; find() yields undefined when none exist.
  return CONFIG_CANDIDATES.find((candidate) => existsSync(candidate));
}
/* ------------------------------------------------------------------ */
/* Output helpers */
/* ------------------------------------------------------------------ */
const TICK = '\u2713'; // ✓
const CROSS = '\u2717'; // ✗
const SKIP = '-';

/**
 * Pad `s` with trailing spaces to width `n`.
 * Delegates to the stdlib String.prototype.padEnd, which — like the previous
 * hand-rolled repeat() version — never truncates a string already wider
 * than `n`.
 */
function padRight(s: string, n: number): string {
  return s.padEnd(n);
}

/**
 * Render one aligned status line for a single service check:
 * status glyph, name column (10), host:port column (22), probe duration,
 * and — for failures — the error message after an arrow.
 */
function serviceLabel(svc: ServiceCheck): string {
  // host/port are optional on a check (e.g. skipped services omit them).
  const hostPort =
    svc.host !== undefined && svc.port !== undefined ? `${svc.host}:${svc.port.toString()}` : '';
  const duration = `(${svc.durationMs.toString()}ms)`;
  switch (svc.status) {
    case 'ok':
      return ` ${TICK} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration}`;
    case 'fail': {
      // Probe errors carry a message; fall back to a generic label if absent.
      const errMsg = svc.error?.message ?? 'unknown error';
      return ` ${CROSS} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration} \u2192 ${errMsg}`;
    }
    case 'skipped':
      return ` ${SKIP} ${padRight(svc.name, 10)} (skipped)`;
  }
}
/**
 * Pretty-print a TierHealthReport for humans: header line (tier + config
 * path), one line per service, remediation hints for any failures, then the
 * overall verdict in upper case.
 */
function printReport(report: TierHealthReport): void {
  const shownConfig = report.configPath ?? '(auto-detected)';
  console.log(`Tier: ${report.tier} Config: ${shownConfig}`);
  console.log('');

  report.services.forEach((svc) => {
    console.log(serviceLabel(svc));
  });
  console.log('');

  // Only failed checks that actually carry an error object get a hint.
  const failures = report.services.filter((svc) => svc.status === 'fail' && svc.error);
  if (failures.length > 0) {
    console.log('Remediations:');
    for (const failure of failures) {
      if (failure.error) {
        console.log(` ${failure.name}: ${failure.error.remediation}`);
      }
    }
    console.log('');
  }

  console.log(`Overall: ${report.overall.toUpperCase()}`);
}
/* ------------------------------------------------------------------ */
/* Main runner */
/* ------------------------------------------------------------------ */
/** CLI flags accepted by `mosaic gateway doctor`. */
export interface GatewayDoctorOptions {
  /** Emit the raw TierHealthReport as JSON on stdout. */
  json?: boolean;
  /** Explicit path to mosaic.config.json. */
  config?: string;
}

/**
 * Entry point for `mosaic gateway doctor`.
 *
 * Loads the config, probes per-service health, prints the report (JSON or
 * human-readable), and exits 1 when the config cannot be loaded or the
 * overall verdict is red.
 */
export async function runGatewayDoctor(opts: GatewayDoctorOptions): Promise<void> {
  const wantJson = opts.json ?? false;
  const configPath = resolveConfigPath(opts.config);

  let mosaicConfig;
  try {
    mosaicConfig = loadConfig(configPath);
  } catch (err: unknown) {
    const msg = err instanceof Error ? err.message : String(err);
    // Keep JSON mode machine-parseable even when config loading fails.
    if (wantJson) {
      const payload = JSON.stringify({ error: `Failed to load config: ${msg}` }, null, 2);
      process.stdout.write(payload + '\n');
    } else {
      process.stderr.write(`Error: Failed to load config: ${msg}\n`);
    }
    process.exit(1);
  }

  const report = await probeServiceHealth(mosaicConfig, configPath);
  if (wantJson) {
    process.stdout.write(JSON.stringify(report, null, 2) + '\n');
  } else {
    printReport(report);
  }

  // Red means at least one required service failed — signal via exit code.
  if (report.overall === 'red') {
    process.exit(1);
  }
}

View File

@@ -206,15 +206,4 @@ export function registerGatewayCommand(program: Command): void {
const { runUninstall } = await import('./gateway/uninstall.js'); const { runUninstall } = await import('./gateway/uninstall.js');
await runUninstall(); await runUninstall();
}); });
// ─── doctor ─────────────────────────────────────────────────────────────────
gw.command('doctor')
.description('Check gateway tier and per-service health (PG, Valkey, pgvector)')
.option('--json', 'Emit TierHealthReport as JSON to stdout (suppresses all other output)')
.option('--config <path>', 'Path to mosaic.config.json (defaults to cwd or ~/.mosaic/)')
.action(async (cmdOpts: { json?: boolean; config?: string }) => {
const { runGatewayDoctor } = await import('./gateway-doctor.js');
await runGatewayDoctor({ json: cmdOpts.json, config: cmdOpts.config });
});
} }

View File

@@ -24,9 +24,7 @@
"@electric-sql/pglite": "^0.2.17", "@electric-sql/pglite": "^0.2.17",
"@mosaicstack/db": "workspace:^", "@mosaicstack/db": "workspace:^",
"@mosaicstack/types": "workspace:*", "@mosaicstack/types": "workspace:*",
"commander": "^13.0.0", "commander": "^13.0.0"
"ioredis": "^5.10.0",
"postgres": "^3.4.8"
}, },
"devDependencies": { "devDependencies": {
"typescript": "^5.8.0", "typescript": "^5.8.0",

View File

@@ -1,107 +0,0 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import type { DbHandle } from '@mosaicstack/db';
// Mock @mosaicstack/db before importing the adapter
vi.mock('@mosaicstack/db', async (importOriginal) => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const actual = await importOriginal<Record<string, any>>();
return {
...actual,
createDb: vi.fn(),
runMigrations: vi.fn().mockResolvedValue(undefined),
};
});
import { createDb, runMigrations } from '@mosaicstack/db';
import { PostgresAdapter } from './postgres.js';
describe('PostgresAdapter — vector extension gating', () => {
  let mockExecute: ReturnType<typeof vi.fn>;
  let mockDb: { execute: ReturnType<typeof vi.fn> };
  let mockHandle: Pick<DbHandle, 'close'> & { db: typeof mockDb };
  beforeEach(() => {
    // Fresh mocks per test: createDb returns a handle whose db.execute we observe.
    vi.clearAllMocks();
    mockExecute = vi.fn().mockResolvedValue(undefined);
    mockDb = { execute: mockExecute };
    mockHandle = { db: mockDb, close: vi.fn().mockResolvedValue(undefined) };
    vi.mocked(createDb).mockReturnValue(mockHandle as unknown as DbHandle);
  });
  it('calls db.execute with CREATE EXTENSION IF NOT EXISTS vector when enableVector=true', async () => {
    const adapter = new PostgresAdapter({
      type: 'postgres',
      url: 'postgresql://test:test@localhost:5432/test',
      enableVector: true,
    });
    await adapter.migrate();
    // Should have called execute
    expect(mockExecute).toHaveBeenCalledTimes(1);
    // Verify the SQL contains the extension creation statement.
    // Prefer Drizzle's public toSQL() API; fall back to queryChunks if unavailable.
    // NOTE: queryChunks is an undocumented Drizzle internal (drizzle-orm ^0.45.x).
    // toSQL() was not present on the raw sql`` result in this version — if a future
    // Drizzle upgrade adds it, remove the fallback path and delete this comment.
    const sqlObj = mockExecute.mock.calls[0]![0] as {
      toSQL?: () => { sql: string; params: unknown[] };
      queryChunks?: Array<{ value: string[] }>;
    };
    const sqlText = sqlObj.toSQL
      ? sqlObj.toSQL().sql.toLowerCase()
      : (sqlObj.queryChunks ?? [])
          .flatMap((chunk) => chunk.value)
          .join('')
          .toLowerCase();
    expect(sqlText).toContain('create extension if not exists vector');
  });
  it('does NOT call db.execute for extension when enableVector is false', async () => {
    const adapter = new PostgresAdapter({
      type: 'postgres',
      url: 'postgresql://test:test@localhost:5432/test',
      enableVector: false,
    });
    await adapter.migrate();
    expect(mockExecute).not.toHaveBeenCalled();
    expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
  });
  it('does NOT call db.execute for extension when enableVector is unset', async () => {
    // enableVector defaults via `config.enableVector ?? false` in the adapter.
    const adapter = new PostgresAdapter({
      type: 'postgres',
      url: 'postgresql://test:test@localhost:5432/test',
    });
    await adapter.migrate();
    expect(mockExecute).not.toHaveBeenCalled();
    expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
  });
  it('calls runMigrations after the extension is created', async () => {
    // Record call ordering to prove the extension DDL runs before migrations.
    const callOrder: string[] = [];
    mockExecute.mockImplementation(() => {
      callOrder.push('execute');
      return Promise.resolve(undefined);
    });
    vi.mocked(runMigrations).mockImplementation(() => {
      callOrder.push('runMigrations');
      return Promise.resolve();
    });
    const adapter = new PostgresAdapter({
      type: 'postgres',
      url: 'postgresql://test:test@localhost:5432/test',
      enableVector: true,
    });
    await adapter.migrate();
    expect(callOrder).toEqual(['execute', 'runMigrations']);
  });
});

View File

@@ -66,19 +66,13 @@ export class PostgresAdapter implements StorageAdapter {
private handle: DbHandle; private handle: DbHandle;
private db: Db; private db: Db;
private url: string; private url: string;
private enableVector: boolean;
constructor(config: Extract<StorageConfig, { type: 'postgres' }>) { constructor(config: Extract<StorageConfig, { type: 'postgres' }>) {
this.url = config.url; this.url = config.url;
this.enableVector = config.enableVector ?? false;
this.handle = createDb(config.url); this.handle = createDb(config.url);
this.db = this.handle.db; this.db = this.handle.db;
} }
private async ensureVectorExtension(): Promise<void> {
await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS vector`);
}
async create<T extends Record<string, unknown>>( async create<T extends Record<string, unknown>>(
collection: string, collection: string,
data: T, data: T,
@@ -155,9 +149,6 @@ export class PostgresAdapter implements StorageAdapter {
} }
async migrate(): Promise<void> { async migrate(): Promise<void> {
if (this.enableVector) {
await this.ensureVectorExtension();
}
await runMigrations(this.url); await runMigrations(this.url);
} }

View File

@@ -1,5 +1,4 @@
import type { Command } from 'commander'; import type { Command } from 'commander';
import type { MigrationSource } from './migrate-tier.js';
/** /**
* Reads the DATABASE_URL environment variable and redacts the password portion. * Reads the DATABASE_URL environment variable and redacts the password portion.
@@ -210,203 +209,6 @@ export function registerStorageCommand(parent: Command): void {
} }
}); });
// ── storage migrate-tier ─────────────────────────────────────────────────
storage
  .command('migrate-tier')
  .description('Migrate data from tier: local/standalone → tier: federated (Postgres + pgvector)')
  .requiredOption(
    '--to <tier>',
    'Target tier to migrate to (only "federated" is supported)',
    'federated',
  )
  .requiredOption('--target-url <url>', 'Target federated Postgres connection string (required)')
  .option(
    '--source-config <path>',
    'Path to mosaic.config.json (default: cwd/mosaic.config.json)',
  )
  .option('--dry-run', 'Print what would be migrated without writing anything')
  .option('--yes', 'Skip interactive confirmation prompt (required for non-TTY environments)')
  .option('--batch-size <n>', 'Rows per transaction batch', '1000')
  .option('--allow-non-empty', 'Allow writing to a non-empty target (upsert — idempotent)')
  .action(
    async (opts: {
      to: string;
      targetUrl: string;
      sourceConfig?: string;
      dryRun?: boolean;
      yes?: boolean;
      batchSize?: string;
      allowNonEmpty?: boolean;
    }) => {
      // Only "federated" is a valid target; reject anything else up front.
      if (opts.to !== 'federated') {
        console.error(
          `[migrate-tier] --to "${opts.to}" is not supported. Only "federated" is allowed.`,
        );
        process.exitCode = 1;
        return;
      }
      // Validate --batch-size before touching any database.
      const batchSize = parseInt(opts.batchSize ?? '1000', 10);
      if (isNaN(batchSize) || batchSize < 1) {
        console.error('[migrate-tier] --batch-size must be a positive integer.');
        process.exitCode = 1;
        return;
      }
      // Redact target URL password for display.
      function redactUrl(url: string): string {
        try {
          const parsed = new URL(url);
          if (parsed.password) parsed.password = '***';
          return parsed.toString();
        } catch {
          // Not a parseable URL — best-effort regex redaction of ":password@".
          return url.replace(/:([^@/]+)@/, ':***@');
        }
      }
      const redactedTarget = redactUrl(opts.targetUrl);
      const isDryRun = opts.dryRun ?? false;
      const allowNonEmpty = opts.allowNonEmpty ?? false;
      // Determine source tier from environment.
      const sourceTier = activeTier();
      const sourceDesc = configSource();
      // Summary banner so the operator can sanity-check before confirming.
      console.log('');
      console.log('[migrate-tier] ─────────────────────────────────────────');
      console.log(`[migrate-tier] Source tier: ${sourceTier}`);
      console.log(`[migrate-tier] Source: ${sourceDesc}`);
      console.log(`[migrate-tier] Target tier: federated (Postgres + pgvector)`);
      console.log(`[migrate-tier] Target: ${redactedTarget}`);
      console.log(`[migrate-tier] Batch size: ${batchSize.toString()}`);
      console.log(`[migrate-tier] Dry run: ${isDryRun.toString()}`);
      console.log(`[migrate-tier] Allow non-empty: ${allowNonEmpty.toString()}`);
      console.log('[migrate-tier] ─────────────────────────────────────────');
      console.log('');
      // Lazy-import core migration logic to keep the CLI thin.
      const {
        runMigrateTier,
        PostgresMigrationTarget,
        DrizzleMigrationSource,
        getMigrationOrder,
      } = await import('./migrate-tier.js');
      // Build source adapter using Drizzle-backed DrizzleMigrationSource.
      // Both local (PGlite) and standalone (Postgres) sources expose the same
      // normalized Drizzle schema — this is where the actual domain data lives.
      let sourceAdapter: MigrationSource;
      if (sourceTier === 'pglite') {
        const { createPgliteDb } = await import('@mosaicstack/db');
        const pgliteDataDir = process.env['PGLITE_DATA_DIR'];
        if (!pgliteDataDir) {
          console.error(
            '[migrate-tier] PGLITE_DATA_DIR is not set. ' +
              'Cannot open PGlite source — set it to the data directory path.',
          );
          process.exitCode = 1;
          return;
        }
        const handle = createPgliteDb(pgliteDataDir);
        // Local/PGlite sources do not have pgvector registered — the embedding
        // column is omitted from the insights SELECT and set to null on target.
        sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
      } else {
        const { createDb } = await import('@mosaicstack/db');
        const url = process.env['DATABASE_URL'];
        if (!url) {
          console.error('[migrate-tier] DATABASE_URL is not set for postgres source.');
          process.exitCode = 1;
          return;
        }
        const handle = createDb(url);
        // Standalone Postgres may or may not have pgvector — assume it does not
        // (it is a non-federated tier) so embedding is treated as null.
        sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
      }
      // Print per-table row counts for the confirmation prompt.
      const tablesToMigrate = getMigrationOrder();
      const counts: Array<{ table: string; count: number }> = [];
      for (const table of tablesToMigrate) {
        const n = await sourceAdapter.count(table);
        counts.push({ table, count: n });
      }
      console.log('[migrate-tier] Source row counts:');
      for (const { table, count } of counts) {
        console.log(` ${table}: ${count.toString()}`);
      }
      console.log(' sessions: SKIPPED (ephemeral)');
      console.log(' verifications: SKIPPED (ephemeral)');
      console.log(' admin_tokens: SKIPPED (environment-specific)');
      console.log('');
      // Interactive confirmation unless --yes or dry-run.
      const isTTY = process.stdin.isTTY;
      if (!isDryRun) {
        // Headless environments must opt in explicitly via --yes.
        if (!opts.yes && !isTTY) {
          console.error(
            '[migrate-tier] Not running in a TTY and --yes was not passed. ' +
              'Pass --yes to confirm in headless environments.',
          );
          process.exitCode = 1;
          await sourceAdapter.close();
          return;
        }
        if (!opts.yes) {
          // Prompt once; anything other than "y" (case-insensitive) aborts.
          const { createInterface } = await import('node:readline');
          const rl = createInterface({ input: process.stdin, output: process.stdout });
          const answer = await new Promise<string>((resolve) => {
            rl.question(`This will WRITE to ${redactedTarget}. Continue? [y/N] `, (ans) => {
              rl.close();
              resolve(ans);
            });
          });
          if (answer.trim().toLowerCase() !== 'y') {
            console.log('[migrate-tier] Aborted.');
            await sourceAdapter.close();
            return;
          }
        }
      }
      const target = new PostgresMigrationTarget(opts.targetUrl);
      try {
        const result = await runMigrateTier(
          sourceAdapter,
          target,
          {
            targetUrl: opts.targetUrl,
            dryRun: isDryRun,
            allowNonEmpty,
            batchSize,
            onProgress: (msg) => console.log(msg),
          },
          /* sourceHasVector= */ sourceTier === 'postgres',
        );
        if (result.dryRun) {
          console.log('[migrate-tier] Dry run complete. No data was written.');
        } else {
          console.log(
            `[migrate-tier] Migration complete. ${result.totalRows.toString()} rows migrated.`,
          );
        }
      } catch (err) {
        console.error(
          `[migrate-tier] ERROR: ${err instanceof Error ? err.message : String(err)}`,
        );
        process.exitCode = 1;
      } finally {
        // Always release both the source and the target, even on failure.
        await Promise.all([sourceAdapter.close(), target.close()]);
      }
    },
  );
// ── storage migrate ────────────────────────────────────────────────────── // ── storage migrate ──────────────────────────────────────────────────────
storage storage

View File

@@ -1,29 +1,8 @@
export type { StorageAdapter, StorageConfig } from './types.js'; export type { StorageAdapter, StorageConfig } from './types.js';
export { TierDetectionError, detectAndAssertTier, probeServiceHealth } from './tier-detection.js';
export type { ServiceCheck, TierHealthReport } from './tier-detection.js';
export { createStorageAdapter, registerStorageAdapter } from './factory.js'; export { createStorageAdapter, registerStorageAdapter } from './factory.js';
export { PostgresAdapter } from './adapters/postgres.js'; export { PostgresAdapter } from './adapters/postgres.js';
export { PgliteAdapter } from './adapters/pglite.js'; export { PgliteAdapter } from './adapters/pglite.js';
export { registerStorageCommand } from './cli.js'; export { registerStorageCommand } from './cli.js';
export {
getMigrationOrder,
topoSort,
runMigrateTier,
checkTargetPreconditions,
normaliseSourceRow,
PostgresMigrationTarget,
DrizzleMigrationSource,
SKIP_TABLES,
MIGRATION_ORDER,
MigrationPreconditionError,
} from './migrate-tier.js';
export type {
MigrationSource,
MigrationTarget,
MigrateTierOptions,
MigrateTierResult,
TableMigrationResult,
} from './migrate-tier.js';
import { registerStorageAdapter } from './factory.js'; import { registerStorageAdapter } from './factory.js';
import { PostgresAdapter } from './adapters/postgres.js'; import { PostgresAdapter } from './adapters/postgres.js';

View File

@@ -1,495 +0,0 @@
/**
* migrate-tier.spec.ts — Unit tests for the migrate-tier core logic.
*
* These are pure unit tests — no real database connections.
* FED-M1-08 will add integration tests against real services.
*/
import { describe, it, expect, vi } from 'vitest';
import {
getMigrationOrder,
topoSort,
runMigrateTier,
checkTargetPreconditions,
normaliseSourceRow,
SKIP_TABLES,
MigrationPreconditionError,
type MigrationSource,
type MigrationTarget,
} from './migrate-tier.js';
/* ------------------------------------------------------------------ */
/* Mock factories */
/* ------------------------------------------------------------------ */
/**
 * Build a mock MigrationSource backed by an in-memory table map.
 * Implements the DrizzleMigrationSource-shaped contract:
 * - readTable(tableName, opts?) returns paginated rows
 * - count(tableName) returns row count
 *
 * The `sourceHasVector` flag controls whether the mock simulates the
 * no-pgvector projection: when false and tableName is 'insights', rows
 * are returned WITHOUT the 'embedding' field (matching DrizzleMigrationSource
 * behaviour for local/PGlite sources).
 */
function makeMockSource(
  data: Record<string, Record<string, unknown>[]>,
  sourceHasVector = true,
): MigrationSource & {
  readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }>;
} {
  // Every readTable invocation is recorded so tests can assert batching behaviour.
  const readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }> = [];
  return {
    readTableCalls,
    readTable: vi.fn(async (tableName: string, opts?: { limit?: number; offset?: number }) => {
      readTableCalls.push({ table: tableName, opts });
      let rows = data[tableName] ?? [];
      // Simulate no-vector projection: omit 'embedding' from insights rows
      // when sourceHasVector is false (matches DrizzleMigrationSource behaviour).
      if (tableName === 'insights' && !sourceHasVector) {
        rows = rows.map(({ embedding: _omit, ...rest }) => rest);
      }
      // Default to "everything" when no pagination options are supplied.
      const offset = opts?.offset ?? 0;
      const limit = opts?.limit ?? rows.length;
      return rows.slice(offset, offset + limit);
    }),
    count: vi.fn(async (tableName: string) => (data[tableName] ?? []).length),
    close: vi.fn(async () => undefined),
  };
}
/**
 * Build a mock MigrationTarget.
 * `hasPgvector` simulates the pgvector extension probe (defaults to present);
 * `nonEmptyTable` makes one table report 5 pre-existing rows so precondition
 * tests can trip the non-empty check.
 */
function makeMockTarget(opts?: {
  hasPgvector?: boolean;
  nonEmptyTable?: string;
}): MigrationTarget & { upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> } {
  const upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> = [];
  // Tracks rows written per table so count() reflects prior upserts.
  const storedCounts: Record<string, number> = {};
  return {
    upsertCalls,
    upsertBatch: vi.fn(async (table: string, rows: Record<string, unknown>[]) => {
      upsertCalls.push({ table, rows });
      storedCounts[table] = (storedCounts[table] ?? 0) + rows.length;
    }),
    count: vi.fn(async (table: string) => {
      if (opts?.nonEmptyTable === table) return 5;
      return storedCounts[table] ?? 0;
    }),
    hasPgvector: vi.fn(async () => opts?.hasPgvector ?? true),
    close: vi.fn(async () => undefined),
  };
}
/** Progress sink that silently discards every message (keeps test output quiet). */
function noopProgress(): (msg: string) => void {
  return (_msg: string): void => {
    // intentionally empty
  };
}
/* ------------------------------------------------------------------ */
/* 1. Topological ordering                                            */
/* ------------------------------------------------------------------ */
describe('topoSort', () => {
  it('returns empty array for empty input', () => {
    expect(topoSort(new Map())).toEqual([]);
  });
  it('orders parents before children — linear chain', () => {
    // users -> teams -> messages
    const deps = new Map([
      ['users', []],
      ['teams', ['users']],
      ['messages', ['teams']],
    ]);
    const order = topoSort(deps);
    expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
    expect(order.indexOf('teams')).toBeLessThan(order.indexOf('messages'));
  });
  it('orders parents before children — diamond graph', () => {
    // a -> (b, c) -> d
    const deps = new Map([
      ['a', []],
      ['b', ['a']],
      ['c', ['a']],
      ['d', ['b', 'c']],
    ]);
    const order = topoSort(deps);
    expect(order.indexOf('a')).toBeLessThan(order.indexOf('b'));
    expect(order.indexOf('a')).toBeLessThan(order.indexOf('c'));
    expect(order.indexOf('b')).toBeLessThan(order.indexOf('d'));
    expect(order.indexOf('c')).toBeLessThan(order.indexOf('d'));
  });
  it('throws on cyclic dependencies', () => {
    // a <-> b is unsortable; topoSort is expected to detect and name the cycle.
    const deps = new Map([
      ['a', ['b']],
      ['b', ['a']],
    ]);
    expect(() => topoSort(deps)).toThrow('Cycle detected');
  });
});
/* ------------------------------------------------------------------ */
/* 2. getMigrationOrder — sessions / verifications excluded           */
/* ------------------------------------------------------------------ */
describe('getMigrationOrder', () => {
  // Ephemeral / environment-specific tables must never appear in the order.
  it('does not include "sessions"', () => {
    expect(getMigrationOrder()).not.toContain('sessions');
  });
  it('does not include "verifications"', () => {
    expect(getMigrationOrder()).not.toContain('verifications');
  });
  it('does not include "admin_tokens"', () => {
    expect(getMigrationOrder()).not.toContain('admin_tokens');
  });
  // Parent tables must come before the tables that reference them.
  it('includes "users" before "teams"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
  });
  it('includes "users" before "conversations"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('users')).toBeLessThan(order.indexOf('conversations'));
  });
  it('includes "conversations" before "messages"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('conversations')).toBeLessThan(order.indexOf('messages'));
  });
  it('includes "projects" before "agents"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('projects')).toBeLessThan(order.indexOf('agents'));
  });
  it('includes "agents" before "conversations"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('agents')).toBeLessThan(order.indexOf('conversations'));
  });
  it('includes "missions" before "mission_tasks"', () => {
    const order = getMigrationOrder();
    expect(order.indexOf('missions')).toBeLessThan(order.indexOf('mission_tasks'));
  });
  it('includes all expected tables', () => {
    const order = getMigrationOrder();
    const expected = [
      'users',
      'teams',
      'accounts',
      'projects',
      'agents',
      'conversations',
      'messages',
      'insights',
    ];
    for (const t of expected) {
      expect(order).toContain(t);
    }
  });
});
/* ------------------------------------------------------------------ */
/* 3. Dry-run makes no writes                                         */
/* ------------------------------------------------------------------ */
describe('runMigrateTier — dry-run', () => {
  it('makes no calls to upsertBatch', async () => {
    const source = makeMockSource({
      users: [{ id: 'u1', name: 'Alice', email: 'alice@example.com' }],
    });
    const target = makeMockTarget();
    const result = await runMigrateTier(source, target, {
      targetUrl: 'postgresql://localhost/test',
      dryRun: true,
      allowNonEmpty: false,
      batchSize: 100,
      onProgress: noopProgress(),
    });
    // Dry run: zero writes, and the result reports zero migrated rows.
    expect(target.upsertCalls).toHaveLength(0);
    expect(result.dryRun).toBe(true);
    expect(result.totalRows).toBe(0);
  });
  it('does not call checkTargetPreconditions in dry-run', async () => {
    // Even if hasPgvector is false, dry-run should not throw.
    const source = makeMockSource({});
    const target = makeMockTarget({ hasPgvector: false });
    await expect(
      runMigrateTier(source, target, {
        targetUrl: 'postgresql://localhost/test',
        dryRun: true,
        allowNonEmpty: false,
        batchSize: 100,
        onProgress: noopProgress(),
      }),
    ).resolves.not.toThrow();
    // hasPgvector should NOT have been called during dry run.
    expect(target.hasPgvector).not.toHaveBeenCalled();
  });
});
/* ------------------------------------------------------------------ */
/* 4. Idempotency                                                     */
/* ------------------------------------------------------------------ */
describe('runMigrateTier — idempotency', () => {
  it('produces the same logical row count on second run (upsert semantics)', async () => {
    const userData = [
      { id: 'u1', name: 'Alice', email: 'alice@example.com' },
      { id: 'u2', name: 'Bob', email: 'bob@example.com' },
    ];
    const source = makeMockSource({ users: userData });
    // First run target.
    const target1 = makeMockTarget();
    await runMigrateTier(source, target1, {
      targetUrl: 'postgresql://localhost/test',
      dryRun: false,
      allowNonEmpty: false,
      batchSize: 100,
      onProgress: noopProgress(),
    });
    const firstRunUpserts = target1.upsertCalls.filter((c) => c.table === 'users');
    const firstRunRows = firstRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
    // Second run — allowNonEmpty because first run already wrote rows.
    const target2 = makeMockTarget();
    await runMigrateTier(source, target2, {
      targetUrl: 'postgresql://localhost/test',
      dryRun: false,
      allowNonEmpty: true,
      batchSize: 100,
      onProgress: noopProgress(),
    });
    const secondRunUpserts = target2.upsertCalls.filter((c) => c.table === 'users');
    const secondRunRows = secondRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
    // Both runs write the same number of rows (upsert — second run updates in place).
    expect(firstRunRows).toBe(userData.length);
    expect(secondRunRows).toBe(userData.length);
  });
});
/* ------------------------------------------------------------------ */
/* 5. Empty-target precondition                                       */
/* ------------------------------------------------------------------ */
describe('checkTargetPreconditions', () => {
  it('throws when target table is non-empty and allowNonEmpty is false', async () => {
    // nonEmptyTable makes the mock report 5 pre-existing rows in "users".
    const target = makeMockTarget({ nonEmptyTable: 'users' });
    await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toThrow(
      MigrationPreconditionError,
    );
  });
  it('includes remediation hint in thrown error', async () => {
    const target = makeMockTarget({ nonEmptyTable: 'users' });
    await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
      name: 'MigrationPreconditionError',
      remediation: expect.stringContaining('--allow-non-empty'),
    });
  });
  it('does NOT throw when allowNonEmpty is true', async () => {
    const target = makeMockTarget({ nonEmptyTable: 'users' });
    await expect(checkTargetPreconditions(target, true, ['users'])).resolves.not.toThrow();
  });
  it('throws when pgvector extension is missing', async () => {
    const target = makeMockTarget({ hasPgvector: false });
    await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
      name: 'MigrationPreconditionError',
      remediation: expect.stringContaining('pgvector'),
    });
  });
  it('passes when target is empty and pgvector is present', async () => {
    const target = makeMockTarget({ hasPgvector: true });
    await expect(checkTargetPreconditions(target, false, ['users'])).resolves.not.toThrow();
  });
});
/* ------------------------------------------------------------------ */
/* 6. Skipped tables documented                                       */
/* ------------------------------------------------------------------ */
describe('SKIP_TABLES', () => {
  it('includes "sessions"', () => {
    expect(SKIP_TABLES.has('sessions')).toBe(true);
  });
  it('includes "verifications"', () => {
    expect(SKIP_TABLES.has('verifications')).toBe(true);
  });
  it('includes "admin_tokens"', () => {
    expect(SKIP_TABLES.has('admin_tokens')).toBe(true);
  });
  it('migration result includes skipped table entries', async () => {
    // Even with no source data, skipped tables must show up in the result
    // so operators can see what was deliberately left behind.
    const source = makeMockSource({});
    const target = makeMockTarget();
    const result = await runMigrateTier(source, target, {
      targetUrl: 'postgresql://localhost/test',
      dryRun: false,
      allowNonEmpty: false,
      batchSize: 100,
      onProgress: noopProgress(),
    });
    const skippedNames = result.tables.filter((t) => t.skipped).map((t) => t.table);
    expect(skippedNames).toContain('sessions');
    expect(skippedNames).toContain('verifications');
    expect(skippedNames).toContain('admin_tokens');
  });
});
/* ------------------------------------------------------------------ */
/* 7. Embedding NULL on migrate from non-pgvector source */
/* ------------------------------------------------------------------ */
describe('normaliseSourceRow — embedding handling', () => {
  it('sets embedding to null when sourceHasVector is false and table is insights', () => {
    const input: Record<string, unknown> = {
      id: 'ins-1',
      content: 'Some insight',
      userId: 'u1',
    };
    const output = normaliseSourceRow('insights', input, false);
    expect(output['embedding']).toBeNull();
  });
  it('preserves existing embedding when sourceHasVector is true', () => {
    const vector = [0.1, 0.2, 0.3];
    const input: Record<string, unknown> = {
      id: 'ins-1',
      content: 'Some insight',
      userId: 'u1',
      embedding: vector,
    };
    // Same reference must come back out — not a copy.
    expect(normaliseSourceRow('insights', input, true)['embedding']).toBe(vector);
  });
  it('does not add embedding field to non-vector tables', () => {
    const input: Record<string, unknown> = { id: 'u1', name: 'Alice' };
    const output = normaliseSourceRow('users', input, false);
    expect('embedding' in output).toBe(false);
  });
  it('passes through rows for non-vector tables unchanged', () => {
    const input: Record<string, unknown> = { id: 'u1', name: 'Alice', email: 'alice@test.com' };
    expect(normaliseSourceRow('users', input, false)).toEqual(input);
  });
});
/* ------------------------------------------------------------------ */
/* 8. End-to-end: correct order of upsert calls */
/* ------------------------------------------------------------------ */
describe('runMigrateTier — migration order', () => {
  it('writes users before messages', async () => {
    const source = makeMockSource({
      users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
      messages: [{ id: 'm1', conversationId: 'c1', role: 'user', content: 'Hi' }],
    });
    const target = makeMockTarget();
    await runMigrateTier(source, target, {
      targetUrl: 'postgresql://localhost/test',
      dryRun: false,
      allowNonEmpty: false,
      batchSize: 100,
      onProgress: noopProgress(),
    });
    // FK order: the parent table's upsert must precede the child's.
    const callSequence = target.upsertCalls.map((call) => call.table);
    const firstUsersCall = callSequence.indexOf('users');
    const firstMessagesCall = callSequence.indexOf('messages');
    expect(firstUsersCall).toBeGreaterThanOrEqual(0);
    expect(firstMessagesCall).toBeGreaterThanOrEqual(0);
    expect(firstUsersCall).toBeLessThan(firstMessagesCall);
  });
});
/* ------------------------------------------------------------------ */
/* 9. Embedding-null projection: no-pgvector source */
/* ------------------------------------------------------------------ */
describe('DrizzleMigrationSource embedding-null projection', () => {
it(
'when sourceHasVector is false, readTable for insights omits embedding column ' +
'and normaliseSourceRow sets it to null for the target insert',
async () => {
// Source has insights data but no vector — embedding omitted at read time.
const insightRowWithEmbedding = {
id: 'ins-1',
userId: 'u1',
content: 'Test insight',
embedding: [0.1, 0.2, 0.3], // present in raw data but omitted by source
source: 'agent',
category: 'general',
relevanceScore: 1.0,
};
// makeMockSource with sourceHasVector=false simulates DrizzleMigrationSource
// behaviour: the embedding field is stripped from the returned row.
const source = makeMockSource(
{
users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
insights: [insightRowWithEmbedding],
},
/* sourceHasVector= */ false,
);
const target = makeMockTarget();
await runMigrateTier(
source,
target,
{
targetUrl: 'postgresql://localhost/test',
dryRun: false,
allowNonEmpty: false,
batchSize: 100,
onProgress: noopProgress(),
},
/* sourceHasVector= */ false,
);
// Assert: readTable was called for insights
const insightsRead = source.readTableCalls.find((c) => c.table === 'insights');
expect(insightsRead).toBeDefined();
// Assert: the upsert to insights has embedding === null (not the original vector)
const insightsUpsert = target.upsertCalls.find((c) => c.table === 'insights');
expect(insightsUpsert).toBeDefined();
const upsertedRow = insightsUpsert!.rows[0];
expect(upsertedRow).toBeDefined();
// embedding must be null — not the original [0.1, 0.2, 0.3]
expect(upsertedRow!['embedding']).toBeNull();
// Other fields must pass through unchanged
expect(upsertedRow!['id']).toBe('ins-1');
expect(upsertedRow!['content']).toBe('Test insight');
},
);
});

View File

@@ -1,697 +0,0 @@
/**
* migrate-tier.ts — Core logic for `mosaic storage migrate-tier`.
*
* Migrates data from `tier: local` (PGlite, normalized Drizzle schema) or
* `tier: standalone` (Postgres without pgvector) → `tier: federated`
* (Postgres + pgvector).
*
* Source: DrizzleMigrationSource — reads from the NORMALIZED Drizzle/relational
* schema tables (not the flat `id TEXT + data JSONB` PgliteAdapter schema).
* Both local (PGlite) and standalone (Postgres) sources use the same Drizzle
* abstraction via createPgliteDb() or createDb() from @mosaicstack/db.
* Target: PostgresMigrationTarget — upserts via raw SQL into the same schema.
*
* Key design decisions:
* - Tables are migrated in topological (FK-dependency) order so that
* parent rows exist before child rows are inserted.
* - sessions + verifications are skipped — they are ephemeral / TTL'd.
* - adminTokens is skipped — token hashes are environment-specific
* and should be re-issued on the target.
* - insights.embedding is omitted from source SELECT when the source lacks
* pgvector (local/PGlite tier); target insert gets NULL for that column.
* insights.embedding is nullable per schema (no .notNull() constraint).
* - Each table's batch is wrapped in a transaction for atomicity.
* - Upsert semantics (ON CONFLICT DO UPDATE) make re-runs idempotent.
*
* TODO (FED-M1-08): Add integration tests against real PGlite → real PG.
*/
import postgres from 'postgres';
import * as schema from '@mosaicstack/db';
import { sql as drizzleSql } from '@mosaicstack/db';
/* ------------------------------------------------------------------ */
/* Types */
/* ------------------------------------------------------------------ */
/**
 * Read side of a tier migration. Implemented by DrizzleMigrationSource;
 * mocked in unit tests.
 */
export interface MigrationSource {
  /**
   * Return all rows from a table (normalized Drizzle schema rows).
   * When sourceHasVector is false and the table has a vector column,
   * the source MUST omit the vector column from the result and the
   * caller will set it to null (see normaliseSourceRow).
   */
  readTable(
    tableName: string,
    opts?: { limit?: number; offset?: number },
  ): Promise<Record<string, unknown>[]>;
  /** Count rows in a table. */
  count(tableName: string): Promise<number>;
  /** Close the source connection. */
  close(): Promise<void>;
}
/**
 * Write side of a tier migration. Implemented by PostgresMigrationTarget;
 * mocked in unit tests.
 */
export interface MigrationTarget {
  /**
   * Upsert a batch of rows into a table.
   * Must use ON CONFLICT (id) DO UPDATE semantics (re-runs idempotent).
   */
  upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void>;
  /**
   * Count rows in a target table.
   */
  count(tableName: string): Promise<number>;
  /**
   * Check whether pgvector extension is installed.
   */
  hasPgvector(): Promise<boolean>;
  /** Close the target connection. */
  close(): Promise<void>;
}
/** Options controlling a single runMigrateTier invocation. */
export interface MigrateTierOptions {
  /** Target postgres connection URL. */
  targetUrl: string;
  /** Whether to skip all writes (dry-run). */
  dryRun: boolean;
  /** Skip the non-empty target guard. */
  allowNonEmpty: boolean;
  /** Rows per transaction batch. */
  batchSize: number;
  /** Called with progress messages. */
  onProgress: (msg: string) => void;
}
/** Outcome for a single table in a migration run. */
export interface TableMigrationResult {
  /** snake_case table name (matches MIGRATION_ORDER / SKIP_TABLES). */
  table: string;
  /** Rows actually written to the target (0 when skipped or dry-run). */
  rowsMigrated: number;
  /** True only for SKIP_TABLES entries (see runMigrateTier). */
  skipped: boolean;
  /** Human-readable reason; present only when skipped is true. */
  skipReason?: string;
}
/** Aggregate result of a migration run. */
export interface MigrateTierResult {
  /** One entry per migrated table plus one per skipped table. */
  tables: TableMigrationResult[];
  /** Sum of rowsMigrated across all tables. */
  totalRows: number;
  /** Echo of the dryRun option the run executed with. */
  dryRun: boolean;
}
/* ------------------------------------------------------------------ */
/* Schema: FK-aware topological table order */
/* ------------------------------------------------------------------ */
/**
* SKIP_TABLES: ephemeral or environment-specific tables not worth migrating.
*
* - sessions: TTL'd auth sessions — invalid in new environment.
* - verifications: one-time tokens (email verify, etc.) — already expired.
* - admin_tokens: hashed tokens bound to old environment keys — re-issue.
*/
export const SKIP_TABLES = new Set(['sessions', 'verifications', 'admin_tokens']);
/**
 * Topologically ordered table list (parents before children).
 *
 * Derived from FK references in packages/db/src/schema.ts:
 *
 *   users (no FKs)
 *   teams → users
 *   team_members → teams, users
 *   accounts → users
 *   projects → users, teams
 *   agents → projects, users
 *   missions → projects, users
 *   tasks → projects, missions
 *   mission_tasks → missions, tasks, users
 *   conversations → users, projects, agents
 *   messages → conversations
 *   preferences → users
 *   insights → users [has embedding vector column]
 *   agent_logs → users
 *   skills → users (installedBy, nullable)
 *   routing_rules → users (userId, nullable)
 *   provider_credentials → users
 *   appreciations (no FKs)
 *   events (no FKs)
 *   tickets (no FKs)
 *   summarization_jobs (no FKs)
 *
 * Skipped (not in this list):
 *   sessions → users (ephemeral)
 *   verifications (no FKs, ephemeral)
 *   admin_tokens → users (environment-specific)
 *
 * NOTE: when adding a table to schema.ts, add it here in FK order and to
 * TABLE_OBJECTS below; topoSort() can be used to re-verify the ordering.
 */
export const MIGRATION_ORDER: string[] = [
  'users',
  'teams',
  'team_members',
  'accounts',
  'projects',
  'agents',
  'missions',
  'tasks',
  'mission_tasks',
  'conversations',
  'messages',
  'preferences',
  'insights',
  'agent_logs',
  'skills',
  'routing_rules',
  'provider_credentials',
  'appreciations',
  'events',
  'tickets',
  'summarization_jobs',
];
/** Tables that carry a vector embedding column on the target.
 * Rows from non-pgvector sources get NULL here (see normaliseSourceRow). */
const VECTOR_TABLES = new Set(['insights']);
/* ------------------------------------------------------------------ */
/* Utility: derive topological order from an adjacency list */
/* ------------------------------------------------------------------ */
/**
* Given an adjacency list (table → list of tables it depends on),
* return a valid topological ordering (Kahn's algorithm).
*
* Exposed for unit testing.
*/
/**
 * Kahn's-algorithm topological sort over an FK dependency graph.
 *
 * Exposed for unit testing.
 *
 * @param deps Adjacency list: table → list of tables it depends on.
 * @returns Tables ordered so every dependency precedes its dependents
 *   (stable with respect to the Map's insertion order).
 * @throws Error when the graph contains a cycle.
 */
export function topoSort(deps: Map<string, string[]>): string[] {
  // node → number of unresolved dependencies
  const remaining = new Map<string, number>();
  // dep → nodes waiting on it
  const dependentsOf = new Map<string, string[]>();
  for (const [node, nodeDeps] of deps) {
    if (!remaining.has(node)) remaining.set(node, 0);
    if (!dependentsOf.has(node)) dependentsOf.set(node, []);
    for (const dep of nodeDeps) {
      remaining.set(node, (remaining.get(node) ?? 0) + 1);
      if (!dependentsOf.has(dep)) dependentsOf.set(dep, []);
      dependentsOf.get(dep)!.push(node);
    }
  }
  // Seed the FIFO queue with dependency-free nodes.
  const ready: string[] = [];
  for (const [node, count] of remaining) {
    if (count === 0) ready.push(node);
  }
  const ordered: string[] = [];
  while (ready.length > 0) {
    const current = ready.shift()!;
    ordered.push(current);
    for (const waiter of dependentsOf.get(current) ?? []) {
      const left = (remaining.get(waiter) ?? 0) - 1;
      remaining.set(waiter, left);
      if (left === 0) ready.push(waiter);
    }
  }
  // Any node left unordered means an unbreakable dependency loop.
  if (ordered.length !== deps.size) {
    throw new Error('Cycle detected in FK dependency graph');
  }
  return ordered;
}
/**
 * FK-safe table order for migration with ephemeral/environment-specific
 * tables (SKIP_TABLES) removed. Backed by the pre-computed MIGRATION_ORDER
 * list (checked against schema.ts) rather than a runtime topoSort pass.
 */
export function getMigrationOrder(): string[] {
  const tables: string[] = [];
  for (const name of MIGRATION_ORDER) {
    if (!SKIP_TABLES.has(name)) tables.push(name);
  }
  return tables;
}
/* ------------------------------------------------------------------ */
/* TABLE_OBJECTS: migration table name → Drizzle table object */
/* ------------------------------------------------------------------ */
/**
* Maps MIGRATION_ORDER table names to their corresponding Drizzle table
* objects from the normalized schema. Used by DrizzleMigrationSource to
* execute typed `db.select().from(table)` queries.
*
* Keyed by snake_case table name (matching MIGRATION_ORDER + SKIP_TABLES).
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const TABLE_OBJECTS: Record<string, any> = {
  // Identity & membership (FK roots — see MIGRATION_ORDER doc).
  users: schema.users,
  teams: schema.teams,
  team_members: schema.teamMembers,
  accounts: schema.accounts,
  // Project/agent/mission hierarchy.
  projects: schema.projects,
  agents: schema.agents,
  missions: schema.missions,
  tasks: schema.tasks,
  mission_tasks: schema.missionTasks,
  // Conversation history.
  conversations: schema.conversations,
  messages: schema.messages,
  // Per-user state.
  preferences: schema.preferences,
  insights: schema.insights, // only table with a vector embedding column
  agent_logs: schema.agentLogs,
  skills: schema.skills,
  routing_rules: schema.routingRules,
  provider_credentials: schema.providerCredentials,
  // FK-free tables.
  appreciations: schema.appreciations,
  events: schema.events,
  tickets: schema.tickets,
  summarization_jobs: schema.summarizationJobs,
  // Skipped tables — included so count() works for preflight but never passed
  // to upsertBatch.
  sessions: schema.sessions,
  verifications: schema.verifications,
  admin_tokens: schema.adminTokens,
};
/* ------------------------------------------------------------------ */
/* DrizzleMigrationSource */
/* ------------------------------------------------------------------ */
/**
* MigrationSource backed by a Drizzle DB handle (works with both
* PostgresJsDatabase and PgliteDatabase — they share the same Drizzle
* query API for schema-defined tables).
*
* For the `insights` table (the only vector-column table), when the source
* lacks pgvector (local/PGlite tier), the `embedding` column is excluded
* from the SELECT projection via a raw `db.execute()` query that lists
* only non-vector columns. This prevents a type-registration error from
* PGlite, which does not know the `vector` type. The caller (runMigrateTier
* via normaliseSourceRow) will set embedding to null on the resulting rows.
*
* Column projection is opt-in: pass `sourceHasVector: false` to activate it.
*/
export class DrizzleMigrationSource implements MigrationSource {
  constructor(
    // Drizzle DB handle — PostgresJsDatabase or PgliteDatabase; both expose
    // the select()/execute() surface used below.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    private readonly db: any,
    // False for local/PGlite sources: activates the no-vector projection
    // path for tables listed in VECTOR_TABLES.
    private readonly sourceHasVector: boolean = true,
  ) {}
  /**
   * Columns of the insights table that do NOT include the vector embedding.
   * Used for the no-pgvector projection path.
   * NOTE: these are snake_case DB column names, so rows from this path
   * arrive snake_case (unlike Drizzle's camelCase select results).
   */
  private static readonly INSIGHTS_COLUMNS_NO_VECTOR = [
    'id',
    'user_id',
    'content',
    'source',
    'category',
    'relevance_score',
    'metadata',
    'created_at',
    'updated_at',
    'decayed_at',
  ] as const;
  /**
   * Read all rows (optionally limited/offset) from `tableName`.
   * @throws Error when `tableName` is not in TABLE_OBJECTS.
   */
  async readTable(
    tableName: string,
    opts?: { limit?: number; offset?: number },
  ): Promise<Record<string, unknown>[]> {
    const table = TABLE_OBJECTS[tableName];
    if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);
    // For vector tables when source lacks pgvector: use column-allowlist raw query
    // to avoid type-registration errors.
    if (VECTOR_TABLES.has(tableName) && !this.sourceHasVector) {
      const cols = DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.map((c) => `"${c}"`).join(
        ', ',
      );
      let sql = `SELECT ${cols} FROM "${tableName}"`;
      const params: unknown[] = [];
      if (opts?.limit !== undefined) {
        params.push(opts.limit);
        sql += ` LIMIT $${params.length.toString()}`;
      }
      if (opts?.offset !== undefined) {
        params.push(opts.offset);
        sql += ` OFFSET $${params.length.toString()}`;
      }
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const result = await (this.db as any).execute(
        // drizzle-orm/pglite and drizzle-orm/postgres-js both accept a raw
        // SQL template; use the tagged-template sql helper from drizzle-orm.
        // Since we need dynamic params, we use db.execute with a raw string
        // via the PGlite/postgres.js driver directly.
        // NOTE(review): this object shape ({ sql, params, typings }) is
        // driver-specific — confirm both drivers accept it (FED-M1-08
        // integration tests should cover this path).
        { sql, params, typings: [] },
      );
      // drizzle execute returns { rows: unknown[][] } for PGlite driver,
      // or a RowList for postgres.js. Normalise both shapes.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const raw = result as any;
      if (Array.isArray(raw)) {
        // postgres.js shape: array of row objects
        return raw as Record<string, unknown>[];
      }
      if (raw?.rows && Array.isArray(raw.rows)) {
        // PGlite shape: { rows: unknown[][] } OR { rows: Record<string,unknown>[] }
        const rows = raw.rows as unknown[];
        if (rows.length === 0) return [];
        if (Array.isArray(rows[0])) {
          // Columnar: convert to objects using fields array if available;
          // fall back to the allowlist order (the SELECT lists columns in
          // exactly that order).
          const fields: string[] =
            (raw.fields as Array<{ name: string }> | undefined)?.map((f) => f.name) ??
            DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.slice();
          return (rows as unknown[][]).map((row) => {
            const obj: Record<string, unknown> = {};
            for (let i = 0; i < fields.length; i++) {
              obj[fields[i]!] = row[i];
            }
            return obj;
          });
        }
        return rows as Record<string, unknown>[];
      }
      // Unknown driver shape — treat as empty rather than crash.
      return [];
    }
    // Standard Drizzle select for all other tables (and vector tables when
    // the source has pgvector registered).
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let query = (this.db as any).select().from(table);
    if (opts?.limit !== undefined) query = query.limit(opts.limit);
    if (opts?.offset !== undefined) query = query.offset(opts.offset);
    return (await query) as Record<string, unknown>[];
  }
  /**
   * Count rows in `tableName` via COUNT(*)::int.
   * @throws Error when `tableName` is not in TABLE_OBJECTS.
   */
  async count(tableName: string): Promise<number> {
    const table = TABLE_OBJECTS[tableName];
    if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const [row] = await (this.db as any)
      .select({ n: drizzleSql<number>`COUNT(*)::int` })
      .from(table);
    return (row as { n: number } | undefined)?.n ?? 0;
  }
  async close(): Promise<void> {
    // Lifecycle managed externally — caller closes the db handle.
  }
}
/* ------------------------------------------------------------------ */
/* Real postgres target adapter */
/* ------------------------------------------------------------------ */
/**
* Live implementation of MigrationTarget backed by a real Postgres connection.
* Used by the CLI; mocked in tests.
*/
export class PostgresMigrationTarget implements MigrationTarget {
  private sql: ReturnType<typeof postgres>;
  constructor(url: string) {
    // Small pool: table batches are written sequentially, so 5 connections
    // is ample headroom without pressuring the target's connection limit.
    this.sql = postgres(url, {
      max: 5,
      connect_timeout: 10,
      idle_timeout: 30,
    });
  }
  /**
   * Upsert a batch of rows into `tableName` inside one transaction.
   *
   * The column list is the union of all row keys in the batch; rows missing
   * a column contribute NULL for it. Conflicts on `id` update every non-id
   * column (EXCLUDED values), making re-runs idempotent.
   *
   * @throws Error when the batch has no `id` column.
   */
  async upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void> {
    if (rows.length === 0) return;
    // Collect all column names from the batch (union of all row keys).
    const colSet = new Set<string>();
    for (const row of rows) {
      for (const k of Object.keys(row)) colSet.add(k);
    }
    const cols = [...colSet];
    if (!cols.includes('id')) {
      throw new Error(`Table ${tableName}: rows missing 'id' column`);
    }
    const colList = cols.map((c) => `"${c}"`).join(', ');
    const updateList = cols
      .filter((c) => c !== 'id')
      .map((c) => `"${c}" = EXCLUDED."${c}"`)
      .join(', ');
    // Edge case fix: if the batch carries ONLY the id column, `DO UPDATE
    // SET` with an empty assignment list is a SQL syntax error — fall back
    // to DO NOTHING (the row already exists with the same id; there is
    // nothing to update).
    const conflictClause =
      updateList.length > 0
        ? `ON CONFLICT (id) DO UPDATE SET ${updateList}`
        : 'ON CONFLICT (id) DO NOTHING';
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    await this.sql.begin(async (tx: any) => {
      // Insert in chunks to keep each statement's parameter count bounded
      // (postgres caps bind parameters per statement).
      for (let i = 0; i < rows.length; i += 500) {
        const chunk = rows.slice(i, i + 500);
        // $1..$N placeholders, row-major; missing columns become NULL so
        // every tuple matches the column list.
        const valuePlaceholders = chunk
          .map((_, ri) => `(${cols.map((_, ci) => `$${ri * cols.length + ci + 1}`).join(', ')})`)
          .join(', ');
        const flatValues = chunk.flatMap((row) => cols.map((c) => row[c] ?? null));
        const query = `
          INSERT INTO "${tableName}" (${colList})
          VALUES ${valuePlaceholders}
          ${conflictClause}
        `;
        await tx.unsafe(query, flatValues as never[]);
      }
    });
  }
  /** Count rows currently in `tableName` on the target. */
  async count(tableName: string): Promise<number> {
    const rows = await this.sql.unsafe(`SELECT COUNT(*)::int AS n FROM "${tableName}"`);
    const row = rows[0] as unknown as { n: number } | undefined;
    return row?.n ?? 0;
  }
  /** True when the pgvector extension is installed on the target database. */
  async hasPgvector(): Promise<boolean> {
    const rows = await this.sql`
      SELECT 1 FROM pg_extension WHERE extname = 'vector'
    `;
    return rows.length > 0;
  }
  /** Drain and close the underlying connection pool. */
  async close(): Promise<void> {
    await this.sql.end();
  }
}
/* ------------------------------------------------------------------ */
/* Source-row normalisation */
/* ------------------------------------------------------------------ */
/**
* Drizzle returns rows as camelCase TypeScript objects (e.g. `userId`, not
* `user_id`). The PostgresMigrationTarget upserts via raw SQL and uses the
* column names as given — the `insights` no-vector path uses snake_case column
* aliases in the SELECT, so those rows already arrive as snake_case.
*
* For vector tables (insights), if `embedding` is absent from the source row
* (because DrizzleMigrationSource omitted it in the no-vector projection), we
* explicitly set it to null so the target ON CONFLICT UPDATE doesn't error.
*
* NOTE: insights.embedding is defined as `vector('embedding', { dimensions:
* 1536 })` with no `.notNull()` in schema.ts — it accepts NULL.
*/
export function normaliseSourceRow(
  tableName: string,
  row: Record<string, unknown>,
  sourceHasVector: boolean,
): Record<string, unknown> {
  // Shallow copy — the caller's row object is never mutated.
  const normalised: Record<string, unknown> = { ...row };
  const mustNullEmbedding = VECTOR_TABLES.has(tableName) && !sourceHasVector;
  if (mustNullEmbedding) {
    // No embeddings can exist on a non-pgvector source; write an explicit
    // NULL so the ON CONFLICT UPDATE path never sees undefined.
    normalised['embedding'] = null;
  }
  return normalised;
}
/* ------------------------------------------------------------------ */
/* Precondition checks */
/* ------------------------------------------------------------------ */
/**
 * Raised when the target database fails a pre-write safety check.
 * Carries a `remediation` string the CLI can print to the operator.
 */
export class MigrationPreconditionError extends Error {
  constructor(
    message: string,
    public readonly remediation: string,
  ) {
    super(message);
    this.name = 'MigrationPreconditionError';
  }
}
/**
 * Verify target preconditions before writing any data:
 * pgvector must be installed, and — unless allowNonEmpty — every table
 * about to be migrated must be empty on the target.
 *
 * @throws MigrationPreconditionError on the first failed check.
 */
export async function checkTargetPreconditions(
  target: MigrationTarget,
  allowNonEmpty: boolean,
  tablesToMigrate: string[],
): Promise<void> {
  if (!(await target.hasPgvector())) {
    throw new MigrationPreconditionError(
      'Target Postgres does not have the pgvector extension installed.',
      'Run: CREATE EXTENSION IF NOT EXISTS vector; — or use the pgvector/pgvector:pg17 Docker image.',
    );
  }
  if (allowNonEmpty) return;
  // Fail on the first table that already holds data.
  for (const table of tablesToMigrate) {
    const rowCount = await target.count(table);
    if (rowCount === 0) continue;
    throw new MigrationPreconditionError(
      `Target table "${table}" already contains ${rowCount.toString()} rows.`,
      'Pass --allow-non-empty to overwrite existing data (upsert semantics), ' +
        'or point to an empty target database.',
    );
  }
}
/* ------------------------------------------------------------------ */
/* Core migration runner */
/* ------------------------------------------------------------------ */
/**
 * Run the tier migration end-to-end.
 *
 * Flow: preflight source counts → optional dry-run short-circuit → target
 * precondition checks → per-table batched copy in FK order → skipped-table
 * bookkeeping.
 *
 * @param source Adapter for reading source rows.
 * @param target Adapter for writing rows to target.
 * @param opts Migration options (dry-run, batch size, progress sink).
 * @param sourceHasVector True if the source tier supports vector columns;
 *   when false, insights.embedding is forced to NULL on migrated rows.
 * @returns Per-table results plus the grand total of rows written.
 * @throws MigrationPreconditionError (from checkTargetPreconditions).
 * @throws Error wrapping any per-table failure, with resume guidance.
 */
export async function runMigrateTier(
  source: MigrationSource,
  target: MigrationTarget,
  opts: MigrateTierOptions,
  sourceHasVector = false,
): Promise<MigrateTierResult> {
  const { dryRun, allowNonEmpty, batchSize, onProgress } = opts;
  const tablesToMigrate = getMigrationOrder();
  // Preflight: gather row counts from source.
  onProgress('[migrate-tier] Gathering source row counts...');
  const sourceCounts = new Map<string, number>();
  for (const table of tablesToMigrate) {
    const n = await source.count(table);
    sourceCounts.set(table, n);
  }
  // Log preflight summary.
  onProgress('[migrate-tier] Tables to migrate:');
  for (const table of tablesToMigrate) {
    const n = sourceCounts.get(table) ?? 0;
    onProgress(` ${table}: ${n.toString()} rows`);
  }
  for (const skipped of SKIP_TABLES) {
    onProgress(` ${skipped}: SKIPPED (ephemeral/environment-specific)`);
  }
  // Vector column notice.
  if (!sourceHasVector) {
    onProgress(
      '[migrate-tier] NOTE: Source tier has no pgvector support. ' +
        'insights.embedding will be NULL on all migrated rows.',
    );
  }
  // Dry run: report the plan (including skipped tables) and exit before
  // any precondition check or write.
  if (dryRun) {
    onProgress('[migrate-tier] DRY RUN — no writes will be made.');
    const tables: TableMigrationResult[] = tablesToMigrate.map((t) => ({
      table: t,
      rowsMigrated: 0,
      skipped: false,
    }));
    for (const skipped of SKIP_TABLES) {
      tables.push({ table: skipped, rowsMigrated: 0, skipped: true, skipReason: 'ephemeral' });
    }
    return { tables, totalRows: 0, dryRun: true };
  }
  // Check preconditions before writing.
  await checkTargetPreconditions(target, allowNonEmpty, tablesToMigrate);
  const results: TableMigrationResult[] = [];
  let totalRows = 0;
  for (const table of tablesToMigrate) {
    const sourceCount = sourceCounts.get(table) ?? 0;
    if (sourceCount === 0) {
      // Empty source table: recorded as migrated-with-0-rows, not skipped —
      // the skipped flag is reserved for SKIP_TABLES entries.
      onProgress(`[migrate-tier] ${table}: 0 rows — skipping.`);
      results.push({ table, rowsMigrated: 0, skipped: false });
      continue;
    }
    onProgress(`[migrate-tier] ${table}: migrating ${sourceCount.toString()} rows...`);
    let offset = 0;
    let tableTotal = 0;
    // Tracked only for the failure message below (resume diagnostics).
    let lastSuccessfulId: string | undefined;
    try {
      // Paged read → normalise → upsert, batchSize rows at a time.
      while (offset < sourceCount) {
        const rows = await source.readTable(table, { limit: batchSize, offset });
        if (rows.length === 0) break;
        const normalised = rows.map((row) => normaliseSourceRow(table, row, sourceHasVector));
        await target.upsertBatch(table, normalised);
        lastSuccessfulId = rows[rows.length - 1]?.['id'] as string | undefined;
        tableTotal += rows.length;
        offset += rows.length;
        onProgress(
          `[migrate-tier] ${table}: ${tableTotal.toString()}/${sourceCount.toString()} rows written`,
        );
      }
    } catch (err) {
      // Wrap with table/row context; upsert idempotency makes re-runs safe.
      const errMsg = err instanceof Error ? err.message : String(err);
      throw new Error(
        `[migrate-tier] Failed on table "${table}" after ${tableTotal.toString()} rows ` +
          `(last id: ${lastSuccessfulId ?? 'none'}). Error: ${errMsg}\n` +
          `Remediation: Re-run with --allow-non-empty to resume (upsert is idempotent).`,
      );
    }
    results.push({ table, rowsMigrated: tableTotal, skipped: false });
    totalRows += tableTotal;
    onProgress(`[migrate-tier] ${table}: done (${tableTotal.toString()} rows).`);
  }
  // Add skipped table records.
  for (const skipped of SKIP_TABLES) {
    results.push({
      table: skipped,
      rowsMigrated: 0,
      skipped: true,
      skipReason: 'ephemeral or environment-specific — re-issue on target',
    });
  }
  onProgress(`[migrate-tier] Complete. ${totalRows.toString()} total rows migrated.`);
  return { tables: results, totalRows, dryRun: false };
}

View File

@@ -1,546 +0,0 @@
/**
* Unit tests for tier-detection.ts.
*
* All external I/O (postgres, ioredis) is mocked — no live services required.
*
* Note on hoisting: vi.mock() factories are hoisted above all imports by vitest.
* Variables referenced inside factory callbacks must be declared via vi.hoisted()
* so they are available at hoist time.
*/
import { describe, it, expect, vi, beforeEach } from 'vitest';
/* ------------------------------------------------------------------ */
/* Hoist shared mock state so factories can reference it */
/* ------------------------------------------------------------------ */
const mocks = vi.hoisted(() => {
  // postgres(): the returned value is invoked as a tagged template to run a
  // query, and also carries an `end` method — so the mock is a plain fn
  // with `end` attached as a property.
  const mockSqlFn = vi.fn();
  const mockEnd = vi.fn().mockResolvedValue(undefined);
  const mockPostgresConstructor = vi.fn(() => {
    const sql = mockSqlFn as ReturnType<typeof mockSqlFn>;
    (sql as unknown as Record<string, unknown>)['end'] = mockEnd;
    return sql;
  });
  // ioredis surface exercised by the SUT: connect / ping / disconnect.
  const mockRedisConnect = vi.fn().mockResolvedValue(undefined);
  const mockRedisPing = vi.fn().mockResolvedValue('PONG');
  const mockRedisDisconnect = vi.fn();
  const MockRedis = vi.fn().mockImplementation(() => ({
    connect: mockRedisConnect,
    ping: mockRedisPing,
    disconnect: mockRedisDisconnect,
  }));
  // Every spy is exported so individual tests can override behaviour
  // (e.g. mockRejectedValueOnce) and assert call counts.
  return {
    mockSqlFn,
    mockEnd,
    mockPostgresConstructor,
    mockRedisConnect,
    mockRedisPing,
    mockRedisDisconnect,
    MockRedis,
  };
});
/* ------------------------------------------------------------------ */
/* Module mocks (registered at hoist time) */
/* ------------------------------------------------------------------ */
// Replace the real postgres driver (default export) with the shared spy.
vi.mock('postgres', () => ({
  default: mocks.mockPostgresConstructor,
}));
// Replace ioredis' named `Redis` export with the shared mock class.
vi.mock('ioredis', () => ({
  Redis: mocks.MockRedis,
}));
/* ------------------------------------------------------------------ */
/* Import SUT after mocks are registered */
/* ------------------------------------------------------------------ */
import { detectAndAssertTier, probeServiceHealth, TierDetectionError } from './tier-detection.js';
import type { TierConfig } from './tier-detection.js';
/* ------------------------------------------------------------------ */
/* Config fixtures */
/* ------------------------------------------------------------------ */
// Local tier: embedded PGlite + in-process queue — nothing external to probe.
const LOCAL_CONFIG: TierConfig = {
  tier: 'local',
  storage: { type: 'pglite', dataDir: '.mosaic/pglite' },
  queue: { type: 'local' },
};
// Standalone tier: external Postgres (5432) + Valkey-backed BullMQ queue.
const STANDALONE_CONFIG: TierConfig = {
  tier: 'standalone',
  storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@db-host:5432/mosaic' },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};
// Federated tier: Postgres on 5433 with pgvector (enableVector) + same queue.
const FEDERATED_CONFIG: TierConfig = {
  tier: 'federated',
  storage: {
    type: 'postgres',
    url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
    enableVector: true,
  },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};
/* ------------------------------------------------------------------ */
/* Tests */
/* ------------------------------------------------------------------ */
describe('detectAndAssertTier', () => {
beforeEach(() => {
  vi.clearAllMocks();
  // Default: all probes succeed. Per-test failures are layered on top with
  // mockRejectedValue[Once] so overrides never leak into the next test.
  mocks.mockSqlFn.mockResolvedValue([]);
  mocks.mockEnd.mockResolvedValue(undefined);
  mocks.mockRedisConnect.mockResolvedValue(undefined);
  mocks.mockRedisPing.mockResolvedValue('PONG');
  // Re-wire constructor to return a fresh sql-like object each time.
  mocks.mockPostgresConstructor.mockImplementation(() => {
    const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
    // Attach `end` so the SUT can close the "connection" after probing.
    (sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
    return sql;
  });
  mocks.MockRedis.mockImplementation(() => ({
    connect: mocks.mockRedisConnect,
    ping: mocks.mockRedisPing,
    disconnect: mocks.mockRedisDisconnect,
  }));
});
/* ---------------------------------------------------------------- */
/* 1. local — no-op */
/* ---------------------------------------------------------------- */
it('resolves immediately for tier=local without touching postgres or ioredis', async () => {
  const outcome = detectAndAssertTier(LOCAL_CONFIG);
  await expect(outcome).resolves.toBeUndefined();
  // Neither driver may be constructed for the local tier.
  expect(mocks.MockRedis).not.toHaveBeenCalled();
  expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
});
/* ---------------------------------------------------------------- */
/* 2. standalone — happy path */
/* ---------------------------------------------------------------- */
it('resolves for tier=standalone when postgres and valkey are reachable', async () => {
  const outcome = detectAndAssertTier(STANDALONE_CONFIG);
  await expect(outcome).resolves.toBeUndefined();
  // Exactly one postgres probe (SELECT 1 only — standalone skips pgvector).
  expect(mocks.mockSqlFn).toHaveBeenCalledTimes(1);
  expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(1);
  // Exactly one valkey probe.
  expect(mocks.mockRedisPing).toHaveBeenCalledTimes(1);
  expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
});
/* ---------------------------------------------------------------- */
/* 3. standalone — postgres unreachable */
/* ---------------------------------------------------------------- */
it('throws TierDetectionError with service=postgres when postgres query rejects', async () => {
  mocks.mockSqlFn.mockRejectedValueOnce(new Error('connection refused'));
  await expect(detectAndAssertTier(STANDALONE_CONFIG)).rejects.toBeInstanceOf(TierDetectionError);
  // Fail-fast: valkey must never be probed after the postgres failure.
  expect(mocks.MockRedis).not.toHaveBeenCalled();
});
it('sets service=postgres on the error when postgres fails', async () => {
  mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));
  try {
    await detectAndAssertTier(STANDALONE_CONFIG);
    expect.fail('should have thrown');
  } catch (err) {
    expect(err).toBeInstanceOf(TierDetectionError);
    const detectionError = err as TierDetectionError;
    expect(detectionError.service).toBe('postgres');
    expect(detectionError.remediation).toContain('docker compose');
  }
});
/* ---------------------------------------------------------------- */
/* 4. standalone — valkey unreachable */
/* ---------------------------------------------------------------- */
it('throws TierDetectionError with service=valkey when ping fails', async () => {
// Postgres probe succeeds; valkey connect fails.
mocks.mockSqlFn.mockResolvedValue([]);
mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));
try {
await detectAndAssertTier(STANDALONE_CONFIG);
expect.fail('should have thrown');
} catch (err) {
expect(err).toBeInstanceOf(TierDetectionError);
const typed = err as TierDetectionError;
expect(typed.service).toBe('valkey');
expect(typed.message).toContain('valkey');
expect(typed.remediation).toContain('valkey-federated');
}
});
/* ---------------------------------------------------------------- */
/* 5. federated — happy path                                        */
/* ---------------------------------------------------------------- */
it('resolves for tier=federated when all three checks pass', async () => {
  // SELECT 1 and CREATE EXTENSION both succeed.
  mocks.mockSqlFn.mockResolvedValue([]);
  await expect(detectAndAssertTier(FEDERATED_CONFIG)).resolves.toBeUndefined();
  // postgres probe (SELECT 1) + pgvector probe (CREATE EXTENSION) = 2 postgres constructors.
  expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(2);
  expect(mocks.mockSqlFn).toHaveBeenCalledTimes(2);
  // Valkey probed once.
  expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
});
/* ---------------------------------------------------------------- */
/* 6. federated — pgvector not installable                          */
/* ---------------------------------------------------------------- */
it('throws TierDetectionError with service=pgvector when CREATE EXTENSION fails', async () => {
  // SELECT 1 succeeds (first call), CREATE EXTENSION fails (second call).
  // NOTE: the *Once variants are order-sensitive — probe order is
  // postgres → valkey → pgvector for the federated tier.
  mocks.mockSqlFn
    .mockResolvedValueOnce([]) // SELECT 1
    .mockRejectedValueOnce(new Error('extension "vector" is not available'));
  try {
    await detectAndAssertTier(FEDERATED_CONFIG);
    expect.fail('should have thrown');
  } catch (err) {
    expect(err).toBeInstanceOf(TierDetectionError);
    const typed = err as TierDetectionError;
    expect(typed.service).toBe('pgvector');
    expect(typed.message).toContain('pgvector');
    expect(typed.remediation).toContain('pgvector/pgvector');
  }
});
/* ---------------------------------------------------------------- */
/* 7. probeValkey honors connectTimeout and lazyConnect             */
/* ---------------------------------------------------------------- */
it('constructs the ioredis Redis client with connectTimeout: 5000', async () => {
  await detectAndAssertTier(STANDALONE_CONFIG);
  expect(mocks.MockRedis).toHaveBeenCalledOnce();
  // Only assert the options we care about — other options may exist.
  expect(mocks.MockRedis).toHaveBeenCalledWith(
    expect.any(String),
    expect.objectContaining({ connectTimeout: 5000, lazyConnect: true }),
  );
});
/* ---------------------------------------------------------------- */
/* 8. probePgvector — library-not-installed remediation             */
/* ---------------------------------------------------------------- */
it('includes pgvector/pgvector:pg17 in remediation when pgvector library is missing', async () => {
  // SELECT 1 succeeds; CREATE EXTENSION fails with the canonical library-missing message.
  mocks.mockSqlFn
    .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
    .mockRejectedValueOnce(new Error('extension "vector" is not available')); // probePgvector
  try {
    await detectAndAssertTier(FEDERATED_CONFIG);
    expect.fail('should have thrown');
  } catch (err) {
    expect(err).toBeInstanceOf(TierDetectionError);
    const typed = err as TierDetectionError;
    expect(typed.service).toBe('pgvector');
    expect(typed.remediation).toContain('pgvector/pgvector:pg17');
  }
});
/* ---------------------------------------------------------------- */
/* 9. probePgvector — permission / other error remediation          */
/* ---------------------------------------------------------------- */
it('mentions CREATE permission or superuser in remediation for a generic pgvector error', async () => {
  // SELECT 1 succeeds; CREATE EXTENSION fails with a permission error.
  mocks.mockSqlFn
    .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
    .mockRejectedValueOnce(new Error('permission denied to create extension'));
  try {
    await detectAndAssertTier(FEDERATED_CONFIG);
    expect.fail('should have thrown');
  } catch (err) {
    expect(err).toBeInstanceOf(TierDetectionError);
    const typed = err as TierDetectionError;
    expect(typed.service).toBe('pgvector');
    // Must NOT point to the image fix — that's only for the library-missing case.
    expect(typed.remediation).not.toContain('pgvector/pgvector:pg17');
    // Must mention permissions or superuser.
    expect(typed.remediation).toMatch(/CREATE|superuser/i);
  }
});
/* ---------------------------------------------------------------- */
/* 10. federated tier rejects non-bullmq queue.type                 */
/* ---------------------------------------------------------------- */
it('throws TierDetectionError with service=config for federated tier with queue.type !== bullmq', async () => {
  // Deliberately invalid: federated requires queue.type === 'bullmq'.
  const badConfig: TierConfig = {
    tier: 'federated',
    storage: {
      type: 'postgres',
      url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
      enableVector: true,
    },
    queue: { type: 'local' },
  };
  try {
    await detectAndAssertTier(badConfig);
    expect.fail('should have thrown');
  } catch (err) {
    expect(err).toBeInstanceOf(TierDetectionError);
    const typed = err as TierDetectionError;
    expect(typed.service).toBe('config');
    expect(typed.remediation).toContain('bullmq');
  }
  // No network probes should have been attempted.
  expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
  expect(mocks.MockRedis).not.toHaveBeenCalled();
});
/* ---------------------------------------------------------------- */
/* 11. Error fields populated                                       */
/* ---------------------------------------------------------------- */
it('populates host, port, and remediation on a thrown TierDetectionError', async () => {
  mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));
  let caught: TierDetectionError | undefined;
  try {
    await detectAndAssertTier(STANDALONE_CONFIG);
  } catch (err) {
    caught = err as TierDetectionError;
  }
  expect(caught).toBeInstanceOf(TierDetectionError);
  expect(caught!.service).toBe('postgres');
  // Host and port are extracted from the Postgres URL in STANDALONE_CONFIG.
  expect(caught!.host).toBe('db-host');
  expect(caught!.port).toBe(5432);
  expect(caught!.remediation).toMatch(/docker compose/i);
  expect(caught!.message).toContain('db-host:5432');
});
});
/* ------------------------------------------------------------------ */
/* probeServiceHealth tests */
/* ------------------------------------------------------------------ */
/**
 * probeServiceHealth — non-throwing doctor-style report.
 * Each test rebuilds the mock plumbing in beforeEach: the fake postgres
 * constructor returns the tagged-template mock with an `end` attached,
 * and the fake Redis client exposes connect/ping/disconnect.
 */
describe('probeServiceHealth', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Default happy-path behavior; individual tests override as needed.
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockEnd.mockResolvedValue(undefined);
    mocks.mockRedisConnect.mockResolvedValue(undefined);
    mocks.mockRedisPing.mockResolvedValue('PONG');
    mocks.mockPostgresConstructor.mockImplementation(() => {
      const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
      // The production code calls sql.end() in a finally block, so the
      // mock sql function must carry an `end` property.
      (sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
      return sql;
    });
    mocks.MockRedis.mockImplementation(() => ({
      connect: mocks.mockRedisConnect,
      ping: mocks.mockRedisPing,
      disconnect: mocks.mockRedisDisconnect,
    }));
  });
  /* ---------------------------------------------------------------- */
  /* 12. local tier — all skipped, green                              */
  /* ---------------------------------------------------------------- */
  it('returns all services as skipped and overall green for local tier', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);
    expect(report.tier).toBe('local');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);
    for (const svc of report.services) {
      expect(svc.status).toBe('skipped');
    }
    // No network clients constructed for the in-process tier.
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });
  /* ---------------------------------------------------------------- */
  /* 13. postgres fails, valkey ok → red                              */
  /* ---------------------------------------------------------------- */
  it('returns red overall with postgres fail and valkey ok for standalone when postgres fails', async () => {
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));
    const report = await probeServiceHealth(STANDALONE_CONFIG);
    expect(report.overall).toBe('red');
    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('fail');
    expect(pgCheck?.error).toBeDefined();
    expect(pgCheck?.error?.remediation).toContain('docker compose');
    // Doctor mode keeps probing after a failure — valkey is still checked.
    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('ok');
  });
  /* ---------------------------------------------------------------- */
  /* 14. federated all green → 3 services ok                          */
  /* ---------------------------------------------------------------- */
  it('returns green overall with all 3 services ok for federated when all pass', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);
    const report = await probeServiceHealth(FEDERATED_CONFIG);
    expect(report.tier).toBe('federated');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);
    for (const svc of report.services) {
      expect(svc.status).toBe('ok');
    }
  });
  /* ---------------------------------------------------------------- */
  /* 15. durationMs is a non-negative number for every service check  */
  /* ---------------------------------------------------------------- */
  it('sets durationMs as a non-negative number for every service check', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);
    const report = await probeServiceHealth(FEDERATED_CONFIG);
    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });
  it('sets durationMs >= 0 even for skipped services (local tier)', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);
    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });
  /* ---------------------------------------------------------------- */
  /* 16. configPath is passed through to the report                   */
  /* ---------------------------------------------------------------- */
  it('includes configPath in the report when provided', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG, '/etc/mosaic/mosaic.config.json');
    expect(report.configPath).toBe('/etc/mosaic/mosaic.config.json');
  });
  /* ---------------------------------------------------------------- */
  /* 17. standalone — valkey fails, postgres ok → red                 */
  /* ---------------------------------------------------------------- */
  it('returns red with valkey fail and postgres ok for standalone when valkey fails', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));
    const report = await probeServiceHealth(STANDALONE_CONFIG);
    expect(report.overall).toBe('red');
    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('ok');
    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('fail');
    expect(vkCheck?.error).toBeDefined();
  });
  /* ---------------------------------------------------------------- */
  /* 18. federated — pgvector fails → red with remediation            */
  /* ---------------------------------------------------------------- */
  it('returns red with pgvector fail for federated when pgvector probe fails', async () => {
    // Order-sensitive: first sql call is the postgres probe, second is pgvector.
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // postgres SELECT 1
      .mockRejectedValueOnce(new Error('extension "vector" is not available')); // pgvector
    const report = await probeServiceHealth(FEDERATED_CONFIG);
    expect(report.overall).toBe('red');
    const pvCheck = report.services.find((s) => s.name === 'pgvector');
    expect(pvCheck?.status).toBe('fail');
    expect(pvCheck?.error?.remediation).toContain('pgvector/pgvector:pg17');
  });
  /* ---------------------------------------------------------------- */
  /* 19. federated — non-bullmq queue → red config error, no network  */
  /* ---------------------------------------------------------------- */
  it('returns red overall with config error when federated tier has non-bullmq queue (no network call)', async () => {
    const federatedBadQueueConfig: TierConfig = {
      tier: 'federated',
      storage: {
        type: 'postgres',
        url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
        enableVector: true,
      },
      queue: { type: 'local' },
    };
    const report = await probeServiceHealth(federatedBadQueueConfig);
    expect(report.overall).toBe('red');
    // The config error is surfaced on the valkey entry of the report.
    const valkey = report.services.find((s) => s.name === 'valkey');
    expect(valkey?.status).toBe('fail');
    expect(valkey?.error?.remediation).toMatch(/bullmq/i);
    // Critically: no network call was made — MockRedis constructor must NOT have been called.
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });
  /* ---------------------------------------------------------------- */
  /* 20. durationMs actually measures real elapsed time               */
  /* ---------------------------------------------------------------- */
  it('measures real elapsed time for service probes', async () => {
    const DELAY_MS = 25;
    // Make the postgres mock introduce a real wall-clock delay.
    mocks.mockSqlFn.mockImplementation(
      () =>
        new Promise((resolve) =>
          setTimeout(() => {
            resolve([]);
          }, DELAY_MS),
        ),
    );
    const report = await probeServiceHealth(STANDALONE_CONFIG);
    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck).toBeDefined();
    // Must be >= 20ms (small slack for jitter). Would be 0 if timer were stubbed.
    expect(pgCheck!.durationMs).toBeGreaterThanOrEqual(20);
  });
});

View File

@@ -1,555 +0,0 @@
/**
* Tier Detection — pre-flight service reachability probes.
*
* Lifted from apps/gateway/src/bootstrap/tier-detector.ts so both the gateway
* and the mosaic CLI can share the same probe logic without duplicating code or
* creating circular workspace dependencies.
*
* Library choices:
* - Postgres: `postgres` npm package (already a dep via @mosaicstack/db / drizzle-orm).
* - Valkey: `ioredis` (compatible with Valkey; same URL convention used by bullmq).
*/
import postgres from 'postgres';
import { Redis } from 'ioredis';
/* ------------------------------------------------------------------ */
/* Local structural type — avoids circular dependency                 */
/* ------------------------------------------------------------------ */
/**
 * Minimal structural shape required for tier detection.
 * Mirrors the relevant fields of MosaicConfig (from @mosaicstack/config) without
 * creating a dependency cycle (config depends on storage for StorageConfig).
 * Any object that satisfies MosaicConfig also satisfies this type.
 */
export interface TierConfig {
  // Deployment tier; drives which probes run (local probes nothing).
  tier: 'local' | 'standalone' | 'federated';
  // Storage backend. Only the 'postgres' variant carries a URL the
  // probes can use; other variants fall back to a localhost default.
  storage:
    | { type: 'pglite'; dataDir?: string }
    | { type: 'postgres'; url: string; enableVector?: boolean }
    | { type: 'files'; dataDir: string; format?: 'json' | 'md' };
  // Queue backend; `type === 'bullmq'` implies a Valkey/Redis `url`.
  queue: { type: string; url?: string };
}
/* ------------------------------------------------------------------ */
/* Public types                                                       */
/* ------------------------------------------------------------------ */
/** Result of probing a single backing service. */
export interface ServiceCheck {
  name: 'postgres' | 'valkey' | 'pgvector';
  // 'skipped' means the service is not required for the current tier.
  status: 'ok' | 'fail' | 'skipped';
  host?: string;
  port?: number;
  // Wall-clock probe duration; 0 for skipped checks.
  durationMs: number;
  // Present only when status === 'fail'.
  error?: { message: string; remediation: string };
}
/** Aggregate report returned by probeServiceHealth (doctor mode). */
export interface TierHealthReport {
  tier: 'local' | 'standalone' | 'federated';
  // Path of the config file the report was generated from, if known.
  configPath?: string;
  // 'yellow' is reserved for future non-critical checks.
  overall: 'green' | 'yellow' | 'red';
  services: ServiceCheck[];
}
/* ------------------------------------------------------------------ */
/* Structured error type                                              */
/* ------------------------------------------------------------------ */
/**
 * Error thrown by the tier probes when a required service is unreachable
 * or misconfigured.
 *
 * Carries structured fields so callers (CLI, gateway bootstrap) can render
 * the failing service, its host:port, and a copy-pasteable remediation
 * command without parsing the message string.
 */
export class TierDetectionError extends Error {
  /** Which probe failed; 'config' means a pre-flight configuration check. */
  public readonly service: 'postgres' | 'valkey' | 'pgvector' | 'config';
  public readonly host: string;
  public readonly port: number;
  /** Human-readable fix, e.g. a docker compose command. */
  public readonly remediation: string;
  constructor(opts: {
    service: 'postgres' | 'valkey' | 'pgvector' | 'config';
    host: string;
    port: number;
    remediation: string;
    cause?: unknown;
  }) {
    // BUGFIX: the original concatenated `${port}${remediation}` with no
    // separator, producing garbled messages like "db-host:5432Start
    // Postgres: ...". Insert ". " between the location and the hint.
    const message =
      `[tier-detector] ${opts.service} unreachable or unusable at ` +
      `${opts.host}:${opts.port}. ${opts.remediation}`;
    super(message, { cause: opts.cause });
    this.name = 'TierDetectionError';
    this.service = opts.service;
    this.host = opts.host;
    this.port = opts.port;
    this.remediation = opts.remediation;
  }
}
/* ------------------------------------------------------------------ */
/* URL helpers                                                        */
/* ------------------------------------------------------------------ */
/**
 * Extract host and port from a URL string.
 * Falls back to host 'unknown' / the supplied default port whenever the
 * URL cannot be parsed or omits the corresponding component.
 */
function parseHostPort(url: string, defaultPort: number): { host: string; port: number } {
  let parsed: URL;
  try {
    parsed = new URL(url);
  } catch {
    // Unparseable input — report placeholders instead of throwing.
    return { host: 'unknown', port: defaultPort };
  }
  const host = parsed.hostname === '' ? 'unknown' : parsed.hostname;
  const port = parsed.port === '' ? defaultPort : Number.parseInt(parsed.port, 10);
  return { host, port };
}
/* ------------------------------------------------------------------ */
/* Internal probe results                                             */
/* ------------------------------------------------------------------ */
/** Non-throwing probe outcome used by the *Measured variants (doctor mode). */
interface ProbeResult {
  host: string;
  port: number;
  // Wall-clock time the probe took, successful or not.
  durationMs: number;
  // Present only on failure.
  error?: { message: string; remediation: string };
}
/* ------------------------------------------------------------------ */
/* Postgres probe                                                     */
/* ------------------------------------------------------------------ */
/**
 * Assert that Postgres at `url` accepts a connection and answers a trivial
 * query. Throws TierDetectionError (service: 'postgres') on any failure and
 * always tears down the short-lived connection.
 */
async function probePostgres(url: string): Promise<void> {
  const target = parseHostPort(url, 5432);
  let client: ReturnType<typeof postgres> | undefined;
  try {
    client = postgres(url, {
      max: 1,
      connect_timeout: 5,
      idle_timeout: 5,
    });
    // One round-trip query is sufficient proof of connectivity.
    await client`SELECT 1`;
  } catch (cause) {
    throw new TierDetectionError({
      service: 'postgres',
      host: target.host,
      port: target.port,
      remediation:
        'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
      cause,
    });
  } finally {
    if (client) {
      // Best-effort teardown; cleanup failures must not mask the probe result.
      await client.end({ timeout: 2 }).catch(() => {});
    }
  }
}
/**
 * Non-throwing twin of probePostgres for doctor mode: same probe, but
 * returns a ProbeResult with wall-clock duration and (on failure) a
 * message + remediation instead of throwing.
 */
async function probePostgresMeasured(url: string): Promise<ProbeResult> {
  const { host, port } = parseHostPort(url, 5432);
  const start = Date.now();
  let sql: ReturnType<typeof postgres> | undefined;
  try {
    sql = postgres(url, {
      max: 1,
      connect_timeout: 5,
      idle_timeout: 5,
    });
    await sql`SELECT 1`;
    return { host, port, durationMs: Date.now() - start };
  } catch (cause) {
    return {
      host,
      port,
      durationMs: Date.now() - start,
      error: {
        message: cause instanceof Error ? cause.message : String(cause),
        remediation:
          'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
      },
    };
  } finally {
    // Best-effort cleanup; errors here are deliberately ignored.
    if (sql) {
      await sql.end({ timeout: 2 }).catch(() => {});
    }
  }
}
/* ------------------------------------------------------------------ */
/* pgvector probe                                                     */
/* ------------------------------------------------------------------ */
/**
 * Assert that the pgvector extension can be installed on the database at
 * `url`. Throws TierDetectionError (service: 'pgvector') with a
 * remediation tailored to the failure mode: missing shared library vs.
 * insufficient privileges.
 */
async function probePgvector(url: string): Promise<void> {
  const { host, port } = parseHostPort(url, 5432);
  let sql: ReturnType<typeof postgres> | undefined;
  try {
    sql = postgres(url, {
      max: 1,
      connect_timeout: 5,
      idle_timeout: 5,
    });
    // This succeeds whether the extension is already installed or freshly created.
    // It errors only if the pgvector shared library is missing from the Postgres binary.
    await sql`CREATE EXTENSION IF NOT EXISTS vector`;
  } catch (cause) {
    // Distinguish "library not shipped in the image" from everything else
    // (most commonly a permissions failure on CREATE EXTENSION).
    const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
    const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
    const remediation = isLibraryMissing
      ? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
      : 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
    throw new TierDetectionError({
      service: 'pgvector',
      host,
      port,
      remediation,
      cause,
    });
  } finally {
    if (sql) {
      await sql.end({ timeout: 2 }).catch(() => {
        // Ignore cleanup errors.
      });
    }
  }
}
/**
 * Non-throwing twin of probePgvector for doctor mode: returns a
 * ProbeResult with duration and failure-mode-specific remediation
 * instead of throwing.
 */
async function probePgvectorMeasured(url: string): Promise<ProbeResult> {
  const { host, port } = parseHostPort(url, 5432);
  const start = Date.now();
  let sql: ReturnType<typeof postgres> | undefined;
  try {
    sql = postgres(url, {
      max: 1,
      connect_timeout: 5,
      idle_timeout: 5,
    });
    await sql`CREATE EXTENSION IF NOT EXISTS vector`;
    return { host, port, durationMs: Date.now() - start };
  } catch (cause) {
    // Same failure-mode split as probePgvector: missing library vs. permissions.
    const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
    const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
    const remediation = isLibraryMissing
      ? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
      : 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
    return {
      host,
      port,
      durationMs: Date.now() - start,
      error: { message: cause instanceof Error ? cause.message : String(cause), remediation },
    };
  } finally {
    if (sql) {
      await sql.end({ timeout: 2 }).catch(() => {});
    }
  }
}
/* ------------------------------------------------------------------ */
/* Valkey probe                                                       */
/* ------------------------------------------------------------------ */
// Default Valkey URL when queue.url is not configured (port 6380, not
// the Redis default 6379 — matches the federated compose mapping).
const DEFAULT_VALKEY_URL = 'redis://localhost:6380';
/**
 * Assert that Valkey at `url` accepts a connection and answers PING.
 * Throws TierDetectionError (service: 'valkey') on any failure; always
 * disconnects the client.
 */
async function probeValkey(url: string): Promise<void> {
  const { host, port } = parseHostPort(url, 6380);
  const client = new Redis(url, {
    enableReadyCheck: false,
    maxRetriesPerRequest: 0,
    retryStrategy: () => null, // no retries — fail fast
    lazyConnect: true, // don't connect in the constructor; we drive it below
    connectTimeout: 5000, // fail-fast: 5-second hard cap on connection attempt
  });
  try {
    await client.connect();
    const pong = await client.ping();
    if (pong !== 'PONG') {
      throw new Error(`Unexpected PING response: ${pong}`);
    }
  } catch (cause) {
    throw new TierDetectionError({
      service: 'valkey',
      host,
      port,
      remediation:
        'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
      cause,
    });
  } finally {
    // Synchronous teardown; safe even if connect() never succeeded.
    client.disconnect();
  }
}
/**
 * Non-throwing twin of probeValkey for doctor mode: returns a
 * ProbeResult with duration and (on failure) message + remediation
 * instead of throwing.
 */
async function probeValkeyMeasured(url: string): Promise<ProbeResult> {
  const { host, port } = parseHostPort(url, 6380);
  const start = Date.now();
  const client = new Redis(url, {
    enableReadyCheck: false,
    maxRetriesPerRequest: 0,
    retryStrategy: () => null, // no retries — fail fast
    lazyConnect: true,
    connectTimeout: 5000,
  });
  try {
    await client.connect();
    const pong = await client.ping();
    if (pong !== 'PONG') {
      throw new Error(`Unexpected PING response: ${pong}`);
    }
    return { host, port, durationMs: Date.now() - start };
  } catch (cause) {
    return {
      host,
      port,
      durationMs: Date.now() - start,
      error: {
        message: cause instanceof Error ? cause.message : String(cause),
        remediation:
          'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
      },
    };
  } finally {
    client.disconnect();
  }
}
/* ------------------------------------------------------------------ */
/* Public entry points                                                */
/* ------------------------------------------------------------------ */
/**
 * Assert that all services required by `config.tier` are reachable.
 *
 * - `local` — no-op (PGlite is in-process; no external services).
 * - `standalone` — assert Postgres + Valkey (if queue.type === 'bullmq').
 * - `federated` — assert Postgres + Valkey + pgvector installability.
 *
 * Probe order for federated is postgres → valkey → pgvector; tests rely
 * on this ordering. Throws `TierDetectionError` on the FIRST failure
 * with host:port and a remediation hint (fail-fast, unlike
 * probeServiceHealth which keeps going).
 */
export async function detectAndAssertTier(config: TierConfig): Promise<void> {
  if (config.tier === 'local') {
    // PGlite runs in-process — nothing to probe.
    return;
  }
  // Non-postgres storage still gets a localhost fallback URL so the probe
  // has something to dial.
  const pgUrl =
    config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
  const valkeyUrl =
    config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
  if (config.tier === 'standalone') {
    await probePostgres(pgUrl);
    // Valkey is only required when the queue actually uses it.
    if (valkeyUrl) {
      await probeValkey(valkeyUrl);
    }
    return;
  }
  // tier === 'federated'
  // Reject misconfigured queue upfront — federated requires bullmq + a Valkey URL.
  if (config.queue.type !== 'bullmq') {
    throw new TierDetectionError({
      service: 'config',
      host: 'localhost',
      port: 0,
      remediation:
        "Federated tier requires queue.type === 'bullmq'. " +
        "Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
    });
  }
  const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
  await probePostgres(pgUrl);
  await probeValkey(federatedValkeyUrl);
  await probePgvector(pgUrl);
}
/**
 * Non-throwing variant for `mosaic gateway doctor`.
 *
 * Probes ALL required services even if some fail, returning a structured report.
 * Services not required for the current tier are reported as `skipped`.
 * The services array is always ordered postgres, valkey, pgvector.
 *
 * Overall status:
 *   - `green`  — all required services OK
 *   - `yellow` — all required services OK, but a non-critical check failed
 *                (currently unused — reserved for future optional probes)
 *   - `red`    — at least one required service failed
 *
 * @param config     — tier + storage + queue shape (see TierConfig).
 * @param configPath — optional path echoed back into the report.
 */
export async function probeServiceHealth(
  config: TierConfig,
  configPath?: string,
): Promise<TierHealthReport> {
  const tier = config.tier;
  // local tier: PGlite is in-process, no external services needed.
  if (tier === 'local') {
    return {
      tier,
      configPath,
      overall: 'green',
      services: [
        { name: 'postgres', status: 'skipped', durationMs: 0 },
        { name: 'valkey', status: 'skipped', durationMs: 0 },
        { name: 'pgvector', status: 'skipped', durationMs: 0 },
      ],
    };
  }
  // Same URL fallbacks as detectAndAssertTier.
  const pgUrl =
    config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
  const valkeyUrl =
    config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
  const services: ServiceCheck[] = [];
  let hasFailure = false;
  if (tier === 'standalone') {
    // Postgres — required
    const pgResult = await probePostgresMeasured(pgUrl);
    if (pgResult.error) {
      hasFailure = true;
      services.push({
        name: 'postgres',
        status: 'fail',
        host: pgResult.host,
        port: pgResult.port,
        durationMs: pgResult.durationMs,
        error: pgResult.error,
      });
    } else {
      services.push({
        name: 'postgres',
        status: 'ok',
        host: pgResult.host,
        port: pgResult.port,
        durationMs: pgResult.durationMs,
      });
    }
    // Valkey — required if bullmq (probed even when postgres failed).
    if (valkeyUrl) {
      const vkResult = await probeValkeyMeasured(valkeyUrl);
      if (vkResult.error) {
        hasFailure = true;
        services.push({
          name: 'valkey',
          status: 'fail',
          host: vkResult.host,
          port: vkResult.port,
          durationMs: vkResult.durationMs,
          error: vkResult.error,
        });
      } else {
        services.push({
          name: 'valkey',
          status: 'ok',
          host: vkResult.host,
          port: vkResult.port,
          durationMs: vkResult.durationMs,
        });
      }
    } else {
      services.push({ name: 'valkey', status: 'skipped', durationMs: 0 });
    }
    // pgvector — not required for standalone
    services.push({ name: 'pgvector', status: 'skipped', durationMs: 0 });
    return {
      tier,
      configPath,
      overall: hasFailure ? 'red' : 'green',
      services,
    };
  }
  // tier === 'federated'
  // Postgres — required
  const pgResult = await probePostgresMeasured(pgUrl);
  if (pgResult.error) {
    hasFailure = true;
    services.push({
      name: 'postgres',
      status: 'fail',
      host: pgResult.host,
      port: pgResult.port,
      durationMs: pgResult.durationMs,
      error: pgResult.error,
    });
  } else {
    services.push({
      name: 'postgres',
      status: 'ok',
      host: pgResult.host,
      port: pgResult.port,
      durationMs: pgResult.durationMs,
    });
  }
  // Valkey — required for federated (queue.type must be bullmq).
  // A misconfigured queue is reported as a synthetic valkey failure with
  // no network probe attempted.
  if (config.queue.type !== 'bullmq') {
    hasFailure = true;
    services.push({
      name: 'valkey',
      status: 'fail',
      host: 'localhost',
      port: 0,
      durationMs: 0,
      error: {
        message: "Federated tier requires queue.type === 'bullmq'",
        remediation:
          "Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
      },
    });
  } else {
    const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
    const vkResult = await probeValkeyMeasured(federatedValkeyUrl);
    if (vkResult.error) {
      hasFailure = true;
      services.push({
        name: 'valkey',
        status: 'fail',
        host: vkResult.host,
        port: vkResult.port,
        durationMs: vkResult.durationMs,
        error: vkResult.error,
      });
    } else {
      services.push({
        name: 'valkey',
        status: 'ok',
        host: vkResult.host,
        port: vkResult.port,
        durationMs: vkResult.durationMs,
      });
    }
  }
  // pgvector — required for federated
  const pvResult = await probePgvectorMeasured(pgUrl);
  if (pvResult.error) {
    hasFailure = true;
    services.push({
      name: 'pgvector',
      status: 'fail',
      host: pvResult.host,
      port: pvResult.port,
      durationMs: pvResult.durationMs,
      error: pvResult.error,
    });
  } else {
    services.push({
      name: 'pgvector',
      status: 'ok',
      host: pvResult.host,
      port: pvResult.port,
      durationMs: pvResult.durationMs,
    });
  }
  return {
    tier,
    configPath,
    overall: hasFailure ? 'red' : 'green',
    services,
  };
}

View File

@@ -38,6 +38,6 @@ export interface StorageAdapter {
} }
export type StorageConfig = export type StorageConfig =
| { type: 'postgres'; url: string; enableVector?: boolean } | { type: 'postgres'; url: string }
| { type: 'pglite'; dataDir?: string } | { type: 'pglite'; dataDir?: string }
| { type: 'files'; dataDir: string; format?: 'json' | 'md' }; | { type: 'files'; dataDir: string; format?: 'json' | 'md' };

View File

@@ -1,8 +0,0 @@
// Vitest configuration: expose the global test APIs (describe/it/expect)
// and run tests in a plain Node environment (no DOM emulation).
import { defineConfig } from 'vitest/config';
export default defineConfig({
  test: {
    globals: true,
    environment: 'node',
  },
});

12
pnpm-lock.yaml generated
View File

@@ -152,18 +152,12 @@ importers:
fastify: fastify:
specifier: ^5.0.0 specifier: ^5.0.0
version: 5.8.2 version: 5.8.2
ioredis:
specifier: ^5.10.0
version: 5.10.0
node-cron: node-cron:
specifier: ^4.2.1 specifier: ^4.2.1
version: 4.2.1 version: 4.2.1
openai: openai:
specifier: ^6.32.0 specifier: ^6.32.0
version: 6.32.0(ws@8.20.0)(zod@4.3.6) version: 6.32.0(ws@8.20.0)(zod@4.3.6)
postgres:
specifier: ^3.4.8
version: 3.4.8
reflect-metadata: reflect-metadata:
specifier: ^0.2.0 specifier: ^0.2.0
version: 0.2.2 version: 0.2.2
@@ -648,12 +642,6 @@ importers:
commander: commander:
specifier: ^13.0.0 specifier: ^13.0.0
version: 13.1.0 version: 13.1.0
ioredis:
specifier: ^5.10.0
version: 5.10.0
postgres:
specifier: ^3.4.8
version: 3.4.8
devDependencies: devDependencies:
typescript: typescript:
specifier: ^5.8.0 specifier: ^5.8.0