From b445033c6971f0f3fc82af02b7aaa1adde5f4760 Mon Sep 17 00:00:00 2001 From: Jarvis Date: Thu, 23 Apr 2026 20:21:31 -0500 Subject: [PATCH] feat(federation): two-gateway test harness scaffold (FED-M3-02) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds tools/federation-harness/ — the permanent test bed for M3+ federation E2E tests. Boots two gateways (Server A + Server B) on a shared Docker bridge network with per-gateway Postgres/pgvector + Valkey and a shared Step-CA. - docker-compose.two-gateways.yml: gateway-a/b, postgres-a/b, valkey-a/b, step-ca; image digest-pinned to sha256:1069117740e... (sha-9f1a081, #491) - seed.ts: provisions scope variants A/B/C via real admin REST API; walks full enrollment flow (peer keypair → grant → token → redeem → cert store) - harness.ts: bootHarness/tearDownHarness/serverA/serverB/seed helpers for vitest; idempotent boot (reuses running stack when both gateways healthy) - README.md: prereqs, topology, seed usage, vitest integration, port override, troubleshooting, image digest note No production code modified. Quality gates: typecheck ✓ lint ✓ format ✓ Closes #462 Co-Authored-By: Claude Sonnet 4.6 --- tools/federation-harness/README.md | 218 +++++++++ .../docker-compose.two-gateways.yml | 243 ++++++++++ tools/federation-harness/harness.ts | 258 +++++++++++ tools/federation-harness/seed.ts | 428 ++++++++++++++++++ 4 files changed, 1147 insertions(+) create mode 100644 tools/federation-harness/README.md create mode 100644 tools/federation-harness/docker-compose.two-gateways.yml create mode 100644 tools/federation-harness/harness.ts create mode 100644 tools/federation-harness/seed.ts diff --git a/tools/federation-harness/README.md b/tools/federation-harness/README.md new file mode 100644 index 0000000..aa2095b --- /dev/null +++ b/tools/federation-harness/README.md @@ -0,0 +1,218 @@ +# Federation Test Harness + +Local two-gateway federation test infrastructure for Mosaic Stack M3+. 
+ +This harness boots two real gateway instances (`gateway-a`, `gateway-b`) on a +shared Docker bridge network, each backed by its own Postgres (pgvector) + +Valkey, sharing a single Step-CA. It is the test bed for all M3+ federation +E2E tests. + +## Prerequisites + +- Docker with Compose v2 (`docker compose version` ≥ 2.20) +- pnpm (for running via repo scripts) +- `infra/step-ca/dev-password` must exist (copy from `infra/step-ca/dev-password.example`) + +## Network Topology + +``` +Host machine +├── localhost:14001 → gateway-a (Server A — home / requesting) +├── localhost:14002 → gateway-b (Server B — work / serving) +├── localhost:15432 → postgres-a +├── localhost:15433 → postgres-b +├── localhost:16379 → valkey-a +├── localhost:16380 → valkey-b +└── localhost:19000 → step-ca (shared CA) + +Docker network: fed-test-net (bridge) + gateway-a ←──── mTLS ────→ gateway-b + ↘ ↗ + step-ca +``` + +Ports are chosen to avoid collision with the base dev stack (5433, 6380, 14242, 9000). + +## Starting the Harness + +```bash +# From repo root +docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d + +# Wait for all services to be healthy (~60-90s on first boot due to NestJS cold start) +docker compose -f tools/federation-harness/docker-compose.two-gateways.yml ps +``` + +## Seeding Test Data + +The seed script provisions three grant scope variants (A, B, C) and walks the +full enrollment flow so Server A ends up with active peers pointing at Server B. 
+ +```bash +# Assumes stack is already running +pnpm tsx tools/federation-harness/seed.ts + +# Or boot + seed in one step +pnpm tsx tools/federation-harness/seed.ts --boot +``` + +### Scope Variants + +| Variant | Resources | Filters | Excluded | Purpose | +| ------- | ------------------ | ---------------------------------- | ----------- | ------------------------------- | +| A | tasks, notes | include_personal: true | (none) | Personal data federation | +| B | tasks | include_teams: ['T1'], no personal | (none) | Team-scoped, no personal | +| C | tasks, credentials | include_personal: true | credentials | Sanity: excluded wins over list | + +## Using from Vitest + +```ts +import { + bootHarness, + tearDownHarness, + serverA, + serverB, + seed, +} from '../../tools/federation-harness/harness.js'; +import type { HarnessHandle } from '../../tools/federation-harness/harness.js'; + +let handle: HarnessHandle; + +beforeAll(async () => { + handle = await bootHarness(); +}, 180_000); // allow 3 min for Docker pull + NestJS cold start + +afterAll(async () => { + await tearDownHarness(handle); +}); + +test('variant A: list tasks returns personal tasks', async () => { + const seedResult = await seed(handle, 'variantA'); + const a = serverA(handle); + + const res = await fetch(`${a.baseUrl}/api/federation/tasks`, { + headers: { 'x-federation-grant': seedResult.grants.variantA.id }, + }); + expect(res.status).toBe(200); +}); +``` + +The `bootHarness()` function is **idempotent**: if both gateways are already +healthy, it reuses the running stack and returns `ownedStack: false`. Tests +should not call `tearDownHarness` when `ownedStack` is false unless they +explicitly want to shut down a shared stack. 
+ +## Vitest Config (pnpm test:federation) + +Add to `vitest.config.ts` at repo root (or a dedicated config): + +```ts +// vitest.federation.config.ts +import { defineConfig } from 'vitest/config'; + +export default defineConfig({ + test: { + include: ['**/*.federation.test.ts'], + testTimeout: 60_000, + hookTimeout: 180_000, + reporters: ['verbose'], + }, +}); +``` + +Then add to root `package.json`: + +```json +"test:federation": "vitest run --config vitest.federation.config.ts" +``` + +## Nuking State + +```bash +# Remove containers AND volumes (ephemeral state — CA keys, DBs, everything) +docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v +``` + +On next `up`, Step-CA re-initialises from scratch and generates new CA keys. + +## Step-CA Root Certificate + +The CA root lives in the `fed-harness-step-ca` Docker volume at +`/home/step/certs/root_ca.crt`. To extract it to the host: + +```bash +docker run --rm \ + -v fed-harness-step-ca:/home/step \ + alpine cat /home/step/certs/root_ca.crt > /tmp/fed-harness-root-ca.crt +``` + +## Troubleshooting + +### Port conflicts + +Default host ports: 14001, 14002, 15432, 15433, 16379, 16380, 19000. +Override via environment variables before `docker compose up`: + +```bash +GATEWAY_A_HOST_PORT=14101 GATEWAY_B_HOST_PORT=14102 \ + docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d +``` + +### Image pull failures + +The gateway image is digest-pinned to: + +``` +git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02 +``` + +(sha-9f1a081, post-#491 IMG-FIX) + +If the registry is unreachable, Docker will use the locally cached image if +present. If no local image exists, the compose up will fail with a pull error. +In that case: + +1. Ensure you can reach `git.mosaicstack.dev` (VPN, DNS, etc.). +2. Log in: `docker login git.mosaicstack.dev` +3. 
Pull manually: `docker pull git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02` + +### NestJS cold start + +Gateway containers take 40–60 seconds to become healthy on first boot (Node.js +module resolution + NestJS DI bootstrap). The `start_period: 60s` in the +compose healthcheck covers this. `bootHarness()` polls for up to 3 minutes. + +### Step-CA startup + +Step-CA initialises on first boot (generates CA keys). This takes ~5-10s. +The `start_period: 30s` in the healthcheck covers it. Both gateways wait for +Step-CA to be healthy before starting (`depends_on: step-ca: condition: service_healthy`). + +### dev-password missing + +The Step-CA container requires `infra/step-ca/dev-password` to be mounted. +Copy the example and set a local password: + +```bash +cp infra/step-ca/dev-password.example infra/step-ca/dev-password +# Edit the file to set your preferred dev CA password +``` + +The file is `.gitignore`d — do not commit it. + +## Image Digest Note + +The gateway image is pinned to `sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02` +(sha-9f1a081). This is the digest promoted by PR #491 (IMG-FIX). The `latest` +tag is forbidden per Mosaic image policy. When a new gateway build is promoted, +update the digest in `docker-compose.two-gateways.yml` and in this file. + +## Permanent Infrastructure + +This harness is designed to outlive M3 and be reused by M4+ milestone tests. +It is not a throwaway scaffold — treat it as production test infrastructure: + +- Keep it idempotent. +- Do not hardcode test assumptions in the harness layer (put them in tests). +- Update the seed script when new scope variants are needed. +- The README and harness should be kept in sync as the federation API evolves. 
diff --git a/tools/federation-harness/docker-compose.two-gateways.yml b/tools/federation-harness/docker-compose.two-gateways.yml new file mode 100644 index 0000000..eb345b2 --- /dev/null +++ b/tools/federation-harness/docker-compose.two-gateways.yml @@ -0,0 +1,243 @@ +# tools/federation-harness/docker-compose.two-gateways.yml +# +# Two-gateway federation test harness — local-only, no Portainer/Swarm needed. +# +# USAGE (manual): +# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d +# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v +# +# USAGE (from harness.ts): +# const handle = await bootHarness(); +# ... +# await tearDownHarness(handle); +# +# TOPOLOGY: +# gateway-a — "home" instance (Server A, the requesting side) +# └── postgres-a (pgvector/pg17, port 15432) +# └── valkey-a (port 16379) +# gateway-b — "work" instance (Server B, the serving side) +# └── postgres-b (pgvector/pg17, port 15433) +# └── valkey-b (port 16380) +# step-ca — shared CA for both gateways (port 19000) +# +# All services share the `fed-test-net` bridge network. +# Host port ranges (15432-15433, 16379-16380, 14001-14002, 19000) are chosen +# to avoid collision with the base dev stack (5433, 6380, 14242, 9000). +# +# IMAGE: +# Pinned to the immutable digest sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02 +# (sha-9f1a081, post-#491 IMG-FIX, smoke-tested locally). +# Update this digest only after a new CI build is promoted to the registry. +# +# STEP-CA: +# Single shared Step-CA instance. Both gateways connect to it. +# CA volume is ephemeral per `docker compose down -v`; regenerated on next up. +# The harness seed script provisions the CA roots cross-trust after first boot. 
+ +services: + # ─── Shared Certificate Authority ──────────────────────────────────────────── + step-ca: + image: smallstep/step-ca:0.27.4 + container_name: fed-harness-step-ca + restart: unless-stopped + ports: + - '${STEP_CA_HOST_PORT:-19000}:9000' + volumes: + - step_ca_data:/home/step + - ../../infra/step-ca/init.sh:/usr/local/bin/mosaic-step-ca-init.sh:ro + - ../../infra/step-ca/templates:/etc/step-ca-templates:ro + - ../../infra/step-ca/dev-password:/run/secrets/ca_password:ro + entrypoint: ['/bin/sh', '/usr/local/bin/mosaic-step-ca-init.sh'] + networks: + - fed-test-net + healthcheck: + test: + [ + 'CMD', + 'step', + 'ca', + 'health', + '--ca-url', + 'https://localhost:9000', + '--root', + '/home/step/certs/root_ca.crt', + ] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + + # ─── Server A — Home / Requesting Gateway ──────────────────────────────────── + postgres-a: + image: pgvector/pgvector:pg17 + container_name: fed-harness-postgres-a + restart: unless-stopped + ports: + - '${PG_A_HOST_PORT:-15432}:5432' + environment: + POSTGRES_USER: mosaic + POSTGRES_PASSWORD: mosaic + POSTGRES_DB: mosaic + volumes: + - pg_a_data:/var/lib/postgresql/data + - ../../infra/pg-init:/docker-entrypoint-initdb.d:ro + networks: + - fed-test-net + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mosaic'] + interval: 5s + timeout: 3s + retries: 5 + + valkey-a: + image: valkey/valkey:8-alpine + container_name: fed-harness-valkey-a + restart: unless-stopped + ports: + - '${VALKEY_A_HOST_PORT:-16379}:6379' + volumes: + - valkey_a_data:/data + networks: + - fed-test-net + healthcheck: + test: ['CMD', 'valkey-cli', 'ping'] + interval: 5s + timeout: 3s + retries: 5 + + gateway-a: + image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02 + # Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally) + container_name: fed-harness-gateway-a + restart: unless-stopped + ports: + - 
'${GATEWAY_A_HOST_PORT:-14001}:3000' + environment: + MOSAIC_TIER: federated + DATABASE_URL: postgres://mosaic:mosaic@postgres-a:5432/mosaic + VALKEY_URL: redis://valkey-a:6379 + GATEWAY_PORT: '3000' + GATEWAY_CORS_ORIGIN: 'http://localhost:14001' + BETTER_AUTH_SECRET: harness-secret-server-a-do-not-use-in-prod + BETTER_AUTH_URL: 'http://gateway-a:3000' + STEP_CA_URL: 'https://step-ca:9000' + FEDERATION_PEER_HOSTNAME: gateway-a + # Admin key — fixed for harness use only; never use in production + ADMIN_API_KEY: harness-admin-key-a + depends_on: + postgres-a: + condition: service_healthy + valkey-a: + condition: service_healthy + step-ca: + condition: service_healthy + networks: + - fed-test-net + healthcheck: + test: + [ + 'CMD', + 'node', + '-e', + "require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))", + ] + interval: 10s + timeout: 5s + retries: 5 + start_period: 60s + + # ─── Server B — Work / Serving Gateway ────────────────────────────────────── + postgres-b: + image: pgvector/pgvector:pg17 + container_name: fed-harness-postgres-b + restart: unless-stopped + ports: + - '${PG_B_HOST_PORT:-15433}:5432' + environment: + POSTGRES_USER: mosaic + POSTGRES_PASSWORD: mosaic + POSTGRES_DB: mosaic + volumes: + - pg_b_data:/var/lib/postgresql/data + - ../../infra/pg-init:/docker-entrypoint-initdb.d:ro + networks: + - fed-test-net + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U mosaic'] + interval: 5s + timeout: 3s + retries: 5 + + valkey-b: + image: valkey/valkey:8-alpine + container_name: fed-harness-valkey-b + restart: unless-stopped + ports: + - '${VALKEY_B_HOST_PORT:-16380}:6379' + volumes: + - valkey_b_data:/data + networks: + - fed-test-net + healthcheck: + test: ['CMD', 'valkey-cli', 'ping'] + interval: 5s + timeout: 3s + retries: 5 + + gateway-b: + image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02 + 
# Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally) + container_name: fed-harness-gateway-b + restart: unless-stopped + ports: + - '${GATEWAY_B_HOST_PORT:-14002}:3000' + environment: + MOSAIC_TIER: federated + DATABASE_URL: postgres://mosaic:mosaic@postgres-b:5432/mosaic + VALKEY_URL: redis://valkey-b:6379 + GATEWAY_PORT: '3000' + GATEWAY_CORS_ORIGIN: 'http://localhost:14002' + BETTER_AUTH_SECRET: harness-secret-server-b-do-not-use-in-prod + BETTER_AUTH_URL: 'http://gateway-b:3000' + STEP_CA_URL: 'https://step-ca:9000' + FEDERATION_PEER_HOSTNAME: gateway-b + # Admin key — fixed for harness use only; never use in production + ADMIN_API_KEY: harness-admin-key-b + depends_on: + postgres-b: + condition: service_healthy + valkey-b: + condition: service_healthy + step-ca: + condition: service_healthy + networks: + - fed-test-net + healthcheck: + test: + [ + 'CMD', + 'node', + '-e', + "require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))", + ] + interval: 10s + timeout: 5s + retries: 5 + start_period: 60s + +networks: + fed-test-net: + name: fed-test-net + driver: bridge + +volumes: + step_ca_data: + name: fed-harness-step-ca + pg_a_data: + name: fed-harness-pg-a + valkey_a_data: + name: fed-harness-valkey-a + pg_b_data: + name: fed-harness-pg-b + valkey_b_data: + name: fed-harness-valkey-b diff --git a/tools/federation-harness/harness.ts b/tools/federation-harness/harness.ts new file mode 100644 index 0000000..2631b16 --- /dev/null +++ b/tools/federation-harness/harness.ts @@ -0,0 +1,258 @@ +/** + * tools/federation-harness/harness.ts + * + * Vitest-consumable helpers for the two-gateway federation harness. 
+ * + * USAGE (in a vitest test file): + * + * import { bootHarness, tearDownHarness, serverA, serverB, seed } from + * '../../tools/federation-harness/harness.js'; + * + * let handle: HarnessHandle; + * + * beforeAll(async () => { + * handle = await bootHarness(); + * }, 180_000); + * + * afterAll(async () => { + * await tearDownHarness(handle); + * }); + * + * test('variant A — list tasks', async () => { + * const seedResult = await seed(handle, 'variantA'); + * const a = serverA(handle); + * const res = await fetch(`${a.baseUrl}/api/federation/list/tasks`, { + * headers: { 'x-admin-key': a.adminKey }, + * }); + * expect(res.status).toBe(200); + * }); + * + * ESM / NodeNext: all imports use .js extensions. + */ + +import { execSync, execFileSync } from 'node:child_process'; +import { resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; +import { runSeed, type SeedResult } from './seed.js'; + +// ─── Types ─────────────────────────────────────────────────────────────────── + +export interface GatewayAccessor { + /** Base URL reachable from the host machine, e.g. http://localhost:14001 */ + baseUrl: string; + /** Admin key for X-Admin-Key header */ + adminKey: string; + /** Internal Docker network hostname (for container-to-container calls) */ + internalHostname: string; +} + +export interface HarnessHandle { + /** Server A accessor */ + a: GatewayAccessor; + /** Server B accessor */ + b: GatewayAccessor; + /** Absolute path to the docker-compose file */ + composeFile: string; + /** Whether this instance booted the stack (vs. 
reusing an existing one) */ + ownedStack: boolean; + /** Optional seed result if seed() was called */ + seedResult?: SeedResult; +} + +export type SeedScenario = 'variantA' | 'variantB' | 'variantC' | 'all'; + +// ─── Constants ──────────────────────────────────────────────────────────────── + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml'); + +const GATEWAY_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001'; +const GATEWAY_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002'; +const ADMIN_KEY_A = process.env['ADMIN_KEY_A'] ?? 'harness-admin-key-a'; +const ADMIN_KEY_B = process.env['ADMIN_KEY_B'] ?? 'harness-admin-key-b'; + +const READINESS_TIMEOUT_MS = 180_000; +const READINESS_POLL_MS = 3_000; + +// ─── Internal helpers ───────────────────────────────────────────────────────── + +async function isGatewayHealthy(baseUrl: string): Promise { + try { + const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) }); + return res.ok; + } catch { + return false; + } +} + +async function waitForStack(handle: HarnessHandle): Promise { + const deadline = Date.now() + READINESS_TIMEOUT_MS; + const gateways: Array<{ label: string; url: string }> = [ + { label: 'gateway-a', url: handle.a.baseUrl }, + { label: 'gateway-b', url: handle.b.baseUrl }, + ]; + + for (const gw of gateways) { + process.stdout.write(`[harness] Waiting for ${gw.label}...`); + while (Date.now() < deadline) { + if (await isGatewayHealthy(gw.url)) { + process.stdout.write(' ready\n'); + break; + } + if (Date.now() + READINESS_POLL_MS > deadline) { + throw new Error( + `[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS}ms`, + ); + } + await new Promise((r) => setTimeout(r, READINESS_POLL_MS)); + process.stdout.write('.'); + } + } +} + +function isStackRunning(): boolean { + try { + const output = execFileSync( + 'docker', + ['compose', '-f', 
COMPOSE_FILE, 'ps', '--format', 'json'], + { encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] }, + ); + + if (!output.trim()) return false; + + // Parse JSON lines — each running service emits a JSON object per line + const lines = output.trim().split('\n').filter(Boolean); + const runningServices = lines.filter((line) => { + try { + const obj = JSON.parse(line) as { State?: string }; + return obj.State === 'running'; + } catch { + return false; + } + }); + + // Expect at least gateway-a and gateway-b running + return runningServices.length >= 2; + } catch { + return false; + } +} + +// ─── Public API ─────────────────────────────────────────────────────────────── + +/** + * Boot the harness stack. + * + * Idempotent: if the stack is already running and both gateways are healthy, + * this function reuses the existing stack and returns a handle with + * `ownedStack: false`. Callers that set `ownedStack: false` should NOT call + * `tearDownHarness` unless they explicitly want to tear down a pre-existing stack. + * + * If the stack is not running, it starts it with `docker compose up -d` and + * waits for both gateways to pass their /api/health probe. 
+ */ +export async function bootHarness(): Promise { + const handle: HarnessHandle = { + a: { + baseUrl: GATEWAY_A_URL, + adminKey: ADMIN_KEY_A, + internalHostname: 'gateway-a', + }, + b: { + baseUrl: GATEWAY_B_URL, + adminKey: ADMIN_KEY_B, + internalHostname: 'gateway-b', + }, + composeFile: COMPOSE_FILE, + ownedStack: false, + }; + + // Check if both gateways are already healthy + const [aHealthy, bHealthy] = await Promise.all([ + isGatewayHealthy(handle.a.baseUrl), + isGatewayHealthy(handle.b.baseUrl), + ]); + + if (aHealthy && bHealthy) { + console.log('[harness] Stack already running — reusing existing stack.'); + handle.ownedStack = false; + return handle; + } + + console.log('[harness] Starting federation harness stack...'); + execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' }); + handle.ownedStack = true; + + await waitForStack(handle); + console.log('[harness] Stack is ready.'); + + return handle; +} + +/** + * Tear down the harness stack. + * + * Runs `docker compose down -v` to remove containers AND volumes (ephemeral state). + * Only tears down if `handle.ownedStack` is true unless `force` is set. + */ +export async function tearDownHarness( + handle: HarnessHandle, + opts?: { force?: boolean }, +): Promise { + if (!handle.ownedStack && !opts?.force) { + console.log( + '[harness] Stack not owned by this handle — skipping teardown (pass force: true to override).', + ); + return; + } + + console.log('[harness] Tearing down federation harness stack...'); + execSync(`docker compose -f "${handle.composeFile}" down -v`, { stdio: 'inherit' }); + console.log('[harness] Stack torn down.'); +} + +/** + * Return the Server A accessor from a harness handle. + * Convenience wrapper for test readability. + */ +export function serverA(handle: HarnessHandle): GatewayAccessor { + return handle.a; +} + +/** + * Return the Server B accessor from a harness handle. + * Convenience wrapper for test readability. 
+ */ +export function serverB(handle: HarnessHandle): GatewayAccessor { + return handle.b; +} + +/** + * Seed the harness with test data for one or more scenarios. + * + * @param handle The harness handle returned by bootHarness(). + * @param scenario Which scope variants to provision: + * 'variantA' | 'variantB' | 'variantC' | 'all' + * + * Returns a SeedResult with grant IDs and peer IDs for each variant, + * which test assertions can reference. + */ +export async function seed( + handle: HarnessHandle, + scenario: SeedScenario = 'all', +): Promise { + // For now all scenarios run the full seed — M3-11 can narrow this. + // The seed script is idempotent in the sense that it creates new grants + // each time; tests should start with a clean stack for isolation. + void scenario; // narrowing deferred to M3-11 + + const result = await runSeed({ + serverAUrl: handle.a.baseUrl, + serverBUrl: handle.b.baseUrl, + adminKeyA: handle.a.adminKey, + adminKeyB: handle.b.adminKey, + }); + + handle.seedResult = result; + return result; +} diff --git a/tools/federation-harness/seed.ts b/tools/federation-harness/seed.ts new file mode 100644 index 0000000..caab4a2 --- /dev/null +++ b/tools/federation-harness/seed.ts @@ -0,0 +1,428 @@ +#!/usr/bin/env tsx +/** + * tools/federation-harness/seed.ts + * + * Provisions test data for the two-gateway federation harness. + * Run via: tsx tools/federation-harness/seed.ts + * + * What this script does: + * 1. (Optional) Boots the compose stack if --boot flag is passed. + * 2. Waits for both gateways to be healthy. + * 3. Creates three grants on Server B matching the M3 acceptance test scenarios: + * - Scope variant A: tasks + notes, include_personal: true + * - Scope variant B: tasks only, include_teams: ['T1'], exclude T2 + * - Scope variant C: tasks + credentials in resources, credentials excluded (sanity) + * 4. For each grant, walks the enrollment flow so Server A ends up with + * an active peer + cert + sealed key. + * 5. 
Inserts representative test tasks/notes/credentials on Server B. + * + * IMPORTANT: This script uses the real admin REST API — no direct DB writes. + * It exercises the full enrollment flow as M3 acceptance tests will. + * + * ESM / NodeNext: all imports use .js extensions. + */ + +import { execSync } from 'node:child_process'; +import { resolve, dirname } from 'node:path'; +import { fileURLToPath } from 'node:url'; + +// ─── Constants ─────────────────────────────────────────────────────────────── + +const __dirname = dirname(fileURLToPath(import.meta.url)); +const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml'); + +/** Base URLs as seen from the host machine (mapped host ports). */ +const SERVER_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001'; +const SERVER_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002'; +const ADMIN_KEY_A = process.env['ADMIN_KEY_A'] ?? 'harness-admin-key-a'; +const ADMIN_KEY_B = process.env['ADMIN_KEY_B'] ?? 'harness-admin-key-b'; + +const READINESS_TIMEOUT_MS = 120_000; +const READINESS_POLL_MS = 3_000; + +// ─── Scope variant definitions (for M3 acceptance tests) ───────────────────── + +/** Scope variant A — tasks + notes, personal data included. */ +export const SCOPE_VARIANT_A = { + resources: ['tasks', 'notes'], + filters: { + tasks: { include_personal: true }, + notes: { include_personal: true }, + }, + excluded_resources: [] as string[], + max_rows_per_query: 500, +}; + +/** Scope variant B — tasks only, team T1 only, no personal. */ +export const SCOPE_VARIANT_B = { + resources: ['tasks'], + filters: { + tasks: { include_teams: ['T1'], include_personal: false }, + }, + excluded_resources: [] as string[], + max_rows_per_query: 500, +}; + +/** + * Scope variant C — tasks + credentials in resources list, but credentials + * explicitly in excluded_resources. Sanity test: credentials must still be + * inaccessible even though they appear in resources. 
+ */ +export const SCOPE_VARIANT_C = { + resources: ['tasks', 'credentials'], + filters: { + tasks: { include_personal: true }, + }, + excluded_resources: ['credentials'], + max_rows_per_query: 500, +}; + +// ─── Inline types (no import from packages/types — M3-01 branch not yet merged) ─ + +interface AdminFetchOptions { + method?: string; + body?: unknown; + adminKey: string; +} + +interface PeerRecord { + peerId: string; + csrPem: string; +} + +interface GrantRecord { + id: string; + status: string; + scope: unknown; +} + +interface EnrollmentTokenResult { + token: string; + expiresAt: string; + enrollmentUrl: string; +} + +interface EnrollmentRedeemResult { + certPem: string; + certChainPem: string; +} + +export interface SeedResult { + serverAUrl: string; + serverBUrl: string; + grants: { + variantA: GrantRecord; + variantB: GrantRecord; + variantC: GrantRecord; + }; + peers: { + variantA: PeerRecord & { grantId: string }; + variantB: PeerRecord & { grantId: string }; + variantC: PeerRecord & { grantId: string }; + }; +} + +// ─── HTTP helpers ───────────────────────────────────────────────────────────── + +async function adminFetch(baseUrl: string, path: string, opts: AdminFetchOptions): Promise { + const url = `${baseUrl}${path}`; + const res = await fetch(url, { + method: opts.method ?? 'GET', + headers: { + 'Content-Type': 'application/json', + 'x-admin-key': opts.adminKey, + }, + body: opts.body !== undefined ? JSON.stringify(opts.body) : undefined, + }); + + if (!res.ok) { + const text = await res.text().catch(() => '(no body)'); + throw new Error(`${opts.method ?? 
'GET'} ${url} → ${res.status}: ${text}`);
  }

  return res.json() as Promise;
}

// ─── Readiness probe ──────────────────────────────────────────────────────────

/**
 * Poll `GET {baseUrl}/api/health` until it responds 2xx or the overall
 * READINESS_TIMEOUT_MS deadline passes. Each individual probe is capped at 5s
 * via AbortSignal.timeout; failed probes (non-2xx or network error) are
 * retried every READINESS_POLL_MS. Throws on timeout, including the last
 * observed error so boot failures are diagnosable from the log alone.
 */
async function waitForGateway(baseUrl: string, label: string): Promise {
  const deadline = Date.now() + READINESS_TIMEOUT_MS;
  let lastError: string = '';

  while (Date.now() < deadline) {
    try {
      const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
      if (res.ok) {
        console.log(`[seed] ${label} is ready (${baseUrl})`);
        return;
      }
      lastError = `HTTP ${res.status}`;
    } catch (err) {
      lastError = err instanceof Error ? err.message : String(err);
    }
    // Back off before the next probe; deadline is re-checked at the top.
    await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
  }

  throw new Error(
    `[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS}ms — last error: ${lastError}`,
  );
}

// ─── Enrollment flow ──────────────────────────────────────────────────────────

/**
 * Walk the full enrollment flow for one grant:
 * 1. Create a peer keypair on Server A (generates CSR).
 * 2. Create a grant on Server B referencing the peer.
 * 3. Generate an enrollment token on Server B.
 * 4. Redeem the token on Server B with A's CSR → get cert back.
 * 5. Store the cert on Server A's peer record.
 *
 * Returns the activated grant record + peer info.
 *
 * NOTE(review): this flow reads the module-level SERVER_A_URL / SERVER_B_URL /
 * ADMIN_KEY_A / ADMIN_KEY_B constants directly, so the per-call URL/key
 * overrides accepted by runSeed() never reach it — confirm whether the
 * overrides were meant to be threaded through here.
 */
async function enrollGrant(opts: {
  label: string;
  subjectUserId: string;
  scope: unknown;
}): Promise<{ grant: GrantRecord; peer: PeerRecord & { grantId: string } }> {
  const { label, subjectUserId, scope } = opts;
  console.log(`\n[seed] Enrolling grant for scope variant ${label}...`);

  // 1. Create peer keypair on Server A
  const peer = await adminFetch(SERVER_A_URL, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminKey: ADMIN_KEY_A,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}`,
      displayName: `Harness Peer ${label}`,
      endpointUrl: `${SERVER_B_URL}`,
    },
  });
  console.log(`[seed] Created peer on A: ${peer.peerId}`);

  // 2. Create grant on Server B
  const grant = await adminFetch(SERVER_B_URL, '/api/admin/federation/grants', {
    method: 'POST',
    adminKey: ADMIN_KEY_B,
    body: {
      peerId: peer.peerId,
      subjectUserId,
      scope,
    },
  });
  console.log(`[seed] Created grant on B: ${grant.id} (status: ${grant.status})`);

  // 3. Generate enrollment token on Server B (short TTL: the seed redeems it
  //    immediately below, so 900s is ample headroom).
  const tokenResult = await adminFetch(
    SERVER_B_URL,
    `/api/admin/federation/grants/${grant.id}/tokens`,
    { method: 'POST', adminKey: ADMIN_KEY_B, body: { ttlSeconds: 900 } },
  );
  console.log(`[seed] Enrollment token: ${tokenResult.token.slice(0, 8)}...`);

  // 4. Redeem token on Server B with A's CSR
  // The enrollment endpoint is not admin-guarded — it uses the one-time token.
  // Plain fetch (not adminFetch) is deliberate here for that reason.
  const redeemUrl = `${SERVER_B_URL}/api/federation/enrollment/${tokenResult.token}`;
  const redeemRes = await fetch(redeemUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ csrPem: peer.csrPem }),
  });

  if (!redeemRes.ok) {
    // Best-effort body capture: redemption failures are fatal to the seed.
    const body = await redeemRes.text().catch(() => '(no body)');
    throw new Error(`Enrollment redemption failed: ${redeemRes.status} — ${body}`);
  }

  const redeemResult = (await redeemRes.json()) as EnrollmentRedeemResult;
  console.log(`[seed] Cert issued (${redeemResult.certPem.length} bytes)`);

  // 5. Store cert on Server A peer record
  await adminFetch(SERVER_A_URL, `/api/admin/federation/peers/${peer.peerId}/cert`, {
    method: 'PATCH',
    adminKey: ADMIN_KEY_A,
    body: { certPem: redeemResult.certPem },
  });
  console.log(`[seed] Cert stored on A — peer ${peer.peerId} is now active`);

  // Verify grant flipped to active on B
  // NOTE(review): this only logs the status — it never asserts it is
  // 'active', so a grant that failed to flip would pass silently. Consider
  // throwing here if activeGrant.status !== 'active'.
  const activeGrant = await adminFetch(
    SERVER_B_URL,
    `/api/admin/federation/grants/${grant.id}`,
    { adminKey: ADMIN_KEY_B },
  );
  console.log(`[seed] Grant status on B: ${activeGrant.status}`);

  return { grant: activeGrant, peer: { ...peer, grantId: grant.id } };
}

// ─── Test data insertion ──────────────────────────────────────────────────────

/**
 * Insert representative test data on Server B via its admin APIs.
 *
 * NOTE: The gateway's task/note/credential APIs require an authenticated user
 * session. For the harness, we seed via admin-level endpoints if available,
 * or document the gap here for M3-11 to fill in with proper user session seeding.
 *
 * ASSUMPTION: Server B exposes POST /api/admin/tasks (or similar) for test data.
 * If that endpoint does not yet exist, this function logs a warning and skips
 * without failing — M3-11 will add the session-based seeding path.
+ */ +async function seedTestData(subjectUserId: string, scopeLabel: string): Promise { + console.log(`\n[seed] Seeding test data on Server B for ${scopeLabel}...`); + + const testTasks = [ + { + title: `${scopeLabel} Task 1`, + description: 'Federation harness test task', + userId: subjectUserId, + }, + { + title: `${scopeLabel} Task 2`, + description: 'Team-scoped test task', + userId: subjectUserId, + teamId: 'T1', + }, + ]; + + const testNotes = [ + { + title: `${scopeLabel} Note 1`, + content: 'Personal note for federation test', + userId: subjectUserId, + }, + ]; + + // Attempt to insert — tolerate 404 (endpoint not yet implemented) + for (const task of testTasks) { + try { + await adminFetch(SERVER_B_URL, '/api/admin/tasks', { + method: 'POST', + adminKey: ADMIN_KEY_B, + body: task, + }); + console.log(`[seed] Inserted task: "${task.title}"`); + } catch (err) { + const msg = err instanceof Error ? err.message : String(err); + if (msg.includes('404') || msg.includes('Cannot POST')) { + console.warn( + `[seed] WARN: /api/admin/tasks not found — skipping task insertion (expected until M3-11)`, + ); + break; + } + throw err; + } + } + + for (const note of testNotes) { + try { + await adminFetch(SERVER_B_URL, '/api/admin/notes', { + method: 'POST', + adminKey: ADMIN_KEY_B, + body: note, + }); + console.log(`[seed] Inserted note: "${note.title}"`); + } catch (err) { + const msg = err instanceof Error ? 
err.message : String(err); + if (msg.includes('404') || msg.includes('Cannot POST')) { + console.warn( + `[seed] WARN: /api/admin/notes not found — skipping note insertion (expected until M3-11)`, + ); + break; + } + throw err; + } + } + + console.log(`[seed] Test data seeding for ${scopeLabel} complete.`); +} + +// ─── Main entrypoint ────────────────────────────────────────────────────────── + +export async function runSeed(opts?: { + serverAUrl?: string; + serverBUrl?: string; + adminKeyA?: string; + adminKeyB?: string; + subjectUserIds?: { variantA: string; variantB: string; variantC: string }; +}): Promise { + const aUrl = opts?.serverAUrl ?? SERVER_A_URL; + const bUrl = opts?.serverBUrl ?? SERVER_B_URL; + const keyA = opts?.adminKeyA ?? ADMIN_KEY_A; + const keyB = opts?.adminKeyB ?? ADMIN_KEY_B; + + // Use provided or default subject user IDs + // In a real run these would be real user UUIDs from Server B's DB. + // For the harness, we use deterministic UUIDs that the seed bootstrap creates. + const subjectIds = opts?.subjectUserIds ?? 
{
    variantA: '00000000-0000-0000-0000-000000000001',
    variantB: '00000000-0000-0000-0000-000000000002',
    variantC: '00000000-0000-0000-0000-000000000003',
  };

  console.log('[seed] Waiting for gateways to be ready...');
  await Promise.all([waitForGateway(aUrl, 'Server A'), waitForGateway(bUrl, 'Server B')]);

  // Enroll all three scope variants in parallel — each variant targets a
  // distinct peer/grant, so the flows are independent.
  console.log('\n[seed] Enrolling scope variants...');
  const [resultA, resultB, resultC] = await Promise.all([
    enrollGrant({ label: 'A', subjectUserId: subjectIds.variantA, scope: SCOPE_VARIANT_A }),
    enrollGrant({ label: 'B', subjectUserId: subjectIds.variantB, scope: SCOPE_VARIANT_B }),
    enrollGrant({ label: 'C', subjectUserId: subjectIds.variantC, scope: SCOPE_VARIANT_C }),
  ]);

  // Seed test data on Server B for each scope variant
  await Promise.all([
    seedTestData(subjectIds.variantA, 'A'),
    seedTestData(subjectIds.variantB, 'B'),
    seedTestData(subjectIds.variantC, 'C'),
  ]);

  const result: SeedResult = {
    serverAUrl: aUrl,
    serverBUrl: bUrl,
    grants: {
      variantA: resultA.grant,
      variantB: resultB.grant,
      variantC: resultC.grant,
    },
    peers: {
      variantA: resultA.peer,
      variantB: resultB.peer,
      variantC: resultC.peer,
    },
  };

  console.log('\n[seed] Seed complete.');
  console.log('[seed] Summary:');
  console.log(`  Variant A grant: ${result.grants.variantA.id} (${result.grants.variantA.status})`);
  console.log(`  Variant B grant: ${result.grants.variantB.id} (${result.grants.variantB.status})`);
  console.log(`  Variant C grant: ${result.grants.variantC.id} (${result.grants.variantC.status})`);

  return result;
}

// ─── CLI entry ────────────────────────────────────────────────────────────────

// Run the seed only when this file is the entry script. Node resolves
// process.argv[1] to the entry script's path, so comparing the full resolved
// module path fixes two defects in the old basename-only check
// (`endsWith(argv[1].split('/').pop())`): an unrelated entry script that
// merely shared this file's basename (e.g. another "seed.ts") would have
// triggered a seed run on import, and `split('/')` mishandled Windows-style
// path separators.
// NOTE(review): assumes argv[1] is absolute and not reached via a symlink —
// confirm against how the repo invokes this script (node vs tsx).
const isCli =
  process.argv[1] != null && fileURLToPath(import.meta.url) === process.argv[1];

if (isCli) {
  const shouldBoot = process.argv.includes('--boot');

  if (shouldBoot) {
    // Convenience path: bring up the two-gateway compose stack before seeding.
    // stdio: 'inherit' streams docker compose output straight to the console.
    console.log('[seed] --boot flag detected — starting compose stack...');
    execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
  }

  runSeed()
    .then(() => {
      process.exit(0);
    })
    .catch((err) => {
      console.error('[seed] Fatal:', err);
      process.exit(1);
    });
}