Compare commits

..

2 Commits

Author SHA1 Message Date
Jarvis
495b3869c2 fix(types): capabilities response — add filters + rate_limit stub + tighten supported_verbs (review remediation)
Some checks are pending
ci/woodpecker/push/ci Pipeline is running
ci/woodpecker/pr/ci Pipeline is running
- HIGH-1: add `filters` field (mirrors FederationScope PRD §8.1) and `rate_limit` stub (PRD §9.1) to FederationCapabilitiesResponseSchema; M4 populates remaining/reset_at
- MED-3: tighten supported_verbs from z.array(z.string()) to z.array(z.enum(FEDERATION_VERBS)) closed enum
- Add 10 new test cases covering filters shape, rate_limit full/minimal/absent/invalid, and invalid verb rejection

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-23 20:34:47 -05:00
Jarvis
ac475d240d feat(types): federation v1 wire-format DTOs (FED-M3-01)
Some checks failed
ci/woodpecker/pr/ci Pipeline failed
ci/woodpecker/push/ci Pipeline failed
Add packages/types/src/federation/ with Zod v4 schemas and inferred TS
types for the M3 federation wire protocol. Includes verbs constant,
request schema, list/get/capabilities response factories, _source tag
helper, and typed error envelope + exception hierarchy. 46 unit tests;
typecheck, lint, and format:check all green.

Closes #462

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-23 20:20:50 -05:00
8 changed files with 37 additions and 1613 deletions

View File

@@ -24,11 +24,10 @@
*/
import 'reflect-metadata';
import { describe, it, expect, vi, beforeEach, beforeAll } from 'vitest';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { GoneException, NotFoundException } from '@nestjs/common';
import type { Db } from '@mosaicstack/db';
import { EnrollmentService } from '../enrollment.service.js';
import { makeSelfSignedCert } from './helpers/test-cert.js';
// ---------------------------------------------------------------------------
// Test constants
@@ -39,18 +38,10 @@ const PEER_ID = 'p2222222-2222-2222-2222-222222222222';
const USER_ID = 'u3333333-3333-3333-3333-333333333333';
const TOKEN = 'a'.repeat(64); // 64-char hex
// Real self-signed EC P-256 cert — populated once in beforeAll.
// Required because EnrollmentService.extractCertNotAfter calls new X509Certificate(certPem)
// with strict parsing (PR #501 HIGH-2: no silent fallback).
let REAL_CERT_PEM: string;
const MOCK_CHAIN_PEM = () => REAL_CERT_PEM + REAL_CERT_PEM;
const MOCK_CERT_PEM = '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n';
const MOCK_CHAIN_PEM = MOCK_CERT_PEM + MOCK_CERT_PEM;
const MOCK_SERIAL = 'ABCD1234';
beforeAll(async () => {
REAL_CERT_PEM = await makeSelfSignedCert();
});
// ---------------------------------------------------------------------------
// Factory helpers
// ---------------------------------------------------------------------------
@@ -112,27 +103,11 @@ function makeDb({
const claimUpdateMock = vi.fn().mockReturnValue({ set: setClaimMock });
// transaction(cb) — cb receives txMock; txMock has update + insert
//
// The tx mock must support two tx.update() call patterns (CRIT-2, PR #501):
// 1. Grant activation: .update().set().where().returning() → resolves to [{ id }]
// 2. Peer update: .update().set().where() → resolves to undefined
//
// We achieve this by making txWhereUpdate return an object with BOTH a thenable
// interface (so `await tx.update().set().where()` works) AND a .returning() method.
const txGrantActivatedRow = { id: GRANT_ID };
const txReturningMock = vi.fn().mockResolvedValue([txGrantActivatedRow]);
const txWhereUpdate = vi.fn().mockReturnValue({
// .returning() for grant activation (first tx.update call)
returning: txReturningMock,
// thenables so `await tx.update().set().where()` also works for peer update
then: (resolve: (v: undefined) => void) => resolve(undefined),
catch: () => undefined,
finally: () => undefined,
});
const txSetMock = vi.fn().mockReturnValue({ where: txWhereUpdate });
const txUpdateMock = vi.fn().mockReturnValue({ set: txSetMock });
const txInsertValues = vi.fn().mockResolvedValue(undefined);
const txInsertMock = vi.fn().mockReturnValue({ values: txInsertValues });
const txWhereUpdate = vi.fn().mockResolvedValue(undefined);
const txSetMock = vi.fn().mockReturnValue({ where: txWhereUpdate });
const txUpdateMock = vi.fn().mockReturnValue({ set: txSetMock });
const txMock = { update: txUpdateMock, insert: txInsertMock };
const transactionMock = vi
.fn()
@@ -157,7 +132,6 @@ function makeDb({
txInsertValues,
txInsertMock,
txWhereUpdate,
txReturningMock,
txSetMock,
txUpdateMock,
txMock,
@@ -172,13 +146,11 @@ function makeDb({
function makeCaService() {
return {
// REAL_CERT_PEM is populated by beforeAll — safe to reference via closure here
// because makeCaService() is only called after the suite's beforeAll runs.
issueCert: vi.fn().mockImplementation(async () => ({
certPem: REAL_CERT_PEM,
certChainPem: MOCK_CHAIN_PEM(),
issueCert: vi.fn().mockResolvedValue({
certPem: MOCK_CERT_PEM,
certChainPem: MOCK_CHAIN_PEM,
serialNumber: MOCK_SERIAL,
})),
}),
};
}
@@ -329,29 +301,29 @@ describe('EnrollmentService.redeem — success path', () => {
});
caService.issueCert.mockImplementation(async () => {
callOrder.push('issueCert');
return { certPem: REAL_CERT_PEM, certChainPem: MOCK_CHAIN_PEM(), serialNumber: MOCK_SERIAL };
return { certPem: MOCK_CERT_PEM, certChainPem: MOCK_CHAIN_PEM, serialNumber: MOCK_SERIAL };
});
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(callOrder).toEqual(['claim', 'issueCert']);
});
it('calls CaService.issueCert with grantId, subjectUserId, csrPem, ttlSeconds=300', async () => {
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(caService.issueCert).toHaveBeenCalledWith(
expect.objectContaining({
grantId: GRANT_ID,
subjectUserId: USER_ID,
csrPem: '---CSR---',
csrPem: MOCK_CERT_PEM,
ttlSeconds: 300,
}),
);
});
it('runs activate grant + peer update + audit inside a transaction', async () => {
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(db._mocks.transactionMock).toHaveBeenCalledOnce();
// tx.update called twice: activate grant + update peer
@@ -361,17 +333,17 @@ describe('EnrollmentService.redeem — success path', () => {
});
it('activates grant (sets status=active) inside the transaction', async () => {
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(db._mocks.txSetMock).toHaveBeenCalledWith(expect.objectContaining({ status: 'active' }));
});
it('updates the federationPeers row with certPem, certSerial, state=active inside the transaction', async () => {
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(db._mocks.txSetMock).toHaveBeenCalledWith(
expect.objectContaining({
certPem: REAL_CERT_PEM,
certPem: MOCK_CERT_PEM,
certSerial: MOCK_SERIAL,
state: 'active',
}),
@@ -379,7 +351,7 @@ describe('EnrollmentService.redeem — success path', () => {
});
it('inserts an audit log row inside the transaction', async () => {
await service.redeem(TOKEN, '---CSR---');
await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(db._mocks.txInsertValues).toHaveBeenCalledWith(
expect.objectContaining({
@@ -391,11 +363,11 @@ describe('EnrollmentService.redeem — success path', () => {
});
it('returns { certPem, certChainPem } from CaService', async () => {
const result = await service.redeem(TOKEN, '---CSR---');
const result = await service.redeem(TOKEN, MOCK_CERT_PEM);
expect(result).toEqual({
certPem: REAL_CERT_PEM,
certChainPem: MOCK_CHAIN_PEM(),
certPem: MOCK_CERT_PEM,
certChainPem: MOCK_CHAIN_PEM,
});
});
});

View File

@@ -1,138 +0,0 @@
/**
* Test helpers for generating real X.509 PEM certificates in unit tests.
*
* PR #501 (FED-M2-11) introduced strict `new X509Certificate(certPem)` parsing
* in both EnrollmentService.extractCertNotAfter and CaService.issueCert — dummy
* cert strings now throw `error:0680007B:asn1 encoding routines::header too long`.
*
* These helpers produce minimal but cryptographically valid self-signed EC P-256
* certificates via @peculiar/x509 + Node.js webcrypto, suitable for test mocks.
*
* Two variants:
* - makeSelfSignedCert() Plain cert — satisfies node:crypto X509Certificate parse.
* - makeMosaicIssuedCert(opts) Cert with custom Mosaic OID extensions — satisfies the
* CRIT-1 OID presence + value checks in CaService.issueCert.
*/
import { webcrypto } from 'node:crypto';
import {
X509CertificateGenerator,
Extension,
KeyUsagesExtension,
KeyUsageFlags,
BasicConstraintsExtension,
cryptoProvider,
} from '@peculiar/x509';
// ---------------------------------------------------------------------------
// Internal helpers
// ---------------------------------------------------------------------------
/**
 * Encode a string as an ASN.1 UTF8String TLV:
 * tag byte 0x0C, then a single length byte (valid only for payloads ≤ 127
 * bytes), then the raw UTF-8 bytes.
 *
 * CaService.issueCert reads the extension value as:
 *   decoder.decode(grantIdExt.value.slice(2))
 * i.e. it strips exactly two bytes (tag + length) and UTF-8-decodes the rest,
 * so this function must produce exactly that layout as the OCTET STRING content.
 */
function encodeUtf8String(value: string): Uint8Array {
  const payload = new TextEncoder().encode(value);
  // Single-byte DER length encoding only covers 0–127; anything longer would
  // need the multi-byte long form, which the production parser cannot read.
  if (payload.length > 127) {
    throw new Error('encodeUtf8String: value too long for single-byte length encoding');
  }
  // 0x0C is the ASN.1 UTF8String tag.
  return Uint8Array.of(0x0c, payload.length, ...payload);
}
// ---------------------------------------------------------------------------
// Mosaic OID constants (must match production CaService)
// ---------------------------------------------------------------------------
const OID_MOSAIC_GRANT_ID = '1.3.6.1.4.1.99999.1';
const OID_MOSAIC_SUBJECT_USER_ID = '1.3.6.1.4.1.99999.2';
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Generate a minimal self-signed EC P-256 certificate valid for 1 day.
 * CN=harness-test, no custom extensions.
 *
 * Suitable for:
 *  - EnrollmentService.extractCertNotAfter (just needs parseable PEM)
 *  - Any mock that returns certPem / certChainPem without OID checks
 */
export async function makeSelfSignedCert(): Promise<string> {
  // Route @peculiar/x509 through Node's webcrypto implementation. Node 19+
  // exposes it as globalThis.crypto, but setting it explicitly keeps Node 18
  // working as well.
  cryptoProvider.set(webcrypto as unknown as Parameters<typeof cryptoProvider.set>[0]);

  const signingAlg = { name: 'ECDSA', namedCurve: 'P-256', hash: 'SHA-256' } as const;
  const keyPair = await webcrypto.subtle.generateKey(signingAlg, false, ['sign', 'verify']);

  const notBefore = new Date();
  const notAfter = new Date(notBefore.getTime() + 86_400_000); // +1 day

  const cert = await X509CertificateGenerator.createSelfSigned({
    serialNumber: '01',
    name: 'CN=harness-test',
    notBefore,
    notAfter,
    signingAlgorithm: signingAlg,
    keys: keyPair,
    extensions: [
      new BasicConstraintsExtension(false),
      new KeyUsagesExtension(KeyUsageFlags.digitalSignature),
    ],
  });
  return cert.toString('pem');
}
/**
 * Generate a self-signed EC P-256 certificate carrying the two custom Mosaic
 * OID extensions that CaService.issueCert's CRIT-1 check requires:
 *   OID 1.3.6.1.4.1.99999.1 → mosaic_grant_id         (value = opts.grantId)
 *   OID 1.3.6.1.4.1.99999.2 → mosaic_subject_user_id  (value = opts.subjectUserId)
 *
 * Each extension value is an OCTET STRING wrapping an ASN.1 UTF8String TLV,
 * matching the production parser's `.slice(2)` assumption.
 */
export async function makeMosaicIssuedCert(opts: {
  grantId: string;
  subjectUserId: string;
}): Promise<string> {
  // Route @peculiar/x509 through Node's webcrypto implementation.
  cryptoProvider.set(webcrypto as unknown as Parameters<typeof cryptoProvider.set>[0]);

  const signingAlg = { name: 'ECDSA', namedCurve: 'P-256', hash: 'SHA-256' } as const;
  const keyPair = await webcrypto.subtle.generateKey(signingAlg, false, ['sign', 'verify']);

  const notBefore = new Date();
  const notAfter = new Date(notBefore.getTime() + 86_400_000); // +1 day

  const cert = await X509CertificateGenerator.createSelfSigned({
    serialNumber: '01',
    name: 'CN=mosaic-issued-test',
    notBefore,
    notAfter,
    signingAlgorithm: signingAlg,
    keys: keyPair,
    extensions: [
      new BasicConstraintsExtension(false),
      new KeyUsagesExtension(KeyUsageFlags.digitalSignature),
      // mosaic_grant_id — OID 1.3.6.1.4.1.99999.1
      new Extension(OID_MOSAIC_GRANT_ID, false, encodeUtf8String(opts.grantId)),
      // mosaic_subject_user_id — OID 1.3.6.1.4.1.99999.2
      new Extension(OID_MOSAIC_SUBJECT_USER_ID, false, encodeUtf8String(opts.subjectUserId)),
    ],
  });
  return cert.toString('pem');
}

View File

@@ -20,10 +20,9 @@
*/
import 'reflect-metadata';
import { describe, it, expect, vi, beforeEach, beforeAll, type Mock } from 'vitest';
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
import { jwtVerify, exportJWK, generateKeyPair } from 'jose';
import { Pkcs10CertificateRequestGenerator } from '@peculiar/x509';
import { makeMosaicIssuedCert } from './__tests__/helpers/test-cert.js';
// ---------------------------------------------------------------------------
// Mock node:https BEFORE importing CaService so the mock is in place when
@@ -75,11 +74,6 @@ const FAKE_CA_PEM = FAKE_CERT_PEM;
const GRANT_ID = 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11';
const SUBJECT_USER_ID = 'b1ffcd00-0d1c-5f09-cc7e-7cc0ce491b22';
// Real self-signed cert containing both Mosaic OID extensions — populated in beforeAll.
// Required because CaService.issueCert performs CRIT-1 OID presence/value checks on the
// response cert (PR #501 — strict parsing, no silent fallback).
let realIssuedCertPem: string;
// ---------------------------------------------------------------------------
// Generate a real EC P-256 key pair and CSR for integration-style tests
// ---------------------------------------------------------------------------
@@ -200,15 +194,6 @@ function makeHttpsMock(statusCode: number, body: unknown, errorMsg?: string): vo
describe('CaService', () => {
let service: CaService;
beforeAll(async () => {
// Generate a cert with the two Mosaic OIDs so that CaService.issueCert's
// CRIT-1 OID checks pass when mock step-ca returns it as `crt`.
realIssuedCertPem = await makeMosaicIssuedCert({
grantId: GRANT_ID,
subjectUserId: SUBJECT_USER_ID,
});
});
beforeEach(() => {
vi.clearAllMocks();
service = new CaService();
@@ -241,9 +226,9 @@ describe('CaService', () => {
// Now test that the service's validateCsr accepts it.
// We call it indirectly via issueCert with a successful mock.
makeHttpsMock(200, { crt: realIssuedCertPem, certChain: [realIssuedCertPem, FAKE_CA_PEM] });
makeHttpsMock(200, { crt: FAKE_CERT_PEM, certChain: [FAKE_CERT_PEM, FAKE_CA_PEM] });
const result = await service.issueCert(makeReq({ csrPem: realCsrPem }));
expect(result.certPem).toBe(realIssuedCertPem);
expect(result.certPem).toBe(FAKE_CERT_PEM);
});
it('throws INVALID_CSR for a malformed PEM-shaped CSR', async () => {
@@ -266,14 +251,14 @@ describe('CaService', () => {
it('returns IssuedCertDto on success (certChain present)', async () => {
if (!realCsrPem) realCsrPem = await generateRealCsr();
makeHttpsMock(200, {
crt: realIssuedCertPem,
certChain: [realIssuedCertPem, FAKE_CA_PEM],
crt: FAKE_CERT_PEM,
certChain: [FAKE_CERT_PEM, FAKE_CA_PEM],
});
const result = await service.issueCert(makeReq());
expect(result.certPem).toBe(realIssuedCertPem);
expect(result.certChainPem).toContain(realIssuedCertPem);
expect(result.certPem).toBe(FAKE_CERT_PEM);
expect(result.certChainPem).toContain(FAKE_CERT_PEM);
expect(result.certChainPem).toContain(FAKE_CA_PEM);
expect(typeof result.serialNumber).toBe('string');
});
@@ -285,14 +270,14 @@ describe('CaService', () => {
it('builds certChainPem from crt+ca when certChain is absent', async () => {
if (!realCsrPem) realCsrPem = await generateRealCsr();
makeHttpsMock(200, {
crt: realIssuedCertPem,
crt: FAKE_CERT_PEM,
ca: FAKE_CA_PEM,
});
const result = await service.issueCert(makeReq());
expect(result.certPem).toBe(realIssuedCertPem);
expect(result.certChainPem).toContain(realIssuedCertPem);
expect(result.certPem).toBe(FAKE_CERT_PEM);
expect(result.certChainPem).toContain(FAKE_CERT_PEM);
expect(result.certChainPem).toContain(FAKE_CA_PEM);
});
@@ -302,12 +287,12 @@ describe('CaService', () => {
it('falls back to certPem alone when certChain and ca are absent', async () => {
if (!realCsrPem) realCsrPem = await generateRealCsr();
makeHttpsMock(200, { crt: realIssuedCertPem });
makeHttpsMock(200, { crt: FAKE_CERT_PEM });
const result = await service.issueCert(makeReq());
expect(result.certPem).toBe(realIssuedCertPem);
expect(result.certChainPem).toBe(realIssuedCertPem);
expect(result.certPem).toBe(FAKE_CERT_PEM);
expect(result.certChainPem).toBe(FAKE_CERT_PEM);
});
// -------------------------------------------------------------------------
@@ -413,7 +398,7 @@ describe('CaService', () => {
statusCode: 200,
on: (event: string, cb: (chunk?: Buffer) => void) => {
if (event === 'data') {
cb(Buffer.from(JSON.stringify({ crt: realIssuedCertPem })));
cb(Buffer.from(JSON.stringify({ crt: FAKE_CERT_PEM })));
}
if (event === 'end') {
cb();
@@ -570,7 +555,7 @@ describe('CaService', () => {
statusCode: 200,
on: (event: string, cb: (chunk?: Buffer) => void) => {
if (event === 'data') {
cb(Buffer.from(JSON.stringify({ crt: realIssuedCertPem })));
cb(Buffer.from(JSON.stringify({ crt: FAKE_CERT_PEM })));
}
if (event === 'end') {
cb();

View File

@@ -30,7 +30,6 @@ export default tseslint.config(
'apps/gateway/vitest.config.ts',
'packages/storage/vitest.config.ts',
'packages/mosaic/__tests__/*.ts',
'tools/federation-harness/*.ts',
],
},
},

View File

@@ -1,254 +0,0 @@
# Federation Test Harness
Local two-gateway federation test infrastructure for Mosaic Stack M3+.
This harness boots two real gateway instances (`gateway-a`, `gateway-b`) on a
shared Docker bridge network, each backed by its own Postgres (pgvector) +
Valkey, sharing a single Step-CA. It is the test bed for all M3+ federation
E2E tests.
## Prerequisites
- Docker with Compose v2 (`docker compose version` ≥ 2.20)
- pnpm (for running via repo scripts)
- `infra/step-ca/dev-password` must exist (copy from `infra/step-ca/dev-password.example`)
## Network Topology
```
Host machine
├── localhost:14001 → gateway-a (Server A — home / requesting)
├── localhost:14002 → gateway-b (Server B — work / serving)
├── localhost:15432 → postgres-a
├── localhost:15433 → postgres-b
├── localhost:16379 → valkey-a
├── localhost:16380 → valkey-b
└── localhost:19000 → step-ca (shared CA)
Docker network: fed-test-net (bridge)
gateway-a ←──── mTLS ────→ gateway-b
↘ ↗
step-ca
```
Ports are chosen to avoid collision with the base dev stack (5433, 6380, 14242, 9000).
## Starting the Harness
```bash
# From repo root
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
# Wait for all services to be healthy (~60-90s on first boot due to NestJS cold start)
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml ps
```
## Seeding Test Data
The seed script provisions three grant scope variants (A, B, C) and walks the
full enrollment flow so Server A ends up with active peers pointing at Server B.
```bash
# Assumes stack is already running
pnpm tsx tools/federation-harness/seed.ts
# Or boot + seed in one step
pnpm tsx tools/federation-harness/seed.ts --boot
```
### Scope Variants
| Variant | Resources | Filters | Excluded | Purpose |
| ------- | ------------------ | ---------------------------------- | ----------- | ------------------------------- |
| A | tasks, notes | include_personal: true | (none) | Personal data federation |
| B | tasks | include_teams: ['T1'], no personal | (none) | Team-scoped, no personal |
| C | tasks, credentials | include_personal: true | credentials | Sanity: excluded wins over list |
## Using from Vitest
```ts
import {
bootHarness,
tearDownHarness,
serverA,
serverB,
seed,
} from '../../tools/federation-harness/harness.js';
import type { HarnessHandle } from '../../tools/federation-harness/harness.js';
let handle: HarnessHandle;
beforeAll(async () => {
handle = await bootHarness();
}, 180_000); // allow 3 min for Docker pull + NestJS cold start
afterAll(async () => {
await tearDownHarness(handle);
});
test('variant A: list tasks returns personal tasks', async () => {
// NOTE: Only 'all' is supported for now — per-variant narrowing is M3-11.
const seedResult = await seed(handle, 'all');
const a = serverA(handle);
const res = await fetch(`${a.baseUrl}/api/federation/tasks`, {
headers: { 'x-federation-grant': seedResult.grants.variantA.id },
});
expect(res.status).toBe(200);
});
```
> **Note:** `seed()` bootstraps a fresh admin user on each gateway via
> `POST /api/bootstrap/setup`. Both gateways must have zero users (pristine DB).
> If either gateway already has users, `seed()` throws with a clear error.
> Reset state with `docker compose down -v`.
The `bootHarness()` function is **idempotent**: if both gateways are already
healthy, it reuses the running stack and returns `ownedStack: false`. Tests
should not call `tearDownHarness` when `ownedStack` is false unless they
explicitly want to shut down a shared stack.
## Vitest Config (pnpm test:federation)
Add to `vitest.config.ts` at repo root (or a dedicated config):
```ts
// vitest.federation.config.ts
import { defineConfig } from 'vitest/config';
export default defineConfig({
test: {
include: ['**/*.federation.test.ts'],
testTimeout: 60_000,
hookTimeout: 180_000,
reporters: ['verbose'],
},
});
```
Then add to root `package.json`:
```json
"test:federation": "vitest run --config vitest.federation.config.ts"
```
## Nuking State
```bash
# Remove containers AND volumes (ephemeral state — CA keys, DBs, everything)
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v
```
On next `up`, Step-CA re-initialises from scratch and generates new CA keys.
## Step-CA Root Certificate
The CA root lives in the `fed-harness-step-ca` Docker volume at
`/home/step/certs/root_ca.crt`. To extract it to the host:
```bash
docker run --rm \
-v fed-harness-step-ca:/home/step \
alpine cat /home/step/certs/root_ca.crt > /tmp/fed-harness-root-ca.crt
```
## Troubleshooting
### Port conflicts
Default host ports: 14001, 14002, 15432, 15433, 16379, 16380, 19000.
Override via environment variables before `docker compose up`:
```bash
GATEWAY_A_HOST_PORT=14101 GATEWAY_B_HOST_PORT=14102 \
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
```
### Image pull failures
The gateway image is digest-pinned to:
```
git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
```
(sha-9f1a081, post-#491 IMG-FIX)
If the registry is unreachable, Docker will use the locally cached image if
present. If no local image exists, the compose up will fail with a pull error.
In that case:
1. Ensure you can reach `git.mosaicstack.dev` (VPN, DNS, etc.).
2. Log in: `docker login git.mosaicstack.dev`
3. Pull manually: `docker pull git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02`
### NestJS cold start
Gateway containers take 40–60 seconds to become healthy on first boot (Node.js
module resolution + NestJS DI bootstrap). The `start_period: 60s` in the
compose healthcheck covers this. `bootHarness()` polls for up to 3 minutes.
### Step-CA startup
Step-CA initialises on first boot (generates CA keys). This takes ~5-10s.
The `start_period: 30s` in the healthcheck covers it. Both gateways wait for
Step-CA to be healthy before starting (`depends_on: step-ca: condition: service_healthy`).
### dev-password missing
The Step-CA container requires `infra/step-ca/dev-password` to be mounted.
Copy the example and set a local password:
```bash
cp infra/step-ca/dev-password.example infra/step-ca/dev-password
# Edit the file to set your preferred dev CA password
```
The file is `.gitignore`d — do not commit it.
## Image Digest Note
The gateway image is pinned to `sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02`
(sha-9f1a081). This is the digest promoted by PR #491 (IMG-FIX). The `latest`
tag is forbidden per Mosaic image policy. When a new gateway build is promoted,
update the digest in `docker-compose.two-gateways.yml` and in this file.
## Known Limitations
### BETTER_AUTH_URL enrollment URL bug (upstream production code — not yet fixed)
`apps/gateway/src/federation/federation.controller.ts:145` constructs the
enrollment URL using `process.env['BETTER_AUTH_URL'] ?? 'http://localhost:14242'`.
This is an upstream bug: `BETTER_AUTH_URL` is the Better Auth origin (typically
the web app), not the gateway's own base URL. In non-harness deployments this
produces an enrollment URL pointing to the wrong host or port.
**How the harness handles this:**
1. **In-cluster calls (container-to-container):** The compose file sets
`BETTER_AUTH_URL: 'http://gateway-b:3000'` so the enrollment URL returned by
the gateway uses the Docker internal hostname. This lets other containers in the
`fed-test-net` network resolve and reach Server B's enrollment endpoint.
2. **Host-side URL rewrite (seed script):** The `seed.ts` script runs on the host
machine where `gateway-b` is not a resolvable hostname. Before calling
`fetch(enrollmentUrl, ...)`, the seed script rewrites the URL: it extracts only
the token path segment from `enrollmentUrl` and reassembles the URL using the
host-accessible `serverBUrl` (default: `http://localhost:14002`). This lets the
seed script redeem enrollment tokens from the host without being affected by the
in-cluster hostname in the returned URL.
**TODO:** Fix `federation.controller.ts` to derive the enrollment URL from its own
listening address (e.g. `GATEWAY_BASE_URL` env var or a dedicated
`FEDERATION_ENROLLMENT_BASE_URL` env var) rather than reusing `BETTER_AUTH_URL`.
Tracked as a follow-up to PR #505 — do not bundle with harness changes.
## Permanent Infrastructure
This harness is designed to outlive M3 and be reused by M4+ milestone tests.
It is not a throwaway scaffold — treat it as production test infrastructure:
- Keep it idempotent.
- Do not hardcode test assumptions in the harness layer (put them in tests).
- Update the seed script when new scope variants are needed.
- The README and harness should be kept in sync as the federation API evolves.

View File

@@ -1,247 +0,0 @@
# tools/federation-harness/docker-compose.two-gateways.yml
#
# Two-gateway federation test harness — local-only, no Portainer/Swarm needed.
#
# USAGE (manual):
# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v
#
# USAGE (from harness.ts):
# const handle = await bootHarness();
# ...
# await tearDownHarness(handle);
#
# TOPOLOGY:
# gateway-a — "home" instance (Server A, the requesting side)
# └── postgres-a (pgvector/pg17, port 15432)
# └── valkey-a (port 16379)
# gateway-b — "work" instance (Server B, the serving side)
# └── postgres-b (pgvector/pg17, port 15433)
# └── valkey-b (port 16380)
# step-ca — shared CA for both gateways (port 19000)
#
# All services share the `fed-test-net` bridge network.
# Host port ranges (15432-15433, 16379-16380, 14001-14002, 19000) are chosen
# to avoid collision with the base dev stack (5433, 6380, 14242, 9000).
#
# IMAGE:
# Pinned to the immutable digest sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
# (sha-9f1a081, post-#491 IMG-FIX, smoke-tested locally).
# Update this digest only after a new CI build is promoted to the registry.
#
# STEP-CA:
# Single shared Step-CA instance. Both gateways connect to it.
# CA volume is ephemeral per `docker compose down -v`; regenerated on next up.
# The harness seed script provisions the CA roots cross-trust after first boot.
services:
# ─── Shared Certificate Authority ────────────────────────────────────────────
step-ca:
image: smallstep/step-ca:0.27.4
container_name: fed-harness-step-ca
restart: unless-stopped
ports:
- '${STEP_CA_HOST_PORT:-19000}:9000'
volumes:
- step_ca_data:/home/step
- ../../infra/step-ca/init.sh:/usr/local/bin/mosaic-step-ca-init.sh:ro
- ../../infra/step-ca/templates:/etc/step-ca-templates:ro
- ../../infra/step-ca/dev-password:/run/secrets/ca_password:ro
entrypoint: ['/bin/sh', '/usr/local/bin/mosaic-step-ca-init.sh']
networks:
- fed-test-net
healthcheck:
test:
[
'CMD',
'step',
'ca',
'health',
'--ca-url',
'https://localhost:9000',
'--root',
'/home/step/certs/root_ca.crt',
]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
# ─── Server A — Home / Requesting Gateway ────────────────────────────────────
postgres-a:
image: pgvector/pgvector:pg17
container_name: fed-harness-postgres-a
restart: unless-stopped
ports:
- '${PG_A_HOST_PORT:-15432}:5432'
environment:
POSTGRES_USER: mosaic
POSTGRES_PASSWORD: mosaic
POSTGRES_DB: mosaic
volumes:
- pg_a_data:/var/lib/postgresql/data
- ../../infra/pg-init:/docker-entrypoint-initdb.d:ro
networks:
- fed-test-net
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U mosaic']
interval: 5s
timeout: 3s
retries: 5
valkey-a:
image: valkey/valkey:8-alpine
container_name: fed-harness-valkey-a
restart: unless-stopped
ports:
- '${VALKEY_A_HOST_PORT:-16379}:6379'
volumes:
- valkey_a_data:/data
networks:
- fed-test-net
healthcheck:
test: ['CMD', 'valkey-cli', 'ping']
interval: 5s
timeout: 3s
retries: 5
gateway-a:
image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
# Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally)
container_name: fed-harness-gateway-a
restart: unless-stopped
ports:
- '${GATEWAY_A_HOST_PORT:-14001}:3000'
environment:
MOSAIC_TIER: federated
DATABASE_URL: postgres://mosaic:mosaic@postgres-a:5432/mosaic
VALKEY_URL: redis://valkey-a:6379
GATEWAY_PORT: '3000'
GATEWAY_CORS_ORIGIN: 'http://localhost:14001'
BETTER_AUTH_SECRET: harness-secret-server-a-do-not-use-in-prod
BETTER_AUTH_URL: 'http://gateway-a:3000'
STEP_CA_URL: 'https://step-ca:9000'
FEDERATION_PEER_HOSTNAME: gateway-a
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
# the first admin user. Only valid on a pristine (zero-user) database.
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-a
depends_on:
postgres-a:
condition: service_healthy
valkey-a:
condition: service_healthy
step-ca:
condition: service_healthy
networks:
- fed-test-net
healthcheck:
test:
[
'CMD',
'node',
'-e',
"require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
]
interval: 10s
timeout: 5s
retries: 5
start_period: 60s
# ─── Server B — Work / Serving Gateway ──────────────────────────────────────
postgres-b:
image: pgvector/pgvector:pg17
container_name: fed-harness-postgres-b
restart: unless-stopped
ports:
- '${PG_B_HOST_PORT:-15433}:5432'
environment:
POSTGRES_USER: mosaic
POSTGRES_PASSWORD: mosaic
POSTGRES_DB: mosaic
volumes:
- pg_b_data:/var/lib/postgresql/data
- ../../infra/pg-init:/docker-entrypoint-initdb.d:ro
networks:
- fed-test-net
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U mosaic']
interval: 5s
timeout: 3s
retries: 5
valkey-b:
image: valkey/valkey:8-alpine
container_name: fed-harness-valkey-b
restart: unless-stopped
ports:
- '${VALKEY_B_HOST_PORT:-16380}:6379'
volumes:
- valkey_b_data:/data
networks:
- fed-test-net
healthcheck:
test: ['CMD', 'valkey-cli', 'ping']
interval: 5s
timeout: 3s
retries: 5
gateway-b:
image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
# Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally)
container_name: fed-harness-gateway-b
restart: unless-stopped
ports:
- '${GATEWAY_B_HOST_PORT:-14002}:3000'
environment:
MOSAIC_TIER: federated
DATABASE_URL: postgres://mosaic:mosaic@postgres-b:5432/mosaic
VALKEY_URL: redis://valkey-b:6379
GATEWAY_PORT: '3000'
GATEWAY_CORS_ORIGIN: 'http://localhost:14002'
BETTER_AUTH_SECRET: harness-secret-server-b-do-not-use-in-prod
BETTER_AUTH_URL: 'http://gateway-b:3000'
STEP_CA_URL: 'https://step-ca:9000'
FEDERATION_PEER_HOSTNAME: gateway-b
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
# the first admin user. Only valid on a pristine (zero-user) database.
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-b
depends_on:
postgres-b:
condition: service_healthy
valkey-b:
condition: service_healthy
step-ca:
condition: service_healthy
networks:
- fed-test-net
healthcheck:
test:
[
'CMD',
'node',
'-e',
"require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
]
interval: 10s
timeout: 5s
retries: 5
start_period: 60s
networks:
fed-test-net:
name: fed-test-net
driver: bridge
volumes:
step_ca_data:
name: fed-harness-step-ca
pg_a_data:
name: fed-harness-pg-a
valkey_a_data:
name: fed-harness-valkey-a
pg_b_data:
name: fed-harness-pg-b
valkey_b_data:
name: fed-harness-valkey-b

View File

@@ -1,290 +0,0 @@
/**
* tools/federation-harness/harness.ts
*
* Vitest-consumable helpers for the two-gateway federation harness.
*
* USAGE (in a vitest test file):
*
* import { bootHarness, tearDownHarness, serverA, serverB, seed } from
* '../../tools/federation-harness/harness.js';
*
* let handle: HarnessHandle;
*
* beforeAll(async () => {
* handle = await bootHarness();
* }, 180_000);
*
* afterAll(async () => {
* await tearDownHarness(handle);
* });
*
* test('variant A — list tasks', async () => {
* const seedResult = await seed(handle, 'all');
* const a = serverA(handle);
* const res = await fetch(`${a.baseUrl}/api/federation/list/tasks`, {
* headers: { Authorization: `Bearer ${seedResult.adminTokenA}` },
* });
* expect(res.status).toBe(200);
* });
*
* NOTE: The `seed()` helper currently only supports scenario='all'. Passing any
* other value throws immediately. Per-variant narrowing is deferred to M3-11.
*
* ESM / NodeNext: all imports use .js extensions.
*/
import { execSync, execFileSync } from 'node:child_process';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
import { runSeed, type SeedResult } from './seed.js';
// ─── Types ───────────────────────────────────────────────────────────────────
/** Host-side view of one gateway in the two-gateway harness. */
export interface GatewayAccessor {
  /** Base URL reachable from the host machine, e.g. http://localhost:14001 */
  baseUrl: string;
  /** Bootstrap password used for POST /api/bootstrap/setup on a pristine gateway */
  bootstrapPassword: string;
  /** Internal Docker network hostname (for container-to-container calls) */
  internalHostname: string;
}
/** Handle returned by bootHarness(); threaded through every other helper. */
export interface HarnessHandle {
  /** Server A accessor */
  a: GatewayAccessor;
  /** Server B accessor */
  b: GatewayAccessor;
  /** Absolute path to the docker-compose file */
  composeFile: string;
  /** Whether this instance booted the stack (vs. reusing an existing one) */
  ownedStack: boolean;
  /** Optional seed result if seed() was called */
  seedResult?: SeedResult;
}
/**
 * Scenario to seed. Currently only 'all' is implemented; per-variant narrowing
 * is tracked as M3-11. Passing any other value throws immediately with a clear
 * error rather than silently over-seeding.
 */
export type SeedScenario = 'variantA' | 'variantB' | 'variantC' | 'all';
// ─── Constants ────────────────────────────────────────────────────────────────
// ESM replacement for CommonJS __dirname (NodeNext provides no global __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');
// Host-mapped gateway URLs; overridable for non-default compose port mappings.
const GATEWAY_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const GATEWAY_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
// Defaults must match the ADMIN_BOOTSTRAP_PASSWORD values in the compose file.
const ADMIN_BOOTSTRAP_PASSWORD_A =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';
// Per-gateway readiness budget and delay between health polls.
const READINESS_TIMEOUT_MS = 180_000;
const READINESS_POLL_MS = 3_000;
// ─── Internal helpers ─────────────────────────────────────────────────────────
/**
 * Probe a gateway's /api/health endpoint with a 5s timeout.
 * Any network error, timeout, or non-2xx response reads as "not healthy".
 */
async function isGatewayHealthy(baseUrl: string): Promise<boolean> {
  const healthUrl = `${baseUrl}/api/health`;
  return fetch(healthUrl, { signal: AbortSignal.timeout(5_000) })
    .then((response) => response.ok)
    .catch(() => false);
}
/**
 * Poll both gateways in parallel until each is healthy or its own deadline
 * expires. Each gateway gets an independent READINESS_TIMEOUT_MS budget
 * (the previous doc wrongly said "shared deadline"); polling in parallel
 * avoids the bug where a slow gateway-a consumes the readiness budget before
 * gateway-b is even checked.
 *
 * @throws Error when a gateway is still unhealthy at its deadline.
 */
async function waitForStack(handle: HarnessHandle): Promise<void> {
  const gateways: Array<{ label: string; url: string }> = [
    { label: 'gateway-a', url: handle.a.baseUrl },
    { label: 'gateway-b', url: handle.b.baseUrl },
  ];
  // Single factory so both timeout paths throw the exact same message.
  const timeoutError = (label: string): Error =>
    new Error(
      `[harness] ${label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
    );
  await Promise.all(
    gateways.map(async (gw) => {
      // Each gateway gets its own independent deadline.
      const deadline = Date.now() + READINESS_TIMEOUT_MS;
      process.stdout.write(`[harness] Waiting for ${gw.label}...`);
      while (Date.now() < deadline) {
        if (await isGatewayHealthy(gw.url)) {
          process.stdout.write(` ready\n`);
          return;
        }
        // Fail fast when the next poll interval would overshoot the deadline —
        // no point sleeping only to time out immediately afterwards.
        if (Date.now() + READINESS_POLL_MS > deadline) {
          throw timeoutError(gw.label);
        }
        await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
        process.stdout.write('.');
      }
      throw timeoutError(gw.label);
    }),
  );
}
/**
 * Best-effort check whether the compose stack already has its services up:
 * true when `docker compose ps --format json` reports at least two running
 * services (gateway-a and gateway-b). Any docker or parse failure reads as
 * "not running".
 *
 * NOTE(review): not currently called within this module — bootHarness() probes
 * /api/health directly; confirm whether this helper is still needed.
 */
function isStackRunning(): boolean {
  try {
    const psOutput = execFileSync(
      'docker',
      ['compose', '-f', COMPOSE_FILE, 'ps', '--format', 'json'],
      { encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] },
    ).trim();
    if (psOutput === '') return false;
    // `ps --format json` emits one JSON object per line, one per service.
    let running = 0;
    for (const line of psOutput.split('\n')) {
      if (!line) continue;
      try {
        const service = JSON.parse(line) as { State?: string };
        if (service.State === 'running') running += 1;
      } catch {
        // Non-JSON line (e.g. a warning) — ignore it.
      }
    }
    // Expect at least gateway-a and gateway-b running.
    return running >= 2;
  } catch {
    return false;
  }
}
// ─── Public API ───────────────────────────────────────────────────────────────
/**
 * Boot the harness stack.
 *
 * Idempotent: if the stack is already running and both gateways are healthy,
 * this function reuses the existing stack and returns a handle with
 * `ownedStack: false`. Callers that see `ownedStack: false` should NOT call
 * `tearDownHarness` unless they explicitly want to tear down a pre-existing stack.
 *
 * If the stack is not running, it starts it with `docker compose up -d` and
 * waits for both gateways to pass their /api/health probe.
 *
 * @returns A HarnessHandle describing both gateways and stack ownership.
 */
export async function bootHarness(): Promise<HarnessHandle> {
  const handle: HarnessHandle = {
    a: {
      baseUrl: GATEWAY_A_URL,
      bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_A,
      internalHostname: 'gateway-a',
    },
    b: {
      baseUrl: GATEWAY_B_URL,
      bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_B,
      internalHostname: 'gateway-b',
    },
    composeFile: COMPOSE_FILE,
    ownedStack: false,
  };
  // Check if both gateways are already healthy.
  const [aHealthy, bHealthy] = await Promise.all([
    isGatewayHealthy(handle.a.baseUrl),
    isGatewayHealthy(handle.b.baseUrl),
  ]);
  if (aHealthy && bHealthy) {
    console.log('[harness] Stack already running — reusing existing stack.');
    handle.ownedStack = false;
    return handle;
  }
  console.log('[harness] Starting federation harness stack...');
  // execFileSync (argv vector) instead of a shell string: consistent with
  // isStackRunning() and immune to quoting issues in COMPOSE_FILE's path.
  execFileSync('docker', ['compose', '-f', COMPOSE_FILE, 'up', '-d'], { stdio: 'inherit' });
  handle.ownedStack = true;
  await waitForStack(handle);
  console.log('[harness] Stack is ready.');
  return handle;
}
/**
 * Tear down the harness stack.
 *
 * Runs `docker compose down -v` to remove containers AND volumes (ephemeral state).
 * Only tears down if `handle.ownedStack` is true unless `force` is set — this
 * protects a pre-existing stack that bootHarness() merely reused.
 */
export async function tearDownHarness(
  handle: HarnessHandle,
  opts?: { force?: boolean },
): Promise<void> {
  if (!handle.ownedStack && !opts?.force) {
    console.log(
      '[harness] Stack not owned by this handle — skipping teardown (pass force: true to override).',
    );
    return;
  }
  console.log('[harness] Tearing down federation harness stack...');
  // execFileSync (argv vector) instead of a shell string: avoids shell quoting
  // of the compose-file path and matches isStackRunning()'s invocation style.
  execFileSync('docker', ['compose', '-f', handle.composeFile, 'down', '-v'], {
    stdio: 'inherit',
  });
  console.log('[harness] Stack torn down.');
}
/**
 * Convenience accessor for the Server A gateway — keeps test bodies readable.
 */
export function serverA(handle: HarnessHandle): GatewayAccessor {
  const { a } = handle;
  return a;
}
/**
 * Convenience accessor for the Server B gateway — keeps test bodies readable.
 */
export function serverB(handle: HarnessHandle): GatewayAccessor {
  const { b } = handle;
  return b;
}
/**
 * Seed the harness with test data for one or more scenarios.
 *
 * @param handle The harness handle returned by bootHarness().
 * @param scenario Which scope variants to provision. Only 'all' is supported
 *                 today — any other value throws immediately with a clear
 *                 error. Per-variant narrowing is tracked as M3-11.
 * @returns A SeedResult with grant IDs, peer IDs, and admin tokens for each
 *          gateway; also cached on `handle.seedResult`.
 *
 * IMPORTANT: The harness assumes a pristine database on both gateways. The
 * seed bootstraps an admin user on each gateway via POST /api/bootstrap/setup;
 * if either gateway already has users, seed() throws with a clear error
 * message. Run 'docker compose down -v' to reset state.
 */
export async function seed(
  handle: HarnessHandle,
  scenario: SeedScenario = 'all',
): Promise<SeedResult> {
  if (scenario !== 'all') {
    throw new Error(
      `seed: scenario narrowing not yet implemented; pass "all" for now. ` +
        `Got: "${scenario}". Per-variant narrowing is tracked as M3-11.`,
    );
  }
  const { a, b } = handle;
  const seedResult = await runSeed({
    serverAUrl: a.baseUrl,
    serverBUrl: b.baseUrl,
    adminBootstrapPasswordA: a.bootstrapPassword,
    adminBootstrapPasswordB: b.bootstrapPassword,
  });
  handle.seedResult = seedResult;
  return seedResult;
}

View File

@@ -1,603 +0,0 @@
#!/usr/bin/env tsx
/**
* tools/federation-harness/seed.ts
*
* Provisions test data for the two-gateway federation harness.
* Run via: tsx tools/federation-harness/seed.ts
*
* What this script does:
* 1. (Optional) Boots the compose stack if --boot flag is passed.
* 2. Waits for both gateways to be healthy.
* 3. Bootstraps an admin user + token on each gateway via POST /api/bootstrap/setup.
* 4. Creates three grants on Server B matching the M3 acceptance test scenarios:
* - Scope variant A: tasks + notes, include_personal: true
* - Scope variant B: tasks only, include_teams: ['T1'], exclude T2
* - Scope variant C: tasks + credentials in resources, credentials excluded (sanity)
* 5. For each grant, walks the full enrollment flow:
* a. Server B creates a peer keypair (represents the requesting side).
* b. Server B creates the grant referencing that peer.
* c. Server B issues an enrollment token.
* d. Server A creates its own peer keypair (represents its view of B).
* e. Server A redeems the enrollment token at Server B's enrollment endpoint,
* submitting A's CSR → receives signed cert back.
* f. Server A stores the cert on its peer record → peer becomes active.
* 6. Inserts representative test tasks/notes/credentials on Server B.
*
* IMPORTANT: This script uses the real admin REST API — no direct DB writes.
* It exercises the full enrollment flow as M3 acceptance tests will.
*
* ESM / NodeNext: all imports use .js extensions.
*/
import { execSync } from 'node:child_process';
import { resolve, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';
// ─── Constants ───────────────────────────────────────────────────────────────
// ESM replacement for CommonJS __dirname (NodeNext provides no global __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');
/** Base URLs as seen from the host machine (mapped host ports). */
const SERVER_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const SERVER_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
/**
 * Bootstrap passwords used when calling POST /api/bootstrap/setup on each
 * gateway. Each gateway starts with zero users and requires a one-time setup
 * call before any admin-guarded endpoints can be used. Defaults must match
 * the ADMIN_BOOTSTRAP_PASSWORD values in the compose file.
 */
const ADMIN_BOOTSTRAP_PASSWORD_A =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';
// Readiness probing: total budget per gateway and delay between polls.
const READINESS_TIMEOUT_MS = 120_000;
const READINESS_POLL_MS = 3_000;
// ─── Scope variant definitions (for M3 acceptance tests) ─────────────────────
/** Scope variant A — tasks + notes, personal data included. */
export const SCOPE_VARIANT_A = {
  resources: ['tasks', 'notes'],
  filters: {
    tasks: { include_personal: true },
    notes: { include_personal: true },
  },
  excluded_resources: [] as string[],
  max_rows_per_query: 500,
};
/** Scope variant B — tasks only, team T1 only, no personal. */
export const SCOPE_VARIANT_B = {
  resources: ['tasks'],
  // No notes filter: notes are deliberately absent from `resources` here.
  filters: {
    tasks: { include_teams: ['T1'], include_personal: false },
  },
  excluded_resources: [] as string[],
  max_rows_per_query: 500,
};
/**
 * Scope variant C — tasks + credentials in resources list, but credentials
 * explicitly in excluded_resources. Sanity test: credentials must still be
 * inaccessible even though they appear in resources (exclusion wins).
 */
export const SCOPE_VARIANT_C = {
  resources: ['tasks', 'credentials'],
  filters: {
    tasks: { include_personal: true },
  },
  excluded_resources: ['credentials'],
  max_rows_per_query: 500,
};
// ─── Inline types (no import from packages/types — M3-01 branch not yet merged) ─
/** Options for adminFetch(); method defaults to GET, adminToken is required. */
interface AdminFetchOptions {
  method?: string;
  body?: unknown;
  adminToken: string;
}
/** Peer keypair as returned by POST /api/admin/federation/peers/keypair. */
interface PeerRecord {
  peerId: string;
  csrPem: string;
}
/** Grant record as returned by the admin federation grants endpoints. */
interface GrantRecord {
  id: string;
  status: string;
  scope: unknown;
}
/** Response of POST /api/admin/federation/grants/:id/tokens. */
interface EnrollmentTokenResult {
  token: string;
  expiresAt: string;
  enrollmentUrl: string;
}
/** Response of redeeming an enrollment token (signed cert + chain). */
interface EnrollmentRedeemResult {
  certPem: string;
  certChainPem: string;
}
/** Admin identity created by bootstrapAdmin(). */
interface BootstrapResult {
  adminUserId: string;
  adminToken: string;
}
/** Aggregate output of runSeed() — everything the acceptance tests reference. */
export interface SeedResult {
  serverAUrl: string;
  serverBUrl: string;
  adminTokenA: string;
  adminTokenB: string;
  adminUserIdA: string;
  adminUserIdB: string;
  grants: {
    variantA: GrantRecord;
    variantB: GrantRecord;
    variantC: GrantRecord;
  };
  peers: {
    variantA: PeerRecord & { grantId: string };
    variantB: PeerRecord & { grantId: string };
    variantC: PeerRecord & { grantId: string };
  };
}
// ─── HTTP helpers ─────────────────────────────────────────────────────────────
/**
 * Authenticated admin fetch. Sends `Authorization: Bearer <adminToken>` which
 * is the only path supported by AdminGuard (DB-backed sha256 token lookup).
 * No `x-admin-key` header path exists in the gateway.
 *
 * @throws Error containing method, URL, status, and body text on non-2xx.
 */
async function adminFetch<T>(baseUrl: string, path: string, opts: AdminFetchOptions): Promise<T> {
  const url = `${baseUrl}${path}`;
  const res = await fetch(url, {
    method: opts.method ?? 'GET',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${opts.adminToken}`,
    },
    // Only serialize when a body was actually provided (GETs send none).
    body: opts.body !== undefined ? JSON.stringify(opts.body) : undefined,
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '(no body)');
    // FIX: '→' separator and explicit .toString() — previously URL and status
    // were concatenated with no separator; now matches the error-message style
    // used by bootstrapAdmin.
    throw new Error(`${opts.method ?? 'GET'} ${url} → ${res.status.toString()}: ${text}`);
  }
  return res.json() as Promise<T>;
}
// ─── Admin bootstrap ──────────────────────────────────────────────────────────
/**
 * Bootstrap an admin user on a pristine gateway.
 *
 * Flow:
 *   1. GET /api/bootstrap/status — the gateway must report needsSetup=true.
 *   2. POST /api/bootstrap/setup with { name, email, password } — the reply
 *      carries the new admin user id and a plaintext admin token.
 *
 * The harness assumes a fresh DB: if needsSetup is false this fails fast with
 * a clear error rather than proceeding with an unknown token.
 */
async function bootstrapAdmin(
  baseUrl: string,
  label: string,
  password: string,
): Promise<BootstrapResult> {
  console.log(`[seed] Bootstrapping admin on ${label} (${baseUrl})...`);
  // Step 1 — confirm the gateway is pristine.
  const statusResponse = await fetch(`${baseUrl}/api/bootstrap/status`);
  if (!statusResponse.ok) {
    throw new Error(
      `[seed] GET ${baseUrl}/api/bootstrap/status → ${statusResponse.status.toString()}`,
    );
  }
  const { needsSetup } = (await statusResponse.json()) as { needsSetup: boolean };
  if (!needsSetup) {
    throw new Error(
      `[seed] ${label} at ${baseUrl} already has users (needsSetup=false). ` +
        `The harness requires a pristine database. Run 'docker compose down -v' to reset.`,
    );
  }
  // Step 2 — create the first admin user + token.
  // Derive a stable, label-based address in the reserved .invalid TLD.
  const email = `harness-admin-${label.toLowerCase().replace(/\s+/g, '-')}@example.invalid`;
  const setupResponse = await fetch(`${baseUrl}/api/bootstrap/setup`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: `Harness Admin (${label})`, email, password }),
  });
  if (!setupResponse.ok) {
    const body = await setupResponse.text().catch(() => '(no body)');
    throw new Error(
      `[seed] POST ${baseUrl}/api/bootstrap/setup → ${setupResponse.status.toString()}: ${body}`,
    );
  }
  const payload = (await setupResponse.json()) as {
    user: { id: string };
    token: { plaintext: string };
  };
  console.log(`[seed] ${label} admin user: ${payload.user.id}`);
  console.log(`[seed] ${label} admin token: ${payload.token.plaintext.slice(0, 8)}...`);
  return {
    adminUserId: payload.user.id,
    adminToken: payload.token.plaintext,
  };
}
// ─── Readiness probe ──────────────────────────────────────────────────────────
/**
 * Poll `${baseUrl}/api/health` until it answers 2xx or READINESS_TIMEOUT_MS
 * elapses. The most recent failure (HTTP status or network error message) is
 * included in the timeout error to aid debugging.
 */
async function waitForGateway(baseUrl: string, label: string): Promise<void> {
  const giveUpAt = Date.now() + READINESS_TIMEOUT_MS;
  let lastError = '';
  do {
    try {
      const probe = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
      if (probe.ok) {
        console.log(`[seed] ${label} is ready (${baseUrl})`);
        return;
      }
      lastError = `HTTP ${probe.status.toString()}`;
    } catch (err) {
      lastError = err instanceof Error ? err.message : String(err);
    }
    await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
  } while (Date.now() < giveUpAt);
  throw new Error(
    `[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS.toString()}ms — last error: ${lastError}`,
  );
}
// ─── Enrollment flow ──────────────────────────────────────────────────────────
/**
 * Walk the full enrollment flow for one grant.
 *
 * The correct two-sided flow (matching the data model's FK semantics):
 *
 * 1. On Server B: POST /api/admin/federation/peers/keypair
 *    → peerId_B (Server B's peer record representing the requesting side)
 * 2. On Server B: POST /api/admin/federation/grants with peerId: peerId_B
 *    → grant (FK to Server B's own federation_peers table — no violation)
 * 3. On Server B: POST /api/admin/federation/grants/:id/tokens
 *    → enrollmentUrl pointing back to Server B
 * 4. On Server A: POST /api/admin/federation/peers/keypair
 *    → peerId_A + csrPem_A (Server A's local record of Server B)
 * 5. Server A → Server B: POST enrollmentUrl with { csrPem: csrPem_A }
 *    → certPem signed by Server B's CA
 * 6. On Server A: PATCH /api/admin/federation/peers/:peerId_A/cert with certPem
 *    → Server A's peer record transitions to active
 *
 * Returns the activated grant (from Server B) and Server A's peer record.
 */
async function enrollGrant(opts: {
  label: string;
  subjectUserId: string;
  scope: unknown;
  adminTokenA: string;
  adminTokenB: string;
  serverAUrl: string;
  serverBUrl: string;
}): Promise<{ grant: GrantRecord; peer: PeerRecord & { grantId: string } }> {
  const { label, subjectUserId, scope, adminTokenA, adminTokenB, serverAUrl, serverBUrl } = opts;
  console.log(`\n[seed] Enrolling grant for scope variant ${label}...`);
  // 1. Create peer keypair on Server B (represents the requesting peer from B's perspective)
  const peerB = await adminFetch<PeerRecord>(serverBUrl, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminToken: adminTokenB,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}-from-b`,
      displayName: `Harness Peer ${label} (Server A as seen from B)`,
      endpointUrl: serverAUrl,
    },
  });
  console.log(`[seed] Created peer on B: ${peerB.peerId}`);
  // 2. Create grant on Server B referencing B's own peer record
  const grant = await adminFetch<GrantRecord>(serverBUrl, '/api/admin/federation/grants', {
    method: 'POST',
    adminToken: adminTokenB,
    body: {
      peerId: peerB.peerId,
      subjectUserId,
      scope,
    },
  });
  console.log(`[seed] Created grant on B: ${grant.id} (status: ${grant.status})`);
  // 3. Generate enrollment token on Server B
  const tokenResult = await adminFetch<EnrollmentTokenResult>(
    serverBUrl,
    `/api/admin/federation/grants/${grant.id}/tokens`,
    { method: 'POST', adminToken: adminTokenB, body: { ttlSeconds: 900 } },
  );
  console.log(`[seed] Enrollment token: ${tokenResult.token.slice(0, 8)}...`);
  console.log(`[seed] Enrollment URL: ${tokenResult.enrollmentUrl}`);
  // 4. Create peer keypair on Server A (Server A's local record of Server B)
  const peerA = await adminFetch<PeerRecord>(serverAUrl, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminToken: adminTokenA,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}-from-a`,
      displayName: `Harness Peer ${label} (Server B as seen from A)`,
      endpointUrl: serverBUrl,
    },
  });
  console.log(`[seed] Created peer on A: ${peerA.peerId}`);
  // 5. Redeem token at Server B's enrollment endpoint with A's CSR.
  // The enrollment endpoint is not admin-guarded — the one-time token IS the credential.
  //
  // The enrollmentUrl returned by the gateway is built using BETTER_AUTH_URL which
  // resolves to the in-cluster Docker hostname (gateway-b:3000). That URL is only
  // reachable from other containers, not from the host machine running this script.
  // We rewrite the host portion to use the host-accessible serverBUrl so the
  // seed script can reach the endpoint from the host.
  const parsedEnrollment = new URL(tokenResult.enrollmentUrl);
  // Last path segment is the one-time token; split('/') always yields ≥1 element.
  const tokenSegment = parsedEnrollment.pathname.split('/').pop()!;
  const redeemUrl = `${serverBUrl}/api/federation/enrollment/${tokenSegment}`;
  console.log(`[seed] Rewritten redeem URL (host-accessible): ${redeemUrl}`);
  const redeemRes = await fetch(redeemUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ csrPem: peerA.csrPem }),
  });
  if (!redeemRes.ok) {
    const body = await redeemRes.text().catch(() => '(no body)');
    // FIX: separate status from body — previously they were concatenated
    // ("...failed: 403{...}"), which made failures hard to read in CI logs.
    throw new Error(`Enrollment redemption failed: ${redeemRes.status.toString()} — ${body}`);
  }
  const redeemResult = (await redeemRes.json()) as EnrollmentRedeemResult;
  console.log(`[seed] Cert issued (${redeemResult.certPem.length.toString()} bytes)`);
  // 6. Store cert on Server A's peer record → transitions to active
  await adminFetch<unknown>(serverAUrl, `/api/admin/federation/peers/${peerA.peerId}/cert`, {
    method: 'PATCH',
    adminToken: adminTokenA,
    body: { certPem: redeemResult.certPem },
  });
  console.log(`[seed] Cert stored on A — peer ${peerA.peerId} is now active`);
  // Verify grant flipped to active on B
  const activeGrant = await adminFetch<GrantRecord>(
    serverBUrl,
    `/api/admin/federation/grants/${grant.id}`,
    { adminToken: adminTokenB },
  );
  console.log(`[seed] Grant status on B: ${activeGrant.status}`);
  return { grant: activeGrant, peer: { ...peerA, grantId: grant.id } };
}
// ─── Test data insertion ──────────────────────────────────────────────────────
/**
 * POST a list of records to an admin endpoint on Server B, tolerating a
 * missing endpoint: on the first 404 / "Cannot POST" failure the remaining
 * records are skipped with a warning (expected until M3-11 adds the
 * session-based seeding path). Any other error is rethrown.
 */
async function insertTolerating404(
  serverBUrl: string,
  adminTokenB: string,
  endpoint: string,
  kind: string,
  records: ReadonlyArray<{ title: string }>,
): Promise<void> {
  for (const record of records) {
    try {
      await adminFetch<unknown>(serverBUrl, endpoint, {
        method: 'POST',
        adminToken: adminTokenB,
        body: record,
      });
      console.log(`[seed] Inserted ${kind}: "${record.title}"`);
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      if (msg.includes('404') || msg.includes('Cannot POST')) {
        console.warn(
          `[seed] WARN: ${endpoint} not found — skipping ${kind} insertion (expected until M3-11)`,
        );
        break;
      }
      throw err;
    }
  }
}
/**
 * Insert representative test data on Server B via its admin APIs.
 *
 * NOTE: The gateway's task/note/credential APIs require an authenticated user
 * session. For the harness, we seed via admin-level endpoints if available,
 * or document the gap here for M3-11 to fill in with proper user session seeding.
 *
 * ASSUMPTION: Server B exposes POST /api/admin/tasks (or similar) for test data.
 * If that endpoint does not yet exist, this function logs a warning and skips
 * without failing — M3-11 will add the session-based seeding path.
 */
async function seedTestData(
  subjectUserId: string,
  scopeLabel: string,
  serverBUrl: string,
  adminTokenB: string,
): Promise<void> {
  console.log(`\n[seed] Seeding test data on Server B for ${scopeLabel}...`);
  const testTasks = [
    {
      title: `${scopeLabel} Task 1`,
      description: 'Federation harness test task',
      userId: subjectUserId,
    },
    {
      title: `${scopeLabel} Task 2`,
      description: 'Team-scoped test task',
      userId: subjectUserId,
      teamId: 'T1',
    },
  ];
  const testNotes = [
    {
      title: `${scopeLabel} Note 1`,
      content: 'Personal note for federation test',
      userId: subjectUserId,
    },
  ];
  // The tasks and notes loops were identical except for endpoint/label —
  // deduplicated into insertTolerating404 (same messages, same behavior).
  await insertTolerating404(serverBUrl, adminTokenB, '/api/admin/tasks', 'task', testTasks);
  await insertTolerating404(serverBUrl, adminTokenB, '/api/admin/notes', 'note', testNotes);
  console.log(`[seed] Test data seeding for ${scopeLabel} complete.`);
}
// ─── Main entrypoint ──────────────────────────────────────────────────────────
/**
 * Provision the full harness dataset: wait for both gateways, bootstrap an
 * admin on each, enroll the three scope variants (A/B/C), and seed test data
 * on Server B.
 *
 * @param opts Optional overrides for URLs, bootstrap passwords, and the
 *             subject user ids used in the grants. When subjectUserIds is
 *             omitted, Server B's freshly-bootstrapped admin user is used for
 *             all three variants.
 * @returns A SeedResult with admin tokens/user ids, grant records, and peer
 *          records for each variant.
 * @throws If either gateway is not pristine or any enrollment step fails.
 */
export async function runSeed(opts?: {
  serverAUrl?: string;
  serverBUrl?: string;
  adminBootstrapPasswordA?: string;
  adminBootstrapPasswordB?: string;
  subjectUserIds?: { variantA: string; variantB: string; variantC: string };
}): Promise<SeedResult> {
  const aUrl = opts?.serverAUrl ?? SERVER_A_URL;
  const bUrl = opts?.serverBUrl ?? SERVER_B_URL;
  const passwordA = opts?.adminBootstrapPasswordA ?? ADMIN_BOOTSTRAP_PASSWORD_A;
  const passwordB = opts?.adminBootstrapPasswordB ?? ADMIN_BOOTSTRAP_PASSWORD_B;
  // Use provided or default subject user IDs.
  // In a real run these would be real user UUIDs from Server B's DB.
  // For the harness, the admin bootstrap user on Server B is used as the subject.
  // These are overridden after bootstrap if opts.subjectUserIds is not provided.
  const subjectIds = opts?.subjectUserIds;
  console.log('[seed] Waiting for gateways to be ready...');
  await Promise.all([waitForGateway(aUrl, 'Server A'), waitForGateway(bUrl, 'Server B')]);
  // Bootstrap admin users on both gateways (requires pristine DBs).
  console.log('\n[seed] Bootstrapping admin accounts...');
  const [bootstrapA, bootstrapB] = await Promise.all([
    bootstrapAdmin(aUrl, 'Server A', passwordA),
    bootstrapAdmin(bUrl, 'Server B', passwordB),
  ]);
  // Default subject user IDs to the admin user on Server B (guaranteed to exist).
  const resolvedSubjectIds = subjectIds ?? {
    variantA: bootstrapB.adminUserId,
    variantB: bootstrapB.adminUserId,
    variantC: bootstrapB.adminUserId,
  };
  // Enroll all three scope variants sequentially to avoid race conditions on
  // the step-ca signing queue. Parallel enrollment would work too but
  // sequential is easier to debug when something goes wrong.
  console.log('\n[seed] Enrolling scope variants...');
  const resultA = await enrollGrant({
    label: 'A',
    subjectUserId: resolvedSubjectIds.variantA,
    scope: SCOPE_VARIANT_A,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });
  const resultB = await enrollGrant({
    label: 'B',
    subjectUserId: resolvedSubjectIds.variantB,
    scope: SCOPE_VARIANT_B,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });
  const resultC = await enrollGrant({
    label: 'C',
    subjectUserId: resolvedSubjectIds.variantC,
    scope: SCOPE_VARIANT_C,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });
  // Seed test data on Server B for each scope variant (independent — parallel is safe here).
  await Promise.all([
    seedTestData(resolvedSubjectIds.variantA, 'A', bUrl, bootstrapB.adminToken),
    seedTestData(resolvedSubjectIds.variantB, 'B', bUrl, bootstrapB.adminToken),
    seedTestData(resolvedSubjectIds.variantC, 'C', bUrl, bootstrapB.adminToken),
  ]);
  const result: SeedResult = {
    serverAUrl: aUrl,
    serverBUrl: bUrl,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    adminUserIdA: bootstrapA.adminUserId,
    adminUserIdB: bootstrapB.adminUserId,
    grants: {
      variantA: resultA.grant,
      variantB: resultB.grant,
      variantC: resultC.grant,
    },
    peers: {
      variantA: resultA.peer,
      variantB: resultB.peer,
      variantC: resultC.peer,
    },
  };
  console.log('\n[seed] Seed complete.');
  console.log('[seed] Summary:');
  console.log(`  Variant A grant: ${result.grants.variantA.id} (${result.grants.variantA.status})`);
  console.log(`  Variant B grant: ${result.grants.variantB.id} (${result.grants.variantB.status})`);
  console.log(`  Variant C grant: ${result.grants.variantC.id} (${result.grants.variantC.status})`);
  return result;
}
// ─── CLI entry ────────────────────────────────────────────────────────────────
// Detect "run directly (tsx tools/federation-harness/seed.ts)" vs "imported as
// a module" — runSeed() only auto-executes in the former case.
// NOTE(review): this compares only the basename of argv[1] against the module
// path (endsWith), so any entry script with the same filename would match, and
// the '/'-split is POSIX-only — confirm this is acceptable for harness use.
const isCli =
  process.argv[1] != null &&
  fileURLToPath(import.meta.url).endsWith(process.argv[1]!.split('/').pop()!);
if (isCli) {
  // Optional convenience: bring the compose stack up before seeding.
  const shouldBoot = process.argv.includes('--boot');
  if (shouldBoot) {
    console.log('[seed] --boot flag detected — starting compose stack...');
    execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
  }
  // Translate the promise outcome into a process exit code for CI.
  runSeed()
    .then(() => {
      process.exit(0);
    })
    .catch((err) => {
      console.error('[seed] Fatal:', err);
      process.exit(1);
    });
}