Adds tools/federation-harness/ — the permanent test bed for M3+ federation E2E tests. Boots two gateways (Server A + Server B) on a shared Docker bridge network with per-gateway Postgres/pgvector + Valkey and a shared Step-CA. - docker-compose.two-gateways.yml: gateway-a/b, postgres-a/b, valkey-a/b, step-ca; image digest-pinned to sha256:1069117740e... (sha-9f1a081, #491) - seed.ts: provisions scope variants A/B/C via real admin REST API; walks full enrollment flow (peer keypair → grant → token → redeem → cert store) - harness.ts: bootHarness/tearDownHarness/serverA/serverB/seed helpers for vitest; idempotent boot (reuses running stack when both gateways healthy) - README.md: prereqs, topology, seed usage, vitest integration, port override, troubleshooting, image digest note No production code modified. Quality gates: typecheck ✓ lint ✓ format ✓ Closes #462 Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
429 lines
15 KiB
TypeScript
429 lines
15 KiB
TypeScript
#!/usr/bin/env tsx
/**
 * tools/federation-harness/seed.ts
 *
 * Provisions test data for the two-gateway federation harness.
 * Run via: tsx tools/federation-harness/seed.ts
 *
 * What this script does:
 * 1. (Optional) Boots the compose stack if --boot flag is passed.
 * 2. Waits for both gateways to be healthy.
 * 3. Creates three grants on Server B matching the M3 acceptance test scenarios:
 *    - Scope variant A: tasks + notes, include_personal: true
 *    - Scope variant B: tasks only, include_teams: ['T1'], exclude T2
 *    - Scope variant C: tasks + credentials in resources, credentials excluded (sanity)
 * 4. For each grant, walks the enrollment flow so Server A ends up with
 *    an active peer + cert + sealed key.
 * 5. Inserts representative test tasks/notes/credentials on Server B.
 *
 * IMPORTANT: This script uses the real admin REST API — no direct DB writes.
 * It exercises the full enrollment flow as M3 acceptance tests will.
 *
 * ESM / NodeNext: all imports use .js extensions.
 */
|
import { execSync } from 'node:child_process';
|
|
import { resolve, dirname } from 'node:path';
|
|
import { fileURLToPath } from 'node:url';
|
|
|
|
// ─── Constants ───────────────────────────────────────────────────────────────

// ESM modules have no __dirname; reconstruct it from import.meta.url.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Compose file sits next to this script; resolve() keeps the path CWD-independent.
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');

/** Base URLs as seen from the host machine (mapped host ports). */
const SERVER_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const SERVER_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
// Per-gateway admin API keys, overridable via env; defaults are harness values.
const ADMIN_KEY_A = process.env['ADMIN_KEY_A'] ?? 'harness-admin-key-a';
const ADMIN_KEY_B = process.env['ADMIN_KEY_B'] ?? 'harness-admin-key-b';

// Total time to wait for a gateway health check before giving up, and the
// interval between successive polls (used by waitForGateway).
const READINESS_TIMEOUT_MS = 120_000;
const READINESS_POLL_MS = 3_000;
|
// ─── Scope variant definitions (for M3 acceptance tests) ─────────────────────

/** Scope variant A — tasks + notes, personal data included. */
export const SCOPE_VARIANT_A = {
  resources: ['tasks', 'notes'],
  filters: {
    tasks: { include_personal: true },
    notes: { include_personal: true },
  },
  // `as string[]` widens the inferred never[] so the field's type matches
  // variants that do carry exclusions (e.g. SCOPE_VARIANT_C).
  excluded_resources: [] as string[],
  max_rows_per_query: 500,
};

/** Scope variant B — tasks only, team T1 only, no personal. */
export const SCOPE_VARIANT_B = {
  resources: ['tasks'],
  filters: {
    tasks: { include_teams: ['T1'], include_personal: false },
  },
  excluded_resources: [] as string[],
  max_rows_per_query: 500,
};

/**
 * Scope variant C — tasks + credentials in resources list, but credentials
 * explicitly in excluded_resources. Sanity test: credentials must still be
 * inaccessible even though they appear in resources.
 */
export const SCOPE_VARIANT_C = {
  resources: ['tasks', 'credentials'],
  filters: {
    tasks: { include_personal: true },
  },
  excluded_resources: ['credentials'],
  max_rows_per_query: 500,
};
|
// ─── Inline types (no import from packages/types — M3-01 branch not yet merged) ─

/** Options for adminFetch; the admin key is required on every call. */
interface AdminFetchOptions {
  method?: string; // defaults to 'GET' in adminFetch
  body?: unknown; // JSON-serialized when present
  adminKey: string;
}

/** Peer-keypair response fields this script consumes (Server A). */
interface PeerRecord {
  peerId: string;
  csrPem: string;
}

/** Grant response fields this script consumes (Server B). */
interface GrantRecord {
  id: string;
  status: string;
  scope: unknown;
}

/** Response from POST /api/admin/federation/grants/:id/tokens (Server B). */
interface EnrollmentTokenResult {
  token: string;
  expiresAt: string;
  enrollmentUrl: string;
}

/** Response from redeeming an enrollment token: leaf cert + issuing chain. */
interface EnrollmentRedeemResult {
  certPem: string;
  certChainPem: string;
}

/** Everything a test needs back from runSeed(): URLs, grants, and peers. */
export interface SeedResult {
  serverAUrl: string;
  serverBUrl: string;
  grants: {
    variantA: GrantRecord;
    variantB: GrantRecord;
    variantC: GrantRecord;
  };
  peers: {
    variantA: PeerRecord & { grantId: string };
    variantB: PeerRecord & { grantId: string };
    variantC: PeerRecord & { grantId: string };
  };
}
|
// ─── HTTP helpers ─────────────────────────────────────────────────────────────
|
|
|
|
async function adminFetch<T>(baseUrl: string, path: string, opts: AdminFetchOptions): Promise<T> {
|
|
const url = `${baseUrl}${path}`;
|
|
const res = await fetch(url, {
|
|
method: opts.method ?? 'GET',
|
|
headers: {
|
|
'Content-Type': 'application/json',
|
|
'x-admin-key': opts.adminKey,
|
|
},
|
|
body: opts.body !== undefined ? JSON.stringify(opts.body) : undefined,
|
|
});
|
|
|
|
if (!res.ok) {
|
|
const text = await res.text().catch(() => '(no body)');
|
|
throw new Error(`${opts.method ?? 'GET'} ${url} → ${res.status}: ${text}`);
|
|
}
|
|
|
|
return res.json() as Promise<T>;
|
|
}
|
|
|
|
// ─── Readiness probe ──────────────────────────────────────────────────────────
|
|
|
|
async function waitForGateway(baseUrl: string, label: string): Promise<void> {
|
|
const deadline = Date.now() + READINESS_TIMEOUT_MS;
|
|
let lastError: string = '';
|
|
|
|
while (Date.now() < deadline) {
|
|
try {
|
|
const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
|
|
if (res.ok) {
|
|
console.log(`[seed] ${label} is ready (${baseUrl})`);
|
|
return;
|
|
}
|
|
lastError = `HTTP ${res.status}`;
|
|
} catch (err) {
|
|
lastError = err instanceof Error ? err.message : String(err);
|
|
}
|
|
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
|
|
}
|
|
|
|
throw new Error(
|
|
`[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS}ms — last error: ${lastError}`,
|
|
);
|
|
}
|
|
|
|
// ─── Enrollment flow ──────────────────────────────────────────────────────────

/**
 * Walk the full enrollment flow for one grant:
 * 1. Create a peer keypair on Server A (generates CSR).
 * 2. Create a grant on Server B referencing the peer.
 * 3. Generate an enrollment token on Server B.
 * 4. Redeem the token on Server B with A's CSR → get cert back.
 * 5. Store the cert on Server A's peer record.
 *
 * Uses the module-level SERVER_A_URL / SERVER_B_URL / ADMIN_KEY_* constants
 * directly (not overridable per call).
 *
 * @param opts.label          Scope variant label ('A' | 'B' | 'C'); used for
 *                            peer naming and log prefixes.
 * @param opts.subjectUserId  User on Server B whose data the grant covers.
 * @param opts.scope          Scope document for the grant (one of the
 *                            SCOPE_VARIANT_* constants).
 * @returns The grant as re-fetched after cert storage, plus the peer record
 *          tagged with its grant id.
 * @throws Error if any HTTP step fails (adminFetch throws on non-2xx, and
 *         redemption failure is checked explicitly below).
 */
async function enrollGrant(opts: {
  label: string;
  subjectUserId: string;
  scope: unknown;
}): Promise<{ grant: GrantRecord; peer: PeerRecord & { grantId: string } }> {
  const { label, subjectUserId, scope } = opts;
  console.log(`\n[seed] Enrolling grant for scope variant ${label}...`);

  // 1. Create peer keypair on Server A
  const peer = await adminFetch<PeerRecord>(SERVER_A_URL, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminKey: ADMIN_KEY_A,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}`,
      displayName: `Harness Peer ${label}`,
      endpointUrl: `${SERVER_B_URL}`,
    },
  });
  console.log(`[seed] Created peer on A: ${peer.peerId}`);

  // 2. Create grant on Server B
  const grant = await adminFetch<GrantRecord>(SERVER_B_URL, '/api/admin/federation/grants', {
    method: 'POST',
    adminKey: ADMIN_KEY_B,
    body: {
      peerId: peer.peerId,
      subjectUserId,
      scope,
    },
  });
  console.log(`[seed] Created grant on B: ${grant.id} (status: ${grant.status})`);

  // 3. Generate enrollment token on Server B
  const tokenResult = await adminFetch<EnrollmentTokenResult>(
    SERVER_B_URL,
    `/api/admin/federation/grants/${grant.id}/tokens`,
    { method: 'POST', adminKey: ADMIN_KEY_B, body: { ttlSeconds: 900 } },
  );
  // Only the token prefix is logged — the full token is a one-time secret.
  console.log(`[seed] Enrollment token: ${tokenResult.token.slice(0, 8)}...`);

  // 4. Redeem token on Server B with A's CSR
  // The enrollment endpoint is not admin-guarded — it uses the one-time token.
  // Plain fetch (not adminFetch) because no admin key must be sent here.
  const redeemUrl = `${SERVER_B_URL}/api/federation/enrollment/${tokenResult.token}`;
  const redeemRes = await fetch(redeemUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ csrPem: peer.csrPem }),
  });

  if (!redeemRes.ok) {
    const body = await redeemRes.text().catch(() => '(no body)');
    throw new Error(`Enrollment redemption failed: ${redeemRes.status} — ${body}`);
  }

  const redeemResult = (await redeemRes.json()) as EnrollmentRedeemResult;
  // NOTE(review): certPem.length counts string characters, not bytes — the
  // "bytes" label below is approximate for non-ASCII content.
  console.log(`[seed] Cert issued (${redeemResult.certPem.length} bytes)`);

  // 5. Store cert on Server A peer record
  await adminFetch<unknown>(SERVER_A_URL, `/api/admin/federation/peers/${peer.peerId}/cert`, {
    method: 'PATCH',
    adminKey: ADMIN_KEY_A,
    body: { certPem: redeemResult.certPem },
  });
  console.log(`[seed] Cert stored on A — peer ${peer.peerId} is now active`);

  // Verify grant flipped to active on B
  // (re-fetch rather than trust the earlier response; status may have changed)
  const activeGrant = await adminFetch<GrantRecord>(
    SERVER_B_URL,
    `/api/admin/federation/grants/${grant.id}`,
    { adminKey: ADMIN_KEY_B },
  );
  console.log(`[seed] Grant status on B: ${activeGrant.status}`);

  return { grant: activeGrant, peer: { ...peer, grantId: grant.id } };
}
|
// ─── Test data insertion ──────────────────────────────────────────────────────
|
|
|
|
/**
|
|
* Insert representative test data on Server B via its admin APIs.
|
|
*
|
|
* NOTE: The gateway's task/note/credential APIs require an authenticated user
|
|
* session. For the harness, we seed via admin-level endpoints if available,
|
|
* or document the gap here for M3-11 to fill in with proper user session seeding.
|
|
*
|
|
* ASSUMPTION: Server B exposes POST /api/admin/tasks (or similar) for test data.
|
|
* If that endpoint does not yet exist, this function logs a warning and skips
|
|
* without failing — M3-11 will add the session-based seeding path.
|
|
*/
|
|
async function seedTestData(subjectUserId: string, scopeLabel: string): Promise<void> {
|
|
console.log(`\n[seed] Seeding test data on Server B for ${scopeLabel}...`);
|
|
|
|
const testTasks = [
|
|
{
|
|
title: `${scopeLabel} Task 1`,
|
|
description: 'Federation harness test task',
|
|
userId: subjectUserId,
|
|
},
|
|
{
|
|
title: `${scopeLabel} Task 2`,
|
|
description: 'Team-scoped test task',
|
|
userId: subjectUserId,
|
|
teamId: 'T1',
|
|
},
|
|
];
|
|
|
|
const testNotes = [
|
|
{
|
|
title: `${scopeLabel} Note 1`,
|
|
content: 'Personal note for federation test',
|
|
userId: subjectUserId,
|
|
},
|
|
];
|
|
|
|
// Attempt to insert — tolerate 404 (endpoint not yet implemented)
|
|
for (const task of testTasks) {
|
|
try {
|
|
await adminFetch<unknown>(SERVER_B_URL, '/api/admin/tasks', {
|
|
method: 'POST',
|
|
adminKey: ADMIN_KEY_B,
|
|
body: task,
|
|
});
|
|
console.log(`[seed] Inserted task: "${task.title}"`);
|
|
} catch (err) {
|
|
const msg = err instanceof Error ? err.message : String(err);
|
|
if (msg.includes('404') || msg.includes('Cannot POST')) {
|
|
console.warn(
|
|
`[seed] WARN: /api/admin/tasks not found — skipping task insertion (expected until M3-11)`,
|
|
);
|
|
break;
|
|
}
|
|
throw err;
|
|
}
|
|
}
|
|
|
|
for (const note of testNotes) {
|
|
try {
|
|
await adminFetch<unknown>(SERVER_B_URL, '/api/admin/notes', {
|
|
method: 'POST',
|
|
adminKey: ADMIN_KEY_B,
|
|
body: note,
|
|
});
|
|
console.log(`[seed] Inserted note: "${note.title}"`);
|
|
} catch (err) {
|
|
const msg = err instanceof Error ? err.message : String(err);
|
|
if (msg.includes('404') || msg.includes('Cannot POST')) {
|
|
console.warn(
|
|
`[seed] WARN: /api/admin/notes not found — skipping note insertion (expected until M3-11)`,
|
|
);
|
|
break;
|
|
}
|
|
throw err;
|
|
}
|
|
}
|
|
|
|
console.log(`[seed] Test data seeding for ${scopeLabel} complete.`);
|
|
}
|
|
|
|
// ─── Main entrypoint ──────────────────────────────────────────────────────────

/**
 * Seed the two-gateway harness: wait for both gateways to be healthy, enroll
 * the three scope variants in parallel, insert test data on Server B, and
 * return a SeedResult summarizing what was created.
 *
 * NOTE(review): the serverAUrl/serverBUrl overrides are only honored by the
 * readiness probes below — enrollGrant() and seedTestData() read the
 * module-level SERVER_A_URL / SERVER_B_URL constants directly. Likewise,
 * keyA/keyB are computed here but never used (enrollment uses ADMIN_KEY_A /
 * ADMIN_KEY_B). Passing non-default overrides will probe one pair of URLs
 * but enroll against another. TODO: thread the overrides through the helpers
 * or drop the unused options.
 *
 * @throws Error if either gateway fails its readiness probe or any
 *         enrollment step fails.
 */
export async function runSeed(opts?: {
  serverAUrl?: string;
  serverBUrl?: string;
  adminKeyA?: string;
  adminKeyB?: string;
  subjectUserIds?: { variantA: string; variantB: string; variantC: string };
}): Promise<SeedResult> {
  const aUrl = opts?.serverAUrl ?? SERVER_A_URL;
  const bUrl = opts?.serverBUrl ?? SERVER_B_URL;
  // NOTE(review): unused — see function doc comment above.
  const keyA = opts?.adminKeyA ?? ADMIN_KEY_A;
  const keyB = opts?.adminKeyB ?? ADMIN_KEY_B;

  // Use provided or default subject user IDs
  // In a real run these would be real user UUIDs from Server B's DB.
  // For the harness, we use deterministic UUIDs that the seed bootstrap creates.
  const subjectIds = opts?.subjectUserIds ?? {
    variantA: '00000000-0000-0000-0000-000000000001',
    variantB: '00000000-0000-0000-0000-000000000002',
    variantC: '00000000-0000-0000-0000-000000000003',
  };

  console.log('[seed] Waiting for gateways to be ready...');
  await Promise.all([waitForGateway(aUrl, 'Server A'), waitForGateway(bUrl, 'Server B')]);

  // Enroll all three scope variants in parallel
  console.log('\n[seed] Enrolling scope variants...');
  const [resultA, resultB, resultC] = await Promise.all([
    enrollGrant({ label: 'A', subjectUserId: subjectIds.variantA, scope: SCOPE_VARIANT_A }),
    enrollGrant({ label: 'B', subjectUserId: subjectIds.variantB, scope: SCOPE_VARIANT_B }),
    enrollGrant({ label: 'C', subjectUserId: subjectIds.variantC, scope: SCOPE_VARIANT_C }),
  ]);

  // Seed test data on Server B for each scope variant
  await Promise.all([
    seedTestData(subjectIds.variantA, 'A'),
    seedTestData(subjectIds.variantB, 'B'),
    seedTestData(subjectIds.variantC, 'C'),
  ]);

  const result: SeedResult = {
    serverAUrl: aUrl,
    serverBUrl: bUrl,
    grants: {
      variantA: resultA.grant,
      variantB: resultB.grant,
      variantC: resultC.grant,
    },
    peers: {
      variantA: resultA.peer,
      variantB: resultB.peer,
      variantC: resultC.peer,
    },
  };

  console.log('\n[seed] Seed complete.');
  console.log('[seed] Summary:');
  console.log(`  Variant A grant: ${result.grants.variantA.id} (${result.grants.variantA.status})`);
  console.log(`  Variant B grant: ${result.grants.variantB.id} (${result.grants.variantB.status})`);
  console.log(`  Variant C grant: ${result.grants.variantC.id} (${result.grants.variantC.status})`);

  return result;
}
|
// ─── CLI entry ────────────────────────────────────────────────────────────────
|
|
|
|
const isCli =
|
|
process.argv[1] != null &&
|
|
fileURLToPath(import.meta.url).endsWith(process.argv[1]!.split('/').pop()!);
|
|
|
|
if (isCli) {
|
|
const shouldBoot = process.argv.includes('--boot');
|
|
|
|
if (shouldBoot) {
|
|
console.log('[seed] --boot flag detected — starting compose stack...');
|
|
execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
|
|
}
|
|
|
|
runSeed()
|
|
.then(() => {
|
|
process.exit(0);
|
|
})
|
|
.catch((err) => {
|
|
console.error('[seed] Fatal:', err);
|
|
process.exit(1);
|
|
});
|
|
}
|