fix(federation): harness CRIT bugs — admin bootstrap auth + peer FK + boot deadline (review remediation)
Some checks are pending
ci/woodpecker/push/ci Pipeline is running
ci/woodpecker/pr/ci Pipeline is running

CRIT-1: Replace nonexistent x-admin-key header with Authorization: Bearer <token>;
add bootstrapAdmin() to call POST /api/bootstrap/setup on each pristine gateway
before any admin-guarded endpoint is used.

CRIT-2: Fix cross-gateway peer FK violation — peer keypair is now created on
Server B first (so the grant FK resolves against B's own federation_peers table),
then Server A creates its own keypair and redeems the enrollment token at B.

HIGH-3: waitForStack() now polls both gateways in parallel via Promise.all, each
with an independent deadline, so a slow gateway-a cannot starve gateway-b's budget.

MED-4: seed() throws immediately with a clear error if scenario !== 'all';
per-variant narrowing deferred to M3-11 with explicit JSDoc note.

Also: remove ADMIN_API_KEY (no such path in AdminGuard) from compose, replace
with ADMIN_BOOTSTRAP_PASSWORD; add BETTER_AUTH_URL production-code limitation
as a TODO in the README.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Jarvis
2026-04-23 20:35:36 -05:00
parent 3bfd5195b2
commit 190f12a971
4 changed files with 337 additions and 108 deletions

View File

@@ -87,7 +87,8 @@ afterAll(async () => {
});
test('variant A: list tasks returns personal tasks', async () => {
const seedResult = await seed(handle, 'variantA');
// NOTE: Only 'all' is supported for now — per-variant narrowing is M3-11.
const seedResult = await seed(handle, 'all');
const a = serverA(handle);
const res = await fetch(`${a.baseUrl}/api/federation/tasks`, {
@@ -97,6 +98,11 @@ test('variant A: list tasks returns personal tasks', async () => {
});
```
> **Note:** `seed()` bootstraps a fresh admin user on each gateway via
> `POST /api/bootstrap/setup`. Both gateways must have zero users (pristine DB).
> If either gateway already has users, `seed()` throws with a clear error.
> Reset state with `docker compose down -v`.
The `bootHarness()` function is **idempotent**: if both gateways are already
healthy, it reuses the running stack and returns `ownedStack: false`. Tests
should not call `tearDownHarness` when `ownedStack` is false unless they
@@ -207,6 +213,25 @@ The gateway image is pinned to `sha256:1069117740e00ccfeba357cae38c43f3729fe5ae7`; the floating `latest`
tag is forbidden per Mosaic image policy. When a new gateway build is promoted,
update the digest in `docker-compose.two-gateways.yml` and in this file.
## Known Limitations
### BETTER_AUTH_URL enrollment URL bug (production code — not fixed here)
`apps/gateway/src/federation/federation.controller.ts:145` constructs the
enrollment URL using `process.env['BETTER_AUTH_URL'] ?? 'http://localhost:14242'`.
In non-harness deployments (where `BETTER_AUTH_URL` is not set or points to the
web origin rather than the gateway's own base URL) this produces an incorrect
enrollment URL that points to the wrong host or port.
The harness works around this by explicitly setting
`BETTER_AUTH_URL: 'http://gateway-b:3000'` in the compose file so the enrollment
URL correctly references gateway-b's internal Docker hostname.
**TODO:** Fix `federation.controller.ts` to derive the enrollment URL from its own
listening address (e.g. `GATEWAY_BASE_URL` env var or a dedicated
`FEDERATION_ENROLLMENT_BASE_URL` env var) rather than reusing `BETTER_AUTH_URL`.
Tracked as a follow-up to PR #505 — do not bundle with harness changes.
## Permanent Infrastructure
This harness is designed to outlive M3 and be reused by M4+ milestone tests.

View File

@@ -122,8 +122,10 @@ services:
BETTER_AUTH_URL: 'http://gateway-a:3000'
STEP_CA_URL: 'https://step-ca:9000'
FEDERATION_PEER_HOSTNAME: gateway-a
# Admin key — fixed for harness use only; never use in production
ADMIN_API_KEY: harness-admin-key-a
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
# the first admin user. Only valid on a pristine (zero-user) database.
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-a
depends_on:
postgres-a:
condition: service_healthy
@@ -201,8 +203,10 @@ services:
BETTER_AUTH_URL: 'http://gateway-b:3000'
STEP_CA_URL: 'https://step-ca:9000'
FEDERATION_PEER_HOSTNAME: gateway-b
# Admin key — fixed for harness use only; never use in production
ADMIN_API_KEY: harness-admin-key-b
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
# the first admin user. Only valid on a pristine (zero-user) database.
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-b
depends_on:
postgres-b:
condition: service_healthy

View File

@@ -19,14 +19,17 @@
* });
*
* test('variant A — list tasks', async () => {
* const seedResult = await seed(handle, 'variantA');
* const seedResult = await seed(handle, 'all');
* const a = serverA(handle);
* const res = await fetch(`${a.baseUrl}/api/federation/list/tasks`, {
* headers: { 'x-admin-key': a.adminKey },
* headers: { Authorization: `Bearer ${seedResult.adminTokenA}` },
* });
* expect(res.status).toBe(200);
* });
*
* NOTE: The `seed()` helper currently only supports scenario='all'. Passing any
* other value throws immediately. Per-variant narrowing is deferred to M3-11.
*
* ESM / NodeNext: all imports use .js extensions.
*/
@@ -40,8 +43,8 @@ import { runSeed, type SeedResult } from './seed.js';
export interface GatewayAccessor {
/** Base URL reachable from the host machine, e.g. http://localhost:14001 */
baseUrl: string;
/** Admin key for X-Admin-Key header */
adminKey: string;
/** Bootstrap password used for POST /api/bootstrap/setup on a pristine gateway */
bootstrapPassword: string;
/** Internal Docker network hostname (for container-to-container calls) */
internalHostname: string;
}
@@ -59,6 +62,11 @@ export interface HarnessHandle {
seedResult?: SeedResult;
}
/**
* Scenario to seed. Currently only 'all' is implemented; per-variant narrowing
* is tracked as M3-11. Passing any other value throws immediately with a clear
* error rather than silently over-seeding.
*/
export type SeedScenario = 'variantA' | 'variantB' | 'variantC' | 'all';
// ─── Constants ────────────────────────────────────────────────────────────────
@@ -68,8 +76,10 @@ const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');
const GATEWAY_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const GATEWAY_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
const ADMIN_KEY_A = process.env['ADMIN_KEY_A'] ?? 'harness-admin-key-a';
const ADMIN_KEY_B = process.env['ADMIN_KEY_B'] ?? 'harness-admin-key-b';
const ADMIN_BOOTSTRAP_PASSWORD_A =
process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';
const READINESS_TIMEOUT_MS = 180_000;
const READINESS_POLL_MS = 3_000;
@@ -85,29 +95,43 @@ async function isGatewayHealthy(baseUrl: string): Promise<boolean> {
}
}
/**
 * Poll both gateways in parallel, each against its own independent deadline,
 * until both are healthy. Polling in parallel (rather than sequentially)
 * avoids the bug where a slow gateway-a consumes all of the readiness budget
 * before gateway-b is checked.
 */
async function waitForStack(handle: HarnessHandle): Promise<void> {
const deadline = Date.now() + READINESS_TIMEOUT_MS;
const gateways: Array<{ label: string; url: string }> = [
{ label: 'gateway-a', url: handle.a.baseUrl },
{ label: 'gateway-b', url: handle.b.baseUrl },
];
for (const gw of gateways) {
process.stdout.write(`[harness] Waiting for ${gw.label}...`);
while (Date.now() < deadline) {
if (await isGatewayHealthy(gw.url)) {
process.stdout.write(' ready\n');
break;
await Promise.all(
gateways.map(async (gw) => {
// Each gateway gets its own independent deadline.
const deadline = Date.now() + READINESS_TIMEOUT_MS;
process.stdout.write(`[harness] Waiting for ${gw.label}...`);
while (Date.now() < deadline) {
if (await isGatewayHealthy(gw.url)) {
process.stdout.write(` ready\n`);
return;
}
if (Date.now() + READINESS_POLL_MS > deadline) {
throw new Error(
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
);
}
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
process.stdout.write('.');
}
if (Date.now() + READINESS_POLL_MS > deadline) {
throw new Error(
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS}ms`,
);
}
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
process.stdout.write('.');
}
}
throw new Error(
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
);
}),
);
}
function isStackRunning(): boolean {
@@ -155,12 +179,12 @@ export async function bootHarness(): Promise<HarnessHandle> {
const handle: HarnessHandle = {
a: {
baseUrl: GATEWAY_A_URL,
adminKey: ADMIN_KEY_A,
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_A,
internalHostname: 'gateway-a',
},
b: {
baseUrl: GATEWAY_B_URL,
adminKey: ADMIN_KEY_B,
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_B,
internalHostname: 'gateway-b',
},
composeFile: COMPOSE_FILE,
@@ -231,26 +255,34 @@ export function serverB(handle: HarnessHandle): GatewayAccessor {
* Seed the harness with test data for one or more scenarios.
*
* @param handle The harness handle returned by bootHarness().
* @param scenario Which scope variants to provision:
* 'variantA' | 'variantB' | 'variantC' | 'all'
* @param scenario Which scope variants to provision. Currently only 'all' is
* supported — passing any other value throws immediately with a
* clear error. Per-variant narrowing is tracked as M3-11.
*
* Returns a SeedResult with grant IDs and peer IDs for each variant,
* which test assertions can reference.
* Returns a SeedResult with grant IDs, peer IDs, and admin tokens for each
* gateway, which test assertions can reference.
*
* IMPORTANT: The harness assumes a pristine database on both gateways. The seed
* bootstraps an admin user on each gateway via POST /api/bootstrap/setup. If
* either gateway already has users, seed() throws with a clear error message.
* Run 'docker compose down -v' to reset state.
*/
export async function seed(
handle: HarnessHandle,
scenario: SeedScenario = 'all',
): Promise<SeedResult> {
// For now all scenarios run the full seed — M3-11 can narrow this.
// The seed script is idempotent in the sense that it creates new grants
// each time; tests should start with a clean stack for isolation.
void scenario; // narrowing deferred to M3-11
if (scenario !== 'all') {
throw new Error(
`seed: scenario narrowing not yet implemented; pass "all" for now. ` +
`Got: "${scenario}". Per-variant narrowing is tracked as M3-11.`,
);
}
const result = await runSeed({
serverAUrl: handle.a.baseUrl,
serverBUrl: handle.b.baseUrl,
adminKeyA: handle.a.adminKey,
adminKeyB: handle.b.adminKey,
adminBootstrapPasswordA: handle.a.bootstrapPassword,
adminBootstrapPasswordB: handle.b.bootstrapPassword,
});
handle.seedResult = result;

View File

@@ -8,13 +8,20 @@
* What this script does:
* 1. (Optional) Boots the compose stack if --boot flag is passed.
* 2. Waits for both gateways to be healthy.
* 3. Creates three grants on Server B matching the M3 acceptance test scenarios:
* 3. Bootstraps an admin user + token on each gateway via POST /api/bootstrap/setup.
* 4. Creates three grants on Server B matching the M3 acceptance test scenarios:
* - Scope variant A: tasks + notes, include_personal: true
* - Scope variant B: tasks only, include_teams: ['T1'], exclude T2
* - Scope variant C: tasks + credentials in resources, credentials excluded (sanity)
* 4. For each grant, walks the enrollment flow so Server A ends up with
* an active peer + cert + sealed key.
* 5. Inserts representative test tasks/notes/credentials on Server B.
* 5. For each grant, walks the full enrollment flow:
* a. Server B creates a peer keypair (represents the requesting side).
* b. Server B creates the grant referencing that peer.
* c. Server B issues an enrollment token.
* d. Server A creates its own peer keypair (represents its view of B).
* e. Server A redeems the enrollment token at Server B's enrollment endpoint,
* submitting A's CSR → receives signed cert back.
* f. Server A stores the cert on its peer record → peer becomes active.
* 6. Inserts representative test tasks/notes/credentials on Server B.
*
* IMPORTANT: This script uses the real admin REST API — no direct DB writes.
* It exercises the full enrollment flow as M3 acceptance tests will.
@@ -34,8 +41,16 @@ const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');
/** Base URLs as seen from the host machine (mapped host ports). */
const SERVER_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const SERVER_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
const ADMIN_KEY_A = process.env['ADMIN_KEY_A'] ?? 'harness-admin-key-a';
const ADMIN_KEY_B = process.env['ADMIN_KEY_B'] ?? 'harness-admin-key-b';
/**
* Bootstrap passwords used when calling POST /api/bootstrap/setup on each
* gateway. Each gateway starts with zero users and requires a one-time setup
* call before any admin-guarded endpoints can be used.
*/
const ADMIN_BOOTSTRAP_PASSWORD_A =
process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';
const READINESS_TIMEOUT_MS = 120_000;
const READINESS_POLL_MS = 3_000;
@@ -82,7 +97,7 @@ export const SCOPE_VARIANT_C = {
interface AdminFetchOptions {
method?: string;
body?: unknown;
adminKey: string;
adminToken: string;
}
interface PeerRecord {
@@ -107,9 +122,18 @@ interface EnrollmentRedeemResult {
certChainPem: string;
}
interface BootstrapResult {
adminUserId: string;
adminToken: string;
}
export interface SeedResult {
serverAUrl: string;
serverBUrl: string;
adminTokenA: string;
adminTokenB: string;
adminUserIdA: string;
adminUserIdB: string;
grants: {
variantA: GrantRecord;
variantB: GrantRecord;
@@ -124,13 +148,18 @@ export interface SeedResult {
// ─── HTTP helpers ─────────────────────────────────────────────────────────────
/**
* Authenticated admin fetch. Sends `Authorization: Bearer <adminToken>` which
* is the only path supported by AdminGuard (DB-backed sha256 token lookup).
* No `x-admin-key` header path exists in the gateway.
*/
async function adminFetch<T>(baseUrl: string, path: string, opts: AdminFetchOptions): Promise<T> {
const url = `${baseUrl}${path}`;
const res = await fetch(url, {
method: opts.method ?? 'GET',
headers: {
'Content-Type': 'application/json',
'x-admin-key': opts.adminKey,
Authorization: `Bearer ${opts.adminToken}`,
},
body: opts.body !== undefined ? JSON.stringify(opts.body) : undefined,
});
@@ -143,6 +172,74 @@ async function adminFetch<T>(baseUrl: string, path: string, opts: AdminFetchOpti
return res.json() as Promise<T>;
}
// ─── Admin bootstrap ──────────────────────────────────────────────────────────
/**
 * Bootstrap an admin user on a pristine gateway.
 *
 * Steps:
 * 1. GET /api/bootstrap/status — confirms needsSetup === true.
 * 2. POST /api/bootstrap/setup with { name, email, password } — returns
 *    { user, token: { plaintext } }.
 *
 * The harness assumes a fresh DB. If needsSetup is false the harness fails
 * fast with a clear error rather than proceeding with an unknown token.
 *
 * @param baseUrl  Host-reachable base URL of the gateway (e.g. http://localhost:14001).
 * @param label    Human-readable gateway label (e.g. 'Server A'). Used verbatim in
 *                 logs and the display name, and slugified for the admin email's
 *                 local part (spaces are not valid there).
 * @param password Bootstrap password for the first admin user.
 * @returns The created admin user id and the plaintext admin bearer token.
 * @throws If either bootstrap endpoint returns non-2xx, or the gateway already
 *         has users (needsSetup=false).
 */
async function bootstrapAdmin(
  baseUrl: string,
  label: string,
  password: string,
): Promise<BootstrapResult> {
  console.log(`[seed] Bootstrapping admin on ${label} (${baseUrl})...`);

  // 1. Check status — fail fast if the gateway is not pristine.
  const statusRes = await fetch(`${baseUrl}/api/bootstrap/status`);
  if (!statusRes.ok) {
    throw new Error(
      `[seed] GET ${baseUrl}/api/bootstrap/status → ${statusRes.status.toString()}`,
    );
  }
  const status = (await statusRes.json()) as { needsSetup: boolean };
  if (!status.needsSetup) {
    throw new Error(
      `[seed] ${label} at ${baseUrl} already has users (needsSetup=false). ` +
        `The harness requires a pristine database. Run 'docker compose down -v' to reset.`,
    );
  }

  // Labels like 'Server A' contain a space; an email local part must not.
  // Slugify to 'server-a' so the setup endpoint's email validation passes.
  const labelSlug = label.toLowerCase().replace(/[^a-z0-9]+/g, '-');

  // 2. Bootstrap the first admin user.
  const setupRes = await fetch(`${baseUrl}/api/bootstrap/setup`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      name: `Harness Admin (${label})`,
      email: `harness-admin-${labelSlug}@example.invalid`,
      password,
    }),
  });
  if (!setupRes.ok) {
    const body = await setupRes.text().catch(() => '(no body)');
    throw new Error(
      `[seed] POST ${baseUrl}/api/bootstrap/setup → ${setupRes.status.toString()}: ${body}`,
    );
  }
  const result = (await setupRes.json()) as {
    user: { id: string };
    token: { plaintext: string };
  };
  console.log(`[seed] ${label} admin user: ${result.user.id}`);
  console.log(`[seed] ${label} admin token: ${result.token.plaintext.slice(0, 8)}...`);
  return {
    adminUserId: result.user.id,
    adminToken: result.token.plaintext,
  };
}
// ─── Readiness probe ──────────────────────────────────────────────────────────
async function waitForGateway(baseUrl: string, label: string): Promise<void> {
@@ -156,7 +253,7 @@ async function waitForGateway(baseUrl: string, label: string): Promise<void> {
console.log(`[seed] ${label} is ready (${baseUrl})`);
return;
}
lastError = `HTTP ${res.status}`;
lastError = `HTTP ${res.status.toString()}`;
} catch (err) {
lastError = err instanceof Error ? err.message : String(err);
}
@@ -164,48 +261,62 @@ async function waitForGateway(baseUrl: string, label: string): Promise<void> {
}
throw new Error(
`[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS}ms — last error: ${lastError}`,
`[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS.toString()}ms — last error: ${lastError}`,
);
}
// ─── Enrollment flow ──────────────────────────────────────────────────────────
/**
* Walk the full enrollment flow for one grant:
* 1. Create a peer keypair on Server A (generates CSR).
* 2. Create a grant on Server B referencing the peer.
* 3. Generate an enrollment token on Server B.
* 4. Redeem the token on Server B with A's CSR → get cert back.
* 5. Store the cert on Server A's peer record.
* Walk the full enrollment flow for one grant.
*
* Returns the activated grant record + peer info.
* The correct two-sided flow (matching the data model's FK semantics):
*
* 1. On Server B: POST /api/admin/federation/peers/keypair
* → peerId_B (Server B's peer record representing the requesting side)
* 2. On Server B: POST /api/admin/federation/grants with peerId: peerId_B
* → grant (FK to Server B's own federation_peers table — no violation)
* 3. On Server B: POST /api/admin/federation/grants/:id/tokens
* → enrollmentUrl pointing back to Server B
* 4. On Server A: POST /api/admin/federation/peers/keypair
* → peerId_A + csrPem_A (Server A's local record of Server B)
* 5. Server A → Server B: POST enrollmentUrl with { csrPem: csrPem_A }
* → certPem signed by Server B's CA
* 6. On Server A: PATCH /api/admin/federation/peers/:peerId_A/cert with certPem
* → Server A's peer record transitions to active
*
* Returns the activated grant (from Server B) and Server A's peer record.
*/
async function enrollGrant(opts: {
label: string;
subjectUserId: string;
scope: unknown;
adminTokenA: string;
adminTokenB: string;
serverAUrl: string;
serverBUrl: string;
}): Promise<{ grant: GrantRecord; peer: PeerRecord & { grantId: string } }> {
const { label, subjectUserId, scope } = opts;
const { label, subjectUserId, scope, adminTokenA, adminTokenB, serverAUrl, serverBUrl } = opts;
console.log(`\n[seed] Enrolling grant for scope variant ${label}...`);
// 1. Create peer keypair on Server A
const peer = await adminFetch<PeerRecord>(SERVER_A_URL, '/api/admin/federation/peers/keypair', {
// 1. Create peer keypair on Server B (represents the requesting peer from B's perspective)
const peerB = await adminFetch<PeerRecord>(serverBUrl, '/api/admin/federation/peers/keypair', {
method: 'POST',
adminKey: ADMIN_KEY_A,
adminToken: adminTokenB,
body: {
commonName: `harness-peer-${label.toLowerCase()}`,
displayName: `Harness Peer ${label}`,
endpointUrl: `${SERVER_B_URL}`,
commonName: `harness-peer-${label.toLowerCase()}-from-b`,
displayName: `Harness Peer ${label} (Server A as seen from B)`,
endpointUrl: serverAUrl,
},
});
console.log(`[seed] Created peer on A: ${peer.peerId}`);
console.log(`[seed] Created peer on B: ${peerB.peerId}`);
// 2. Create grant on Server B
const grant = await adminFetch<GrantRecord>(SERVER_B_URL, '/api/admin/federation/grants', {
// 2. Create grant on Server B referencing B's own peer record
const grant = await adminFetch<GrantRecord>(serverBUrl, '/api/admin/federation/grants', {
method: 'POST',
adminKey: ADMIN_KEY_B,
adminToken: adminTokenB,
body: {
peerId: peer.peerId,
peerId: peerB.peerId,
subjectUserId,
scope,
},
@@ -214,46 +325,59 @@ async function enrollGrant(opts: {
// 3. Generate enrollment token on Server B
const tokenResult = await adminFetch<EnrollmentTokenResult>(
SERVER_B_URL,
serverBUrl,
`/api/admin/federation/grants/${grant.id}/tokens`,
{ method: 'POST', adminKey: ADMIN_KEY_B, body: { ttlSeconds: 900 } },
{ method: 'POST', adminToken: adminTokenB, body: { ttlSeconds: 900 } },
);
console.log(`[seed] Enrollment token: ${tokenResult.token.slice(0, 8)}...`);
console.log(`[seed] Enrollment URL: ${tokenResult.enrollmentUrl}`);
// 4. Redeem token on Server B with A's CSR
// The enrollment endpoint is not admin-guarded — it uses the one-time token.
const redeemUrl = `${SERVER_B_URL}/api/federation/enrollment/${tokenResult.token}`;
// 4. Create peer keypair on Server A (Server A's local record of Server B)
const peerA = await adminFetch<PeerRecord>(serverAUrl, '/api/admin/federation/peers/keypair', {
method: 'POST',
adminToken: adminTokenA,
body: {
commonName: `harness-peer-${label.toLowerCase()}-from-a`,
displayName: `Harness Peer ${label} (Server B as seen from A)`,
endpointUrl: serverBUrl,
},
});
console.log(`[seed] Created peer on A: ${peerA.peerId}`);
// 5. Redeem token at Server B's enrollment endpoint with A's CSR
// The enrollment endpoint is not admin-guarded — the one-time token IS the credential.
const redeemUrl = tokenResult.enrollmentUrl;
const redeemRes = await fetch(redeemUrl, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ csrPem: peer.csrPem }),
body: JSON.stringify({ csrPem: peerA.csrPem }),
});
if (!redeemRes.ok) {
const body = await redeemRes.text().catch(() => '(no body)');
throw new Error(`Enrollment redemption failed: ${redeemRes.status}${body}`);
throw new Error(`Enrollment redemption failed: ${redeemRes.status.toString()}${body}`);
}
const redeemResult = (await redeemRes.json()) as EnrollmentRedeemResult;
console.log(`[seed] Cert issued (${redeemResult.certPem.length} bytes)`);
console.log(`[seed] Cert issued (${redeemResult.certPem.length.toString()} bytes)`);
// 5. Store cert on Server A peer record
await adminFetch<unknown>(SERVER_A_URL, `/api/admin/federation/peers/${peer.peerId}/cert`, {
// 6. Store cert on Server A's peer record → transitions to active
await adminFetch<unknown>(serverAUrl, `/api/admin/federation/peers/${peerA.peerId}/cert`, {
method: 'PATCH',
adminKey: ADMIN_KEY_A,
adminToken: adminTokenA,
body: { certPem: redeemResult.certPem },
});
console.log(`[seed] Cert stored on A — peer ${peer.peerId} is now active`);
console.log(`[seed] Cert stored on A — peer ${peerA.peerId} is now active`);
// Verify grant flipped to active on B
const activeGrant = await adminFetch<GrantRecord>(
SERVER_B_URL,
serverBUrl,
`/api/admin/federation/grants/${grant.id}`,
{ adminKey: ADMIN_KEY_B },
{ adminToken: adminTokenB },
);
console.log(`[seed] Grant status on B: ${activeGrant.status}`);
return { grant: activeGrant, peer: { ...peer, grantId: grant.id } };
return { grant: activeGrant, peer: { ...peerA, grantId: grant.id } };
}
// ─── Test data insertion ──────────────────────────────────────────────────────
@@ -269,7 +393,12 @@ async function enrollGrant(opts: {
* If that endpoint does not yet exist, this function logs a warning and skips
* without failing — M3-11 will add the session-based seeding path.
*/
async function seedTestData(subjectUserId: string, scopeLabel: string): Promise<void> {
async function seedTestData(
subjectUserId: string,
scopeLabel: string,
serverBUrl: string,
adminTokenB: string,
): Promise<void> {
console.log(`\n[seed] Seeding test data on Server B for ${scopeLabel}...`);
const testTasks = [
@@ -297,9 +426,9 @@ async function seedTestData(subjectUserId: string, scopeLabel: string): Promise<
// Attempt to insert — tolerate 404 (endpoint not yet implemented)
for (const task of testTasks) {
try {
await adminFetch<unknown>(SERVER_B_URL, '/api/admin/tasks', {
await adminFetch<unknown>(serverBUrl, '/api/admin/tasks', {
method: 'POST',
adminKey: ADMIN_KEY_B,
adminToken: adminTokenB,
body: task,
});
console.log(`[seed] Inserted task: "${task.title}"`);
@@ -317,9 +446,9 @@ async function seedTestData(subjectUserId: string, scopeLabel: string): Promise<
for (const note of testNotes) {
try {
await adminFetch<unknown>(SERVER_B_URL, '/api/admin/notes', {
await adminFetch<unknown>(serverBUrl, '/api/admin/notes', {
method: 'POST',
adminKey: ADMIN_KEY_B,
adminToken: adminTokenB,
body: note,
});
console.log(`[seed] Inserted note: "${note.title}"`);
@@ -343,45 +472,84 @@ async function seedTestData(subjectUserId: string, scopeLabel: string): Promise<
export async function runSeed(opts?: {
serverAUrl?: string;
serverBUrl?: string;
adminKeyA?: string;
adminKeyB?: string;
adminBootstrapPasswordA?: string;
adminBootstrapPasswordB?: string;
subjectUserIds?: { variantA: string; variantB: string; variantC: string };
}): Promise<SeedResult> {
const aUrl = opts?.serverAUrl ?? SERVER_A_URL;
const bUrl = opts?.serverBUrl ?? SERVER_B_URL;
const keyA = opts?.adminKeyA ?? ADMIN_KEY_A;
const keyB = opts?.adminKeyB ?? ADMIN_KEY_B;
const passwordA = opts?.adminBootstrapPasswordA ?? ADMIN_BOOTSTRAP_PASSWORD_A;
const passwordB = opts?.adminBootstrapPasswordB ?? ADMIN_BOOTSTRAP_PASSWORD_B;
// Use provided or default subject user IDs
// Use provided or default subject user IDs.
// In a real run these would be real user UUIDs from Server B's DB.
// For the harness, we use deterministic UUIDs that the seed bootstrap creates.
const subjectIds = opts?.subjectUserIds ?? {
variantA: '00000000-0000-0000-0000-000000000001',
variantB: '00000000-0000-0000-0000-000000000002',
variantC: '00000000-0000-0000-0000-000000000003',
};
// For the harness, the admin bootstrap user on Server B is used as the subject.
// These are overridden after bootstrap if opts.subjectUserIds is not provided.
const subjectIds = opts?.subjectUserIds;
console.log('[seed] Waiting for gateways to be ready...');
await Promise.all([waitForGateway(aUrl, 'Server A'), waitForGateway(bUrl, 'Server B')]);
// Enroll all three scope variants in parallel
console.log('\n[seed] Enrolling scope variants...');
const [resultA, resultB, resultC] = await Promise.all([
enrollGrant({ label: 'A', subjectUserId: subjectIds.variantA, scope: SCOPE_VARIANT_A }),
enrollGrant({ label: 'B', subjectUserId: subjectIds.variantB, scope: SCOPE_VARIANT_B }),
enrollGrant({ label: 'C', subjectUserId: subjectIds.variantC, scope: SCOPE_VARIANT_C }),
// Bootstrap admin users on both gateways (requires pristine DBs).
console.log('\n[seed] Bootstrapping admin accounts...');
const [bootstrapA, bootstrapB] = await Promise.all([
bootstrapAdmin(aUrl, 'Server A', passwordA),
bootstrapAdmin(bUrl, 'Server B', passwordB),
]);
// Default subject user IDs to the admin user on Server B (guaranteed to exist).
const resolvedSubjectIds = subjectIds ?? {
variantA: bootstrapB.adminUserId,
variantB: bootstrapB.adminUserId,
variantC: bootstrapB.adminUserId,
};
// Enroll all three scope variants sequentially. This avoids contention on
// the step-ca signing queue and keeps the log output ordered, which makes
// failures much easier to debug than interleaved parallel enrollment.
console.log('\n[seed] Enrolling scope variants...');
const resultA = await enrollGrant({
label: 'A',
subjectUserId: resolvedSubjectIds.variantA,
scope: SCOPE_VARIANT_A,
adminTokenA: bootstrapA.adminToken,
adminTokenB: bootstrapB.adminToken,
serverAUrl: aUrl,
serverBUrl: bUrl,
});
const resultB = await enrollGrant({
label: 'B',
subjectUserId: resolvedSubjectIds.variantB,
scope: SCOPE_VARIANT_B,
adminTokenA: bootstrapA.adminToken,
adminTokenB: bootstrapB.adminToken,
serverAUrl: aUrl,
serverBUrl: bUrl,
});
const resultC = await enrollGrant({
label: 'C',
subjectUserId: resolvedSubjectIds.variantC,
scope: SCOPE_VARIANT_C,
adminTokenA: bootstrapA.adminToken,
adminTokenB: bootstrapB.adminToken,
serverAUrl: aUrl,
serverBUrl: bUrl,
});
// Seed test data on Server B for each scope variant
await Promise.all([
seedTestData(subjectIds.variantA, 'A'),
seedTestData(subjectIds.variantB, 'B'),
seedTestData(subjectIds.variantC, 'C'),
seedTestData(resolvedSubjectIds.variantA, 'A', bUrl, bootstrapB.adminToken),
seedTestData(resolvedSubjectIds.variantB, 'B', bUrl, bootstrapB.adminToken),
seedTestData(resolvedSubjectIds.variantC, 'C', bUrl, bootstrapB.adminToken),
]);
const result: SeedResult = {
serverAUrl: aUrl,
serverBUrl: bUrl,
adminTokenA: bootstrapA.adminToken,
adminTokenB: bootstrapB.adminToken,
adminUserIdA: bootstrapA.adminUserId,
adminUserIdB: bootstrapB.adminUserId,
grants: {
variantA: resultA.grant,
variantB: resultB.grant,