/**
|
|
* tools/federation-harness/harness.ts
|
|
*
|
|
* Vitest-consumable helpers for the two-gateway federation harness.
|
|
*
|
|
* USAGE (in a vitest test file):
|
|
*
|
|
* import { bootHarness, tearDownHarness, serverA, serverB, seed } from
|
|
* '../../tools/federation-harness/harness.js';
|
|
*
|
|
* let handle: HarnessHandle;
|
|
*
|
|
* beforeAll(async () => {
|
|
* handle = await bootHarness();
|
|
* }, 180_000);
|
|
*
|
|
* afterAll(async () => {
|
|
* await tearDownHarness(handle);
|
|
* });
|
|
*
|
|
* test('variant A — list tasks', async () => {
|
|
* const seedResult = await seed(handle, 'all');
|
|
* const a = serverA(handle);
|
|
* const res = await fetch(`${a.baseUrl}/api/federation/list/tasks`, {
|
|
* headers: { Authorization: `Bearer ${seedResult.adminTokenA}` },
|
|
* });
|
|
* expect(res.status).toBe(200);
|
|
* });
|
|
*
|
|
* NOTE: The `seed()` helper currently only supports scenario='all'. Passing any
|
|
* other value throws immediately. Per-variant narrowing is deferred to M3-11.
|
|
*
|
|
* ESM / NodeNext: all imports use .js extensions.
|
|
*/
|
|
|
|
import { execSync, execFileSync } from 'node:child_process';
|
|
import { resolve, dirname } from 'node:path';
|
|
import { fileURLToPath } from 'node:url';
|
|
import { runSeed, type SeedResult } from './seed.js';
|
|
|
|
// ─── Types ───────────────────────────────────────────────────────────────────
|
|
|
|
/**
 * Host-facing connection details for one gateway in the two-gateway stack.
 * Built by bootHarness(); consumed by tests via serverA()/serverB().
 */
export interface GatewayAccessor {
  /** Base URL reachable from the host machine, e.g. http://localhost:14001 */
  baseUrl: string;
  /** Bootstrap password used for POST /api/bootstrap/setup on a pristine gateway */
  bootstrapPassword: string;
  /** Internal Docker network hostname (for container-to-container calls) */
  internalHostname: string;
}
|
|
|
|
/**
 * Handle returned by bootHarness(). Carries both gateway accessors plus
 * ownership state that tearDownHarness() consults before destroying the stack.
 */
export interface HarnessHandle {
  /** Server A accessor */
  a: GatewayAccessor;
  /** Server B accessor */
  b: GatewayAccessor;
  /** Absolute path to the docker-compose file */
  composeFile: string;
  /** Whether this instance booted the stack (vs. reusing an existing one) */
  ownedStack: boolean;
  /** Optional seed result if seed() was called */
  seedResult?: SeedResult;
}
|
|
|
|
/**
 * Scenario to seed. Currently only 'all' is implemented; per-variant narrowing
 * is tracked as M3-11. Passing any other value to seed() throws immediately
 * with a clear error rather than silently over-seeding.
 */
export type SeedScenario = 'variantA' | 'variantB' | 'variantC' | 'all';
|
|
|
|
// ─── Constants ────────────────────────────────────────────────────────────────
|
|
|
|
// ESM has no __dirname; derive it from import.meta.url so the compose file
// resolves relative to this file, not the process working directory.
const __dirname = dirname(fileURLToPath(import.meta.url));
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');

// Host-reachable gateway URLs and bootstrap passwords. Each is overridable via
// the environment (e.g. for CI port remapping); defaults match the compose file.
const GATEWAY_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const GATEWAY_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
const ADMIN_BOOTSTRAP_PASSWORD_A =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';

// Per-gateway health-check budget and poll interval used by waitForStack().
const READINESS_TIMEOUT_MS = 180_000;
const READINESS_POLL_MS = 3_000;
|
|
|
|
// ─── Internal helpers ─────────────────────────────────────────────────────────
|
|
|
|
async function isGatewayHealthy(baseUrl: string): Promise<boolean> {
|
|
try {
|
|
const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
|
|
return res.ok;
|
|
} catch {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Poll both gateways in parallel until both are healthy or the shared deadline
|
|
* expires. Polling in parallel (rather than sequentially) avoids the bug where
|
|
* a slow gateway-a consumes all of the readiness budget before gateway-b is
|
|
* checked.
|
|
*/
|
|
async function waitForStack(handle: HarnessHandle): Promise<void> {
|
|
const gateways: Array<{ label: string; url: string }> = [
|
|
{ label: 'gateway-a', url: handle.a.baseUrl },
|
|
{ label: 'gateway-b', url: handle.b.baseUrl },
|
|
];
|
|
|
|
await Promise.all(
|
|
gateways.map(async (gw) => {
|
|
// Each gateway gets its own independent deadline.
|
|
const deadline = Date.now() + READINESS_TIMEOUT_MS;
|
|
process.stdout.write(`[harness] Waiting for ${gw.label}...`);
|
|
|
|
while (Date.now() < deadline) {
|
|
if (await isGatewayHealthy(gw.url)) {
|
|
process.stdout.write(` ready\n`);
|
|
return;
|
|
}
|
|
if (Date.now() + READINESS_POLL_MS > deadline) {
|
|
throw new Error(
|
|
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
|
|
);
|
|
}
|
|
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
|
|
process.stdout.write('.');
|
|
}
|
|
|
|
throw new Error(
|
|
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
|
|
);
|
|
}),
|
|
);
|
|
}
|
|
|
|
function isStackRunning(): boolean {
|
|
try {
|
|
const output = execFileSync(
|
|
'docker',
|
|
['compose', '-f', COMPOSE_FILE, 'ps', '--format', 'json'],
|
|
{ encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] },
|
|
);
|
|
|
|
if (!output.trim()) return false;
|
|
|
|
// Parse JSON lines — each running service emits a JSON object per line
|
|
const lines = output.trim().split('\n').filter(Boolean);
|
|
const runningServices = lines.filter((line) => {
|
|
try {
|
|
const obj = JSON.parse(line) as { State?: string };
|
|
return obj.State === 'running';
|
|
} catch {
|
|
return false;
|
|
}
|
|
});
|
|
|
|
// Expect at least gateway-a and gateway-b running
|
|
return runningServices.length >= 2;
|
|
} catch {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
// ─── Public API ───────────────────────────────────────────────────────────────
|
|
|
|
/**
|
|
* Boot the harness stack.
|
|
*
|
|
* Idempotent: if the stack is already running and both gateways are healthy,
|
|
* this function reuses the existing stack and returns a handle with
|
|
* `ownedStack: false`. Callers that set `ownedStack: false` should NOT call
|
|
* `tearDownHarness` unless they explicitly want to tear down a pre-existing stack.
|
|
*
|
|
* If the stack is not running, it starts it with `docker compose up -d` and
|
|
* waits for both gateways to pass their /api/health probe.
|
|
*/
|
|
export async function bootHarness(): Promise<HarnessHandle> {
|
|
const handle: HarnessHandle = {
|
|
a: {
|
|
baseUrl: GATEWAY_A_URL,
|
|
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_A,
|
|
internalHostname: 'gateway-a',
|
|
},
|
|
b: {
|
|
baseUrl: GATEWAY_B_URL,
|
|
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_B,
|
|
internalHostname: 'gateway-b',
|
|
},
|
|
composeFile: COMPOSE_FILE,
|
|
ownedStack: false,
|
|
};
|
|
|
|
// Check if both gateways are already healthy
|
|
const [aHealthy, bHealthy] = await Promise.all([
|
|
isGatewayHealthy(handle.a.baseUrl),
|
|
isGatewayHealthy(handle.b.baseUrl),
|
|
]);
|
|
|
|
if (aHealthy && bHealthy) {
|
|
console.log('[harness] Stack already running — reusing existing stack.');
|
|
handle.ownedStack = false;
|
|
return handle;
|
|
}
|
|
|
|
console.log('[harness] Starting federation harness stack...');
|
|
execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
|
|
handle.ownedStack = true;
|
|
|
|
await waitForStack(handle);
|
|
console.log('[harness] Stack is ready.');
|
|
|
|
return handle;
|
|
}
|
|
|
|
/**
|
|
* Tear down the harness stack.
|
|
*
|
|
* Runs `docker compose down -v` to remove containers AND volumes (ephemeral state).
|
|
* Only tears down if `handle.ownedStack` is true unless `force` is set.
|
|
*/
|
|
export async function tearDownHarness(
|
|
handle: HarnessHandle,
|
|
opts?: { force?: boolean },
|
|
): Promise<void> {
|
|
if (!handle.ownedStack && !opts?.force) {
|
|
console.log(
|
|
'[harness] Stack not owned by this handle — skipping teardown (pass force: true to override).',
|
|
);
|
|
return;
|
|
}
|
|
|
|
console.log('[harness] Tearing down federation harness stack...');
|
|
execSync(`docker compose -f "${handle.composeFile}" down -v`, { stdio: 'inherit' });
|
|
console.log('[harness] Stack torn down.');
|
|
}
|
|
|
|
/**
|
|
* Return the Server A accessor from a harness handle.
|
|
* Convenience wrapper for test readability.
|
|
*/
|
|
export function serverA(handle: HarnessHandle): GatewayAccessor {
|
|
return handle.a;
|
|
}
|
|
|
|
/**
|
|
* Return the Server B accessor from a harness handle.
|
|
* Convenience wrapper for test readability.
|
|
*/
|
|
export function serverB(handle: HarnessHandle): GatewayAccessor {
|
|
return handle.b;
|
|
}
|
|
|
|
/**
|
|
* Seed the harness with test data for one or more scenarios.
|
|
*
|
|
* @param handle The harness handle returned by bootHarness().
|
|
* @param scenario Which scope variants to provision. Currently only 'all' is
|
|
* supported — passing any other value throws immediately with a
|
|
* clear error. Per-variant narrowing is tracked as M3-11.
|
|
*
|
|
* Returns a SeedResult with grant IDs, peer IDs, and admin tokens for each
|
|
* gateway, which test assertions can reference.
|
|
*
|
|
* IMPORTANT: The harness assumes a pristine database on both gateways. The seed
|
|
* bootstraps an admin user on each gateway via POST /api/bootstrap/setup. If
|
|
* either gateway already has users, seed() throws with a clear error message.
|
|
* Run 'docker compose down -v' to reset state.
|
|
*/
|
|
export async function seed(
|
|
handle: HarnessHandle,
|
|
scenario: SeedScenario = 'all',
|
|
): Promise<SeedResult> {
|
|
if (scenario !== 'all') {
|
|
throw new Error(
|
|
`seed: scenario narrowing not yet implemented; pass "all" for now. ` +
|
|
`Got: "${scenario}". Per-variant narrowing is tracked as M3-11.`,
|
|
);
|
|
}
|
|
|
|
const result = await runSeed({
|
|
serverAUrl: handle.a.baseUrl,
|
|
serverBUrl: handle.b.baseUrl,
|
|
adminBootstrapPasswordA: handle.a.bootstrapPassword,
|
|
adminBootstrapPasswordB: handle.b.bootstrapPassword,
|
|
});
|
|
|
|
handle.seedResult = result;
|
|
return result;
|
|
}
|