Compare commits
2 Commits
dc122e138b
...
feat/feder
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
08bea8fba0 | ||
|
|
17f1423318 |
@@ -1,243 +0,0 @@
|
|||||||
/**
|
|
||||||
* Federation M2 E2E test — peer-add enrollment flow (FED-M2-10).
|
|
||||||
*
|
|
||||||
* Covers MILESTONES.md acceptance test #6:
|
|
||||||
* "`peer add <url>` on Server A yields an `active` peer record with a valid cert + key"
|
|
||||||
*
|
|
||||||
* This test simulates two gateways using a single bootstrapped NestJS app:
|
|
||||||
* - "Server A": the admin API that generates a keypair and stores the cert
|
|
||||||
* - "Server B": the enrollment endpoint that signs the CSR
|
|
||||||
* Both share the same DB + Step-CA in the test environment.
|
|
||||||
*
|
|
||||||
* Prerequisites:
|
|
||||||
* docker compose -f docker-compose.federated.yml --profile federated up -d
|
|
||||||
*
|
|
||||||
* Run:
|
|
||||||
* FEDERATED_INTEGRATION=1 STEP_CA_AVAILABLE=1 \
|
|
||||||
* STEP_CA_URL=https://localhost:9000 \
|
|
||||||
* STEP_CA_PROVISIONER_KEY_JSON="$(docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json)" \
|
|
||||||
* STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt \
|
|
||||||
* pnpm --filter @mosaicstack/gateway test \
|
|
||||||
* src/__tests__/integration/federation-m2-e2e.integration.test.ts
|
|
||||||
*
|
|
||||||
* Obtaining Step-CA credentials:
|
|
||||||
* # Extract provisioner key from running container:
|
|
||||||
* # docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json
|
|
||||||
* # Copy root cert from container:
|
|
||||||
* # docker cp $(docker ps -qf name=step-ca):/home/step/certs/root_ca.crt /tmp/step-ca-root.crt
|
|
||||||
* # Then: export STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt
|
|
||||||
*
|
|
||||||
* Skipped unless both FEDERATED_INTEGRATION=1 and STEP_CA_AVAILABLE=1 are set.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import * as crypto from 'node:crypto';
|
|
||||||
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
|
|
||||||
import { Test } from '@nestjs/testing';
|
|
||||||
import { ValidationPipe } from '@nestjs/common';
|
|
||||||
import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify';
|
|
||||||
import supertest from 'supertest';
|
|
||||||
import {
|
|
||||||
createDb,
|
|
||||||
type Db,
|
|
||||||
type DbHandle,
|
|
||||||
federationPeers,
|
|
||||||
federationGrants,
|
|
||||||
federationEnrollmentTokens,
|
|
||||||
inArray,
|
|
||||||
eq,
|
|
||||||
} from '@mosaicstack/db';
|
|
||||||
import * as schema from '@mosaicstack/db';
|
|
||||||
import { DB } from '../../database/database.module.js';
|
|
||||||
import { AdminGuard } from '../../admin/admin.guard.js';
|
|
||||||
import { FederationModule } from '../../federation/federation.module.js';
|
|
||||||
import { GrantsService } from '../../federation/grants.service.js';
|
|
||||||
import { EnrollmentService } from '../../federation/enrollment.service.js';
|
|
||||||
|
|
||||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
|
||||||
const stepCaRun =
|
|
||||||
run &&
|
|
||||||
process.env['STEP_CA_AVAILABLE'] === '1' &&
|
|
||||||
!!process.env['STEP_CA_URL'] &&
|
|
||||||
!!process.env['STEP_CA_PROVISIONER_KEY_JSON'] &&
|
|
||||||
!!process.env['STEP_CA_ROOT_CERT_PATH'];
|
|
||||||
|
|
||||||
const PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
|
||||||
|
|
||||||
const RUN_ID = crypto.randomUUID();
|
|
||||||
|
|
||||||
describe.skipIf(!stepCaRun)('federation M2 E2E — peer add enrollment flow', () => {
|
|
||||||
let handle: DbHandle;
|
|
||||||
let db: Db;
|
|
||||||
let app: NestFastifyApplication;
|
|
||||||
let agent: ReturnType<typeof supertest>;
|
|
||||||
let grantsService: GrantsService;
|
|
||||||
let enrollmentService: EnrollmentService;
|
|
||||||
|
|
||||||
const createdTokenGrantIds: string[] = [];
|
|
||||||
const createdGrantIds: string[] = [];
|
|
||||||
const createdPeerIds: string[] = [];
|
|
||||||
const createdUserIds: string[] = [];
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
process.env['BETTER_AUTH_SECRET'] ??= 'test-e2e-sealing-key';
|
|
||||||
|
|
||||||
handle = createDb(PG_URL);
|
|
||||||
db = handle.db;
|
|
||||||
|
|
||||||
const moduleRef = await Test.createTestingModule({
|
|
||||||
imports: [FederationModule],
|
|
||||||
providers: [{ provide: DB, useValue: db }],
|
|
||||||
})
|
|
||||||
.overrideGuard(AdminGuard)
|
|
||||||
.useValue({ canActivate: () => true })
|
|
||||||
.compile();
|
|
||||||
|
|
||||||
app = moduleRef.createNestApplication<NestFastifyApplication>(new FastifyAdapter());
|
|
||||||
app.useGlobalPipes(new ValidationPipe({ whitelist: true, transform: true }));
|
|
||||||
await app.init();
|
|
||||||
await app.getHttpAdapter().getInstance().ready();
|
|
||||||
|
|
||||||
agent = supertest(app.getHttpServer());
|
|
||||||
|
|
||||||
grantsService = moduleRef.get(GrantsService);
|
|
||||||
enrollmentService = moduleRef.get(EnrollmentService);
|
|
||||||
}, 30_000);
|
|
||||||
|
|
||||||
afterAll(async () => {
|
|
||||||
if (db && createdTokenGrantIds.length > 0) {
|
|
||||||
await db
|
|
||||||
.delete(federationEnrollmentTokens)
|
|
||||||
.where(inArray(federationEnrollmentTokens.grantId, createdTokenGrantIds))
|
|
||||||
.catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
}
|
|
||||||
if (db && createdGrantIds.length > 0) {
|
|
||||||
await db
|
|
||||||
.delete(federationGrants)
|
|
||||||
.where(inArray(federationGrants.id, createdGrantIds))
|
|
||||||
.catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
}
|
|
||||||
if (db && createdPeerIds.length > 0) {
|
|
||||||
await db
|
|
||||||
.delete(federationPeers)
|
|
||||||
.where(inArray(federationPeers.id, createdPeerIds))
|
|
||||||
.catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
}
|
|
||||||
if (db && createdUserIds.length > 0) {
|
|
||||||
await db
|
|
||||||
.delete(schema.users)
|
|
||||||
.where(inArray(schema.users.id, createdUserIds))
|
|
||||||
.catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
}
|
|
||||||
if (app)
|
|
||||||
await app.close().catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
if (handle)
|
|
||||||
await handle.close().catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
|
|
||||||
});
|
|
||||||
|
|
||||||
// -------------------------------------------------------------------------
|
|
||||||
// #6 — peer add: keypair → enrollment → cert storage → active peer record
|
|
||||||
// -------------------------------------------------------------------------
|
|
||||||
it('#6 — peer add flow: keypair → enrollment → cert storage → active peer record', async () => {
|
|
||||||
// Create a subject user to satisfy FK on federation_grants.subject_user_id
|
|
||||||
const userId = crypto.randomUUID();
|
|
||||||
await db
|
|
||||||
.insert(schema.users)
|
|
||||||
.values({
|
|
||||||
id: userId,
|
|
||||||
name: `e2e-user-${RUN_ID}`,
|
|
||||||
email: `e2e-${RUN_ID}@federation-test.invalid`,
|
|
||||||
emailVerified: false,
|
|
||||||
})
|
|
||||||
.onConflictDoNothing();
|
|
||||||
createdUserIds.push(userId);
|
|
||||||
|
|
||||||
// ── Step A: "Server B" setup ─────────────────────────────────────────
|
|
||||||
// Server B admin creates a grant and generates an enrollment token to
|
|
||||||
// share out-of-band with Server A's operator.
|
|
||||||
|
|
||||||
// Insert a placeholder peer on "Server B" to satisfy the grant FK
|
|
||||||
const serverBPeerId = crypto.randomUUID();
|
|
||||||
await db
|
|
||||||
.insert(federationPeers)
|
|
||||||
.values({
|
|
||||||
id: serverBPeerId,
|
|
||||||
commonName: `server-b-peer-${RUN_ID}`,
|
|
||||||
displayName: 'Server B Placeholder',
|
|
||||||
certPem: '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n',
|
|
||||||
certSerial: `serial-b-${serverBPeerId}`,
|
|
||||||
certNotAfter: new Date(Date.now() + 365 * 24 * 60 * 60 * 1000),
|
|
||||||
state: 'pending',
|
|
||||||
})
|
|
||||||
.onConflictDoNothing();
|
|
||||||
createdPeerIds.push(serverBPeerId);
|
|
||||||
|
|
||||||
const grant = await grantsService.createGrant({
|
|
||||||
subjectUserId: userId,
|
|
||||||
scope: { resources: ['tasks'], excluded_resources: [], max_rows_per_query: 100 },
|
|
||||||
peerId: serverBPeerId,
|
|
||||||
});
|
|
||||||
createdGrantIds.push(grant.id);
|
|
||||||
createdTokenGrantIds.push(grant.id);
|
|
||||||
|
|
||||||
const { token } = await enrollmentService.createToken({
|
|
||||||
grantId: grant.id,
|
|
||||||
peerId: serverBPeerId,
|
|
||||||
ttlSeconds: 900,
|
|
||||||
});
|
|
||||||
|
|
||||||
// ── Step B: "Server A" generates keypair ─────────────────────────────
|
|
||||||
const keypairRes = await agent
|
|
||||||
.post('/api/admin/federation/peers/keypair')
|
|
||||||
.send({
|
|
||||||
commonName: `e2e-peer-${RUN_ID.slice(0, 8)}`,
|
|
||||||
displayName: 'E2E Test Peer',
|
|
||||||
endpointUrl: 'https://test.invalid',
|
|
||||||
})
|
|
||||||
.set('Content-Type', 'application/json');
|
|
||||||
|
|
||||||
expect(keypairRes.status).toBe(201);
|
|
||||||
const { peerId, csrPem } = keypairRes.body as { peerId: string; csrPem: string };
|
|
||||||
expect(typeof peerId).toBe('string');
|
|
||||||
expect(csrPem).toContain('-----BEGIN CERTIFICATE REQUEST-----');
|
|
||||||
createdPeerIds.push(peerId);
|
|
||||||
|
|
||||||
// ── Step C: Enrollment (simulates Server A sending CSR to Server B) ──
|
|
||||||
const enrollRes = await agent
|
|
||||||
.post(`/api/federation/enrollment/${token}`)
|
|
||||||
.send({ csrPem })
|
|
||||||
.set('Content-Type', 'application/json');
|
|
||||||
|
|
||||||
expect(enrollRes.status).toBe(200);
|
|
||||||
const { certPem, certChainPem } = enrollRes.body as {
|
|
||||||
certPem: string;
|
|
||||||
certChainPem: string;
|
|
||||||
};
|
|
||||||
expect(certPem).toContain('-----BEGIN CERTIFICATE-----');
|
|
||||||
expect(certChainPem).toContain('-----BEGIN CERTIFICATE-----');
|
|
||||||
|
|
||||||
// ── Step D: "Server A" stores the cert ───────────────────────────────
|
|
||||||
const storeRes = await agent
|
|
||||||
.patch(`/api/admin/federation/peers/${peerId}/cert`)
|
|
||||||
.send({ certPem })
|
|
||||||
.set('Content-Type', 'application/json');
|
|
||||||
|
|
||||||
expect(storeRes.status).toBe(200);
|
|
||||||
|
|
||||||
// ── Step E: Verify peer record in DB ─────────────────────────────────
|
|
||||||
const [peer] = await db
|
|
||||||
.select()
|
|
||||||
.from(federationPeers)
|
|
||||||
.where(eq(federationPeers.id, peerId))
|
|
||||||
.limit(1);
|
|
||||||
|
|
||||||
expect(peer).toBeDefined();
|
|
||||||
expect(peer?.state).toBe('active');
|
|
||||||
expect(peer?.certPem).toContain('-----BEGIN CERTIFICATE-----');
|
|
||||||
expect(typeof peer?.certSerial).toBe('string');
|
|
||||||
expect((peer?.certSerial ?? '').length).toBeGreaterThan(0);
|
|
||||||
// clientKeyPem is a sealed ciphertext — must not be a raw PEM
|
|
||||||
expect(peer?.clientKeyPem?.startsWith('-----BEGIN')).toBe(false);
|
|
||||||
// certNotAfter must be in the future
|
|
||||||
expect(peer?.certNotAfter?.getTime()).toBeGreaterThan(Date.now());
|
|
||||||
}, 60_000);
|
|
||||||
});
|
|
||||||
@@ -35,7 +35,7 @@ import * as crypto from 'node:crypto';
|
|||||||
import * as fs from 'node:fs';
|
import * as fs from 'node:fs';
|
||||||
import * as https from 'node:https';
|
import * as https from 'node:https';
|
||||||
import { SignJWT, importJWK } from 'jose';
|
import { SignJWT, importJWK } from 'jose';
|
||||||
import { Pkcs10CertificateRequest, X509Certificate } from '@peculiar/x509';
|
import { Pkcs10CertificateRequest } from '@peculiar/x509';
|
||||||
import type { IssueCertRequestDto } from './ca.dto.js';
|
import type { IssueCertRequestDto } from './ca.dto.js';
|
||||||
import { IssuedCertDto } from './ca.dto.js';
|
import { IssuedCertDto } from './ca.dto.js';
|
||||||
|
|
||||||
@@ -624,51 +624,6 @@ export class CaService {
|
|||||||
|
|
||||||
const serialNumber = extractSerial(response.crt);
|
const serialNumber = extractSerial(response.crt);
|
||||||
|
|
||||||
// CRIT-1: Verify the issued certificate contains both Mosaic OID extensions
|
|
||||||
// with the correct values. Step-CA's federation.tpl encodes each as an ASN.1
|
|
||||||
// UTF8String TLV: tag 0x0C + 1-byte length + UUID bytes. We skip 2 bytes
|
|
||||||
// (tag + length) to extract the raw UUID string.
|
|
||||||
const issuedCert = new X509Certificate(response.crt);
|
|
||||||
const decoder = new TextDecoder();
|
|
||||||
|
|
||||||
const grantIdExt = issuedCert.getExtension('1.3.6.1.4.1.99999.1');
|
|
||||||
if (!grantIdExt) {
|
|
||||||
throw new CaServiceError(
|
|
||||||
'Issued certificate is missing required Mosaic OID: mosaic_grant_id',
|
|
||||||
'The Step-CA federation.tpl template did not embed OID 1.3.6.1.4.1.99999.1. Check the provisioner template configuration.',
|
|
||||||
undefined,
|
|
||||||
'OID_MISSING',
|
|
||||||
);
|
|
||||||
}
|
|
||||||
const grantIdInCert = decoder.decode(grantIdExt.value.slice(2));
|
|
||||||
if (grantIdInCert !== req.grantId) {
|
|
||||||
throw new CaServiceError(
|
|
||||||
`Issued certificate mosaic_grant_id mismatch: expected ${req.grantId}, got ${grantIdInCert}`,
|
|
||||||
'The Step-CA issued a certificate with a different grant ID than requested. This may indicate a provisioner misconfiguration or a MITM.',
|
|
||||||
undefined,
|
|
||||||
'OID_MISMATCH',
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const subjectUserIdExt = issuedCert.getExtension('1.3.6.1.4.1.99999.2');
|
|
||||||
if (!subjectUserIdExt) {
|
|
||||||
throw new CaServiceError(
|
|
||||||
'Issued certificate is missing required Mosaic OID: mosaic_subject_user_id',
|
|
||||||
'The Step-CA federation.tpl template did not embed OID 1.3.6.1.4.1.99999.2. Check the provisioner template configuration.',
|
|
||||||
undefined,
|
|
||||||
'OID_MISSING',
|
|
||||||
);
|
|
||||||
}
|
|
||||||
const subjectUserIdInCert = decoder.decode(subjectUserIdExt.value.slice(2));
|
|
||||||
if (subjectUserIdInCert !== req.subjectUserId) {
|
|
||||||
throw new CaServiceError(
|
|
||||||
`Issued certificate mosaic_subject_user_id mismatch: expected ${req.subjectUserId}, got ${subjectUserIdInCert}`,
|
|
||||||
'The Step-CA issued a certificate with a different subject user ID than requested. This may indicate a provisioner misconfiguration or a MITM.',
|
|
||||||
undefined,
|
|
||||||
'OID_MISMATCH',
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
this.logger.log(`Certificate issued — serial=${serialNumber} grantId=${req.grantId}`);
|
this.logger.log(`Certificate issued — serial=${serialNumber} grantId=${req.grantId}`);
|
||||||
|
|
||||||
const result = new IssuedCertDto();
|
const result = new IssuedCertDto();
|
||||||
|
|||||||
@@ -14,7 +14,6 @@
|
|||||||
|
|
||||||
import {
|
import {
|
||||||
BadRequestException,
|
BadRequestException,
|
||||||
ConflictException,
|
|
||||||
GoneException,
|
GoneException,
|
||||||
Inject,
|
Inject,
|
||||||
Injectable,
|
Injectable,
|
||||||
@@ -67,21 +66,6 @@ export class EnrollmentService {
|
|||||||
*/
|
*/
|
||||||
async createToken(dto: CreateEnrollmentTokenDto): Promise<EnrollmentTokenResult> {
|
async createToken(dto: CreateEnrollmentTokenDto): Promise<EnrollmentTokenResult> {
|
||||||
const ttl = Math.min(dto.ttlSeconds, 900);
|
const ttl = Math.min(dto.ttlSeconds, 900);
|
||||||
|
|
||||||
// MED-3: Verify the grantId ↔ peerId binding — prevents attacker from
|
|
||||||
// cross-wiring grants to attacker-controlled peers.
|
|
||||||
const [grant] = await this.db
|
|
||||||
.select({ peerId: federationGrants.peerId })
|
|
||||||
.from(federationGrants)
|
|
||||||
.where(eq(federationGrants.id, dto.grantId))
|
|
||||||
.limit(1);
|
|
||||||
if (!grant) {
|
|
||||||
throw new NotFoundException(`Grant ${dto.grantId} not found`);
|
|
||||||
}
|
|
||||||
if (grant.peerId !== dto.peerId) {
|
|
||||||
throw new BadRequestException(`peerId does not match the grant's registered peer`);
|
|
||||||
}
|
|
||||||
|
|
||||||
const token = crypto.randomBytes(32).toString('hex');
|
const token = crypto.randomBytes(32).toString('hex');
|
||||||
const expiresAt = new Date(Date.now() + ttl * 1000);
|
const expiresAt = new Date(Date.now() + ttl * 1000);
|
||||||
|
|
||||||
@@ -115,167 +99,132 @@ export class EnrollmentService {
|
|||||||
* 8. Return { certPem, certChainPem }
|
* 8. Return { certPem, certChainPem }
|
||||||
*/
|
*/
|
||||||
async redeem(token: string, csrPem: string): Promise<RedeemResult> {
|
async redeem(token: string, csrPem: string): Promise<RedeemResult> {
|
||||||
// HIGH-5: Track outcome so we can write a failure audit row on any error.
|
// 1. Fetch token row
|
||||||
let outcome: 'allowed' | 'denied' = 'denied';
|
const [row] = await this.db
|
||||||
// row may be undefined if the token is not found — used defensively in catch.
|
.select()
|
||||||
let row: typeof federationEnrollmentTokens.$inferSelect | undefined;
|
.from(federationEnrollmentTokens)
|
||||||
|
.where(eq(federationEnrollmentTokens.token, token))
|
||||||
|
.limit(1);
|
||||||
|
|
||||||
|
if (!row) {
|
||||||
|
throw new NotFoundException('Enrollment token not found');
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Already used?
|
||||||
|
if (row.usedAt !== null) {
|
||||||
|
throw new GoneException('Enrollment token has already been used');
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. Expired?
|
||||||
|
if (row.expiresAt < new Date()) {
|
||||||
|
throw new GoneException('Enrollment token has expired');
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. Load grant and verify it is still pending
|
||||||
|
let grant;
|
||||||
try {
|
try {
|
||||||
// 1. Fetch token row
|
grant = await this.grantsService.getGrant(row.grantId);
|
||||||
const [fetchedRow] = await this.db
|
|
||||||
.select()
|
|
||||||
.from(federationEnrollmentTokens)
|
|
||||||
.where(eq(federationEnrollmentTokens.token, token))
|
|
||||||
.limit(1);
|
|
||||||
|
|
||||||
if (!fetchedRow) {
|
|
||||||
throw new NotFoundException('Enrollment token not found');
|
|
||||||
}
|
|
||||||
row = fetchedRow;
|
|
||||||
|
|
||||||
// 2. Already used?
|
|
||||||
if (row.usedAt !== null) {
|
|
||||||
throw new GoneException('Enrollment token has already been used');
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Expired?
|
|
||||||
if (row.expiresAt < new Date()) {
|
|
||||||
throw new GoneException('Enrollment token has expired');
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Load grant and verify it is still pending
|
|
||||||
let grant;
|
|
||||||
try {
|
|
||||||
grant = await this.grantsService.getGrant(row.grantId);
|
|
||||||
} catch (err) {
|
|
||||||
if (err instanceof FederationScopeError) {
|
|
||||||
throw new BadRequestException(err.message);
|
|
||||||
}
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (grant.status !== 'pending') {
|
|
||||||
throw new GoneException(
|
|
||||||
`Grant ${row.grantId} is no longer pending (status: ${grant.status})`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// 5. Atomically claim the token BEFORE cert issuance to prevent double-minting.
|
|
||||||
// WHERE used_at IS NULL ensures only one concurrent request wins.
|
|
||||||
// Using .returning() works on both node-postgres and PGlite without rowCount inspection.
|
|
||||||
const claimed = await this.db
|
|
||||||
.update(federationEnrollmentTokens)
|
|
||||||
.set({ usedAt: sql`NOW()` })
|
|
||||||
.where(
|
|
||||||
and(
|
|
||||||
eq(federationEnrollmentTokens.token, token),
|
|
||||||
isNull(federationEnrollmentTokens.usedAt),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
.returning({ token: federationEnrollmentTokens.token });
|
|
||||||
|
|
||||||
if (claimed.length === 0) {
|
|
||||||
throw new GoneException('Enrollment token has already been used (concurrent request)');
|
|
||||||
}
|
|
||||||
|
|
||||||
// 6. Issue certificate via CaService (network call — outside any transaction).
|
|
||||||
// If this throws, the token is already consumed. The grant stays pending.
|
|
||||||
// Admin must revoke the grant and create a new one.
|
|
||||||
let issued;
|
|
||||||
try {
|
|
||||||
issued = await this.caService.issueCert({
|
|
||||||
csrPem,
|
|
||||||
grantId: row.grantId,
|
|
||||||
subjectUserId: grant.subjectUserId,
|
|
||||||
ttlSeconds: 300,
|
|
||||||
});
|
|
||||||
} catch (err) {
|
|
||||||
// HIGH-4: Log only the first 8 hex chars of the token for correlation — never log the full token.
|
|
||||||
this.logger.error(
|
|
||||||
`issueCert failed after token ${token.slice(0, 8)}... was claimed — grant ${row.grantId} is stranded pending`,
|
|
||||||
err instanceof Error ? err.stack : String(err),
|
|
||||||
);
|
|
||||||
if (err instanceof FederationScopeError) {
|
|
||||||
throw new BadRequestException((err as Error).message);
|
|
||||||
}
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
|
|
||||||
// 7. Atomically activate grant, update peer record, and write audit log.
|
|
||||||
const certNotAfter = this.extractCertNotAfter(issued.certPem);
|
|
||||||
await this.db.transaction(async (tx) => {
|
|
||||||
// CRIT-2: Guard activation with WHERE status='pending' to prevent double-activation.
|
|
||||||
const [activated] = await tx
|
|
||||||
.update(federationGrants)
|
|
||||||
.set({ status: 'active' })
|
|
||||||
.where(and(eq(federationGrants.id, row!.grantId), eq(federationGrants.status, 'pending')))
|
|
||||||
.returning({ id: federationGrants.id });
|
|
||||||
if (!activated) {
|
|
||||||
throw new ConflictException(
|
|
||||||
`Grant ${row!.grantId} is no longer pending — cannot activate`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CRIT-2: Guard peer update with WHERE state='pending'.
|
|
||||||
await tx
|
|
||||||
.update(federationPeers)
|
|
||||||
.set({
|
|
||||||
certPem: issued.certPem,
|
|
||||||
certSerial: issued.serialNumber,
|
|
||||||
certNotAfter,
|
|
||||||
state: 'active',
|
|
||||||
})
|
|
||||||
.where(and(eq(federationPeers.id, row!.peerId), eq(federationPeers.state, 'pending')));
|
|
||||||
|
|
||||||
await tx.insert(federationAuditLog).values({
|
|
||||||
requestId: crypto.randomUUID(),
|
|
||||||
peerId: row!.peerId,
|
|
||||||
grantId: row!.grantId,
|
|
||||||
verb: 'enrollment',
|
|
||||||
resource: 'federation_grant',
|
|
||||||
statusCode: 200,
|
|
||||||
outcome: 'allowed',
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
this.logger.log(
|
|
||||||
`Enrollment complete — peerId=${row.peerId} grantId=${row.grantId} serial=${issued.serialNumber}`,
|
|
||||||
);
|
|
||||||
|
|
||||||
outcome = 'allowed';
|
|
||||||
|
|
||||||
// 8. Return cert material
|
|
||||||
return {
|
|
||||||
certPem: issued.certPem,
|
|
||||||
certChainPem: issued.certChainPem,
|
|
||||||
};
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
// HIGH-5: Best-effort audit write on failure — do not let this throw.
|
if (err instanceof FederationScopeError) {
|
||||||
if (outcome === 'denied') {
|
throw new BadRequestException(err.message);
|
||||||
await this.db
|
|
||||||
.insert(federationAuditLog)
|
|
||||||
.values({
|
|
||||||
requestId: crypto.randomUUID(),
|
|
||||||
peerId: row?.peerId ?? null,
|
|
||||||
grantId: row?.grantId ?? null,
|
|
||||||
verb: 'enrollment',
|
|
||||||
resource: 'federation_grant',
|
|
||||||
statusCode:
|
|
||||||
err instanceof GoneException ? 410 : err instanceof NotFoundException ? 404 : 500,
|
|
||||||
outcome: 'denied',
|
|
||||||
})
|
|
||||||
.catch(() => {});
|
|
||||||
}
|
}
|
||||||
throw err;
|
throw err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (grant.status !== 'pending') {
|
||||||
|
throw new GoneException(
|
||||||
|
`Grant ${row.grantId} is no longer pending (status: ${grant.status})`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. Atomically claim the token BEFORE cert issuance to prevent double-minting.
|
||||||
|
// WHERE used_at IS NULL ensures only one concurrent request wins.
|
||||||
|
// Using .returning() works on both node-postgres and PGlite without rowCount inspection.
|
||||||
|
const claimed = await this.db
|
||||||
|
.update(federationEnrollmentTokens)
|
||||||
|
.set({ usedAt: sql`NOW()` })
|
||||||
|
.where(
|
||||||
|
and(eq(federationEnrollmentTokens.token, token), isNull(federationEnrollmentTokens.usedAt)),
|
||||||
|
)
|
||||||
|
.returning({ token: federationEnrollmentTokens.token });
|
||||||
|
|
||||||
|
if (claimed.length === 0) {
|
||||||
|
throw new GoneException('Enrollment token has already been used (concurrent request)');
|
||||||
|
}
|
||||||
|
|
||||||
|
// 6. Issue certificate via CaService (network call — outside any transaction).
|
||||||
|
// If this throws, the token is already consumed. The grant stays pending.
|
||||||
|
// Admin must revoke the grant and create a new one.
|
||||||
|
let issued;
|
||||||
|
try {
|
||||||
|
issued = await this.caService.issueCert({
|
||||||
|
csrPem,
|
||||||
|
grantId: row.grantId,
|
||||||
|
subjectUserId: grant.subjectUserId,
|
||||||
|
ttlSeconds: 300,
|
||||||
|
});
|
||||||
|
} catch (err) {
|
||||||
|
this.logger.error(
|
||||||
|
`issueCert failed after token ${token} was claimed — grant ${row.grantId} is stranded pending`,
|
||||||
|
err instanceof Error ? err.stack : String(err),
|
||||||
|
);
|
||||||
|
if (err instanceof FederationScopeError) {
|
||||||
|
throw new BadRequestException((err as Error).message);
|
||||||
|
}
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 7. Atomically activate grant, update peer record, and write audit log.
|
||||||
|
const certNotAfter = this.extractCertNotAfter(issued.certPem);
|
||||||
|
await this.db.transaction(async (tx) => {
|
||||||
|
await tx
|
||||||
|
.update(federationGrants)
|
||||||
|
.set({ status: 'active' })
|
||||||
|
.where(eq(federationGrants.id, row.grantId));
|
||||||
|
|
||||||
|
await tx
|
||||||
|
.update(federationPeers)
|
||||||
|
.set({
|
||||||
|
certPem: issued.certPem,
|
||||||
|
certSerial: issued.serialNumber,
|
||||||
|
certNotAfter,
|
||||||
|
state: 'active',
|
||||||
|
})
|
||||||
|
.where(eq(federationPeers.id, row.peerId));
|
||||||
|
|
||||||
|
await tx.insert(federationAuditLog).values({
|
||||||
|
requestId: crypto.randomUUID(),
|
||||||
|
peerId: row.peerId,
|
||||||
|
grantId: row.grantId,
|
||||||
|
verb: 'enrollment',
|
||||||
|
resource: 'federation_grant',
|
||||||
|
statusCode: 200,
|
||||||
|
outcome: 'allowed',
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
this.logger.log(
|
||||||
|
`Enrollment complete — peerId=${row.peerId} grantId=${row.grantId} serial=${issued.serialNumber}`,
|
||||||
|
);
|
||||||
|
|
||||||
|
// 8. Return cert material
|
||||||
|
return {
|
||||||
|
certPem: issued.certPem,
|
||||||
|
certChainPem: issued.certChainPem,
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Extract the notAfter date from a PEM certificate.
|
* Extract the notAfter date from a PEM certificate.
|
||||||
* HIGH-2: No silent fallback — a cert that cannot be parsed should fail loud.
|
* Falls back to 90 days from now if parsing fails.
|
||||||
*/
|
*/
|
||||||
private extractCertNotAfter(certPem: string): Date {
|
private extractCertNotAfter(certPem: string): Date {
|
||||||
const cert = new X509Certificate(certPem);
|
try {
|
||||||
return new Date(cert.validTo);
|
const cert = new X509Certificate(certPem);
|
||||||
|
return new Date(cert.validTo);
|
||||||
|
} catch {
|
||||||
|
// Fallback: 90 days from now
|
||||||
|
return new Date(Date.now() + 90 * 24 * 60 * 60 * 1000);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,106 +0,0 @@
|
|||||||
# Mosaic Federation — Admin CLI Reference
|
|
||||||
|
|
||||||
Available since: FED-M2
|
|
||||||
|
|
||||||
## Grant Management
|
|
||||||
|
|
||||||
### Create a grant
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation grant create --user <userId> --peer <peerId> --scope <scope-file.json>
|
|
||||||
```
|
|
||||||
|
|
||||||
The scope file defines what resources and rows the peer may access:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"resources": ["tasks", "notes"],
|
|
||||||
"excluded_resources": ["credentials"],
|
|
||||||
"max_rows_per_query": 100
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Valid resource values: `tasks`, `notes`, `credentials`, `teams`, `users`
|
|
||||||
|
|
||||||
### List grants
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation grant list [--peer <peerId>] [--status pending|active|revoked|expired]
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows all federation grants, optionally filtered by peer or status.
|
|
||||||
|
|
||||||
### Show a grant
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation grant show <grantId>
|
|
||||||
```
|
|
||||||
|
|
||||||
Display details of a single grant, including its scope, activation timestamp, and status.
|
|
||||||
|
|
||||||
### Revoke a grant
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation grant revoke <grantId> [--reason "Reason text"]
|
|
||||||
```
|
|
||||||
|
|
||||||
Revoke an active grant immediately. Revoked grants cannot be reactivated. The optional reason is stored in the audit log.
|
|
||||||
|
|
||||||
### Generate enrollment token
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation grant token <grantId> [--ttl <seconds>]
|
|
||||||
```
|
|
||||||
|
|
||||||
Generate a single-use enrollment token for the grant. The default TTL is 900 seconds (15 minutes); maximum 15 minutes.
|
|
||||||
|
|
||||||
Output includes the token and the full enrollment URL for the peer to use.
|
|
||||||
|
|
||||||
## Peer Management
|
|
||||||
|
|
||||||
### Add a peer (remote enrollment)
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation peer add <enrollment-url>
|
|
||||||
```
|
|
||||||
|
|
||||||
Enroll a remote peer using the enrollment URL obtained from a grant token. The command:
|
|
||||||
|
|
||||||
1. Generates a P-256 ECDSA keypair locally
|
|
||||||
2. Creates a certificate signing request (CSR)
|
|
||||||
3. Submits the CSR to the enrollment URL
|
|
||||||
4. Verifies the returned certificate includes the correct custom OIDs (grant ID and subject user ID)
|
|
||||||
5. Seals the private key at rest using `BETTER_AUTH_SECRET`
|
|
||||||
6. Stores the peer record and sealed key in the local gateway database
|
|
||||||
|
|
||||||
Once enrollment completes, the peer can authenticate using the certificate and private key.
|
|
||||||
|
|
||||||
### List peers
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mosaic federation peer list
|
|
||||||
```
|
|
||||||
|
|
||||||
Shows all enrolled peers, including their certificate fingerprints and activation status.
|
|
||||||
|
|
||||||
## REST API Reference
|
|
||||||
|
|
||||||
All CLI commands call the local gateway admin API. Equivalent REST endpoints:
|
|
||||||
|
|
||||||
| CLI Command | REST Endpoint | Method |
|
|
||||||
| ------------ | ------------------------------------------------------------------------------------------- | ----------------- |
|
|
||||||
| grant create | `/api/admin/federation/grants` | POST |
|
|
||||||
| grant list | `/api/admin/federation/grants` | GET |
|
|
||||||
| grant show | `/api/admin/federation/grants/:id` | GET |
|
|
||||||
| grant revoke | `/api/admin/federation/grants/:id/revoke` | PATCH |
|
|
||||||
| grant token | `/api/admin/federation/grants/:id/tokens` | POST |
|
|
||||||
| peer list | `/api/admin/federation/peers` | GET |
|
|
||||||
| peer add | `/api/admin/federation/peers/keypair` + enrollment + `/api/admin/federation/peers/:id/cert` | POST, POST, PATCH |
|
|
||||||
|
|
||||||
## Security Notes
|
|
||||||
|
|
||||||
- **Enrollment tokens** are single-use and expire in 15 minutes (not configurable beyond 15 minutes)
|
|
||||||
- **Peer private keys** are encrypted at rest using AES-256-GCM, keyed from `BETTER_AUTH_SECRET`
|
|
||||||
- **Custom OIDs** in issued certificates are verified post-issuance: the grant ID and subject user ID must match the certificate extensions
|
|
||||||
- **Grant activation** is atomic — concurrent enrollment attempts for the same grant are rejected
|
|
||||||
- **Revoked grants** cannot be activated; peers attempting to use a revoked grant's token will be rejected
|
|
||||||
@@ -7,11 +7,11 @@
|
|||||||
|
|
||||||
**ID:** federation-v1-20260419
|
**ID:** federation-v1-20260419
|
||||||
**Statement:** Jarvis operates across 3–4 workstations in two physical locations (home, USC). The user currently reaches back to a single jarvis-brain checkout from every session; a prior OpenBrain attempt caused cache, latency, and opacity pain. This mission builds asymmetric federation between Mosaic Stack gateways so that a session on a user's home gateway can query their work gateway in real time without data ever persisting across the boundary, with full multi-tenant isolation and standard-PKI (X.509 / Step-CA) trust management.
|
**Statement:** Jarvis operates across 3–4 workstations in two physical locations (home, USC). The user currently reaches back to a single jarvis-brain checkout from every session; a prior OpenBrain attempt caused cache, latency, and opacity pain. This mission builds asymmetric federation between Mosaic Stack gateways so that a session on a user's home gateway can query their work gateway in real time without data ever persisting across the boundary, with full multi-tenant isolation and standard-PKI (X.509 / Step-CA) trust management.
|
||||||
**Phase:** M3 active — mTLS handshake + list/get/capabilities verbs + scope enforcement
|
**Phase:** M2 active — Step-CA + grant schema + admin CLI; parallel test-deploy workstream stood up
|
||||||
**Current Milestone:** FED-M3
|
**Current Milestone:** FED-M2
|
||||||
**Progress:** 2 / 7 milestones
|
**Progress:** 1 / 7 milestones
|
||||||
**Status:** active
|
**Status:** active
|
||||||
**Last Updated:** 2026-04-21 (M2 closed via PR #503, tag `fed-v0.2.0-m2`, issue #461 closed; M3 decomposed into 14 tasks)
|
**Last Updated:** 2026-04-21 (M2 decomposed; mos-test-1/-2 designated as federation E2E test hosts)
|
||||||
**Parent Mission:** None — new mission
|
**Parent Mission:** None — new mission
|
||||||
|
|
||||||
## Test Infrastructure
|
## Test Infrastructure
|
||||||
@@ -63,8 +63,8 @@ Key design references:
|
|||||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||||
| --- | ------ | --------------------------------------------- | ----------- | ------------------ | ----- | ---------- | ---------- |
|
| --- | ------ | --------------------------------------------- | ----------- | ------------------ | ----- | ---------- | ---------- |
|
||||||
| 1 | FED-M1 | Federated tier infrastructure | done | (12 PRs #470-#481) | #460 | 2026-04-19 | 2026-04-19 |
|
| 1 | FED-M1 | Federated tier infrastructure | done | (12 PRs #470-#481) | #460 | 2026-04-19 | 2026-04-19 |
|
||||||
| 2 | FED-M2 | Step-CA + grant schema + admin CLI | done | (PRs #483-#503) | #461 | 2026-04-21 | 2026-04-21 |
|
| 2 | FED-M2 | Step-CA + grant schema + admin CLI | in-progress | (decomposition) | #461 | 2026-04-21 | — |
|
||||||
| 3 | FED-M3 | mTLS handshake + list/get + scope enforcement | in-progress | (decomposition) | #462 | 2026-04-21 | — |
|
| 3 | FED-M3 | mTLS handshake + list/get + scope enforcement | not-started | — | #462 | — | — |
|
||||||
| 4 | FED-M4 | search verb + audit log + rate limit | not-started | — | #463 | — | — |
|
| 4 | FED-M4 | search verb + audit log + rate limit | not-started | — | #463 | — | — |
|
||||||
| 5 | FED-M5 | Cache + offline degradation + OTEL | not-started | — | #464 | — | — |
|
| 5 | FED-M5 | Cache + offline degradation + OTEL | not-started | — | #464 | — | — |
|
||||||
| 6 | FED-M6 | Revocation + auto-renewal + CRL | not-started | — | #465 | — | — |
|
| 6 | FED-M6 | Revocation + auto-renewal + CRL | not-started | — | #465 | — | — |
|
||||||
@@ -85,24 +85,17 @@ Key design references:
|
|||||||
|
|
||||||
## Session History
|
## Session History
|
||||||
|
|
||||||
| Session | Date | Runtime | Outcome |
|
| Session | Date | Runtime | Outcome |
|
||||||
| ------- | ----------------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------- |
|
| ------- | ---------- | ------- | --------------------------------------------------------------------- |
|
||||||
| S1 | 2026-04-19 | claude | PRD authored, MILESTONES decomposed, 7 issues filed |
|
| S1 | 2026-04-19 | claude | PRD authored, MILESTONES decomposed, 7 issues filed |
|
||||||
| S2-S4 | 2026-04-19 | claude | FED-M1 complete: 12 tasks (PRs #470-#481) merged; tag `fed-v0.1.0-m1` |
|
| S2-S4 | 2026-04-19 | claude | FED-M1 complete: 12 tasks (PRs #470-#481) merged; tag `fed-v0.1.0-m1` |
|
||||||
| S5-S22 | 2026-04-19 → 2026-04-21 | claude | FED-M2 complete: 13 tasks (PRs #483-#503) merged; tag `fed-v0.2.0-m2`; issue #461 closed. Step-CA + grant schema + admin CLI shipped. |
|
|
||||||
| S23 | 2026-04-21 | claude | M3 decomposed into 14 tasks in `docs/federation/TASKS.md`. Manifest M3 row → in-progress. Next: kickoff M3-01. |
|
|
||||||
|
|
||||||
## Next Step
|
## Next Step
|
||||||
|
|
||||||
FED-M3 active. Decomposition landed in `docs/federation/TASKS.md` (M3-01..M3-14, ~100K estimate). Tracking issue #462.
|
FED-M2 active. Decomposition landed in `docs/federation/TASKS.md` (M2-01..M2-13 code workstream + DEPLOY-01..DEPLOY-05 parallel test-deploy workstream, ~88K total). Tracking issue #482.
|
||||||
|
|
||||||
Execution plan (parallel where possible):
|
Parallel execution plan:
|
||||||
|
|
||||||
- **Foundation**: M3-01 (DTOs in `packages/types/src/federation/`) starts immediately — sonnet subagent on `feat/federation-m3-types`. Blocks all server + client work.
|
- **CODE workstream**: M2-01 (DB migration) starts immediately — sonnet subagent on `feat/federation-m2-schema`. Then M2-02 → M2-09 sequentially with M2-04/M2-05/M2-06/M2-07 having interleaved CA/storage/grant dependencies.
|
||||||
- **Server stream** (after M3-01): M3-03 (AuthGuard) + M3-04 (ScopeService) in series, then M3-05 / M3-06 / M3-07 (verbs) in parallel.
|
- **DEPLOY workstream**: DEPLOY-01 (image verify) → DEPLOY-02 (stack template) → DEPLOY-03/04 (mos-test-1/-2 deploy) → DEPLOY-05 (TEST-INFRA.md). Gated on Portainer wrapper PR (`PORTAINER_INSECURE` flag) merging first.
|
||||||
- **Client stream** (after M3-01, parallel with server): M3-08 (FederationClient) → M3-09 (QuerySourceService).
|
- **Re-converge** at M2-10 (E2E test) once both workstreams ready.
|
||||||
- **Harness** (parallel with everything): M3-02 (`tools/federation-harness/`) — needed for M3-11.
|
|
||||||
- **Test gates**: M3-10 (Integration) → M3-11 (E2E with harness) → M3-12 (Independent security review, two rounds budgeted).
|
|
||||||
- **Close**: M3-13 (Docs) → M3-14 (release tag `fed-v0.3.0-m3`, close #462).
|
|
||||||
|
|
||||||
**Test-bed fallback:** `mos-test-1/-2` deploy is still blocked on `FED-M2-DEPLOY-IMG-FIX`. The harness in M3-02 ships a local two-gateway docker-compose so M3-11 is not blocked. Production-host validation is M7's responsibility (PRD AC-12).
|
|
||||||
|
|||||||
@@ -70,96 +70,6 @@ For JSON output (useful in CI/automation):
|
|||||||
mosaic gateway doctor --json
|
mosaic gateway doctor --json
|
||||||
```
|
```
|
||||||
|
|
||||||
## Step 2: Step-CA Bootstrap
|
|
||||||
|
|
||||||
Step-CA is a certificate authority that issues X.509 certificates for federation peers. In Mosaic federation, it signs peer certificates with custom OIDs that embed grant and user identities, enforcing authorization at the certificate level.
|
|
||||||
|
|
||||||
### Prerequisites for Step-CA
|
|
||||||
|
|
||||||
Before starting the CA, you must set up the dev password:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cp infra/step-ca/dev-password.example infra/step-ca/dev-password
|
|
||||||
# Edit dev-password and set your CA password (minimum 16 characters)
|
|
||||||
```
|
|
||||||
|
|
||||||
The password is required for the CA to boot and derive the provisioner key used by the gateway.
|
|
||||||
|
|
||||||
### Start the Step-CA service
|
|
||||||
|
|
||||||
Add the step-ca service to your federated stack:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker compose -f docker-compose.federated.yml --profile federated up -d step-ca
|
|
||||||
```
|
|
||||||
|
|
||||||
On first boot, the init script (`infra/step-ca/init.sh`) runs automatically. It:
|
|
||||||
|
|
||||||
- Generates the CA root key and certificate in the Docker volume
|
|
||||||
- Creates the `mosaic-fed` JWK provisioner
|
|
||||||
- Applies the X.509 template from `infra/step-ca/templates/federation.tpl`
|
|
||||||
|
|
||||||
The volume is persistent, so subsequent boots reuse the existing CA keys.
|
|
||||||
|
|
||||||
Verify the CA is healthy:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
curl https://localhost:9000/health --cacert /tmp/step-ca-root.crt
|
|
||||||
```
|
|
||||||
|
|
||||||
(If the root cert file doesn't exist yet, see the extraction steps below.)
|
|
||||||
|
|
||||||
### Extract credentials for the gateway
|
|
||||||
|
|
||||||
The gateway requires two credentials from the running CA:
|
|
||||||
|
|
||||||
**1. Provisioner key (for `STEP_CA_PROVISIONER_KEY_JSON`)**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json > /tmp/step-ca-provisioner.json
|
|
||||||
```
|
|
||||||
|
|
||||||
This JSON file contains the JWK public and private keys for the `mosaic-fed` provisioner. Store it securely and pass its contents to the gateway via the `STEP_CA_PROVISIONER_KEY_JSON` environment variable.
|
|
||||||
|
|
||||||
**2. Root certificate (for `STEP_CA_ROOT_CERT_PATH`)**
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker cp $(docker ps -qf name=step-ca):/home/step/certs/root_ca.crt /tmp/step-ca-root.crt
|
|
||||||
```
|
|
||||||
|
|
||||||
This PEM file is the CA's root certificate, used to verify peer certificates issued by step-ca. Pass its path to the gateway via `STEP_CA_ROOT_CERT_PATH`.
|
|
||||||
|
|
||||||
### Custom OID Registry
|
|
||||||
|
|
||||||
Federation certificates include custom OIDs in the certificate extension. These encode authorization metadata:
|
|
||||||
|
|
||||||
| OID | Name | Description |
|
|
||||||
| ------------------- | ---------------------- | --------------------- |
|
|
||||||
| 1.3.6.1.4.1.99999.1 | mosaic_grant_id | Federation grant UUID |
|
|
||||||
| 1.3.6.1.4.1.99999.2 | mosaic_subject_user_id | Subject user UUID |
|
|
||||||
|
|
||||||
These OIDs are verified by the gateway after the CSR is signed, ensuring the certificate was issued with the correct grant and user context.
|
|
||||||
|
|
||||||
### Environment Variables
|
|
||||||
|
|
||||||
Configure the gateway with the following environment variables before startup:
|
|
||||||
|
|
||||||
| Variable | Required | Description |
|
|
||||||
| ------------------------------ | -------- | --------------------------------------------------------------------------------------------------------- |
|
|
||||||
| `STEP_CA_URL` | Yes | Base URL of the step-ca instance, e.g. `https://step-ca:9000` (use `https://localhost:9000` in local dev) |
|
|
||||||
| `STEP_CA_PROVISIONER_KEY_JSON` | Yes | JSON-encoded JWK from `/home/step/secrets/mosaic-fed.json` |
|
|
||||||
| `STEP_CA_ROOT_CERT_PATH` | Yes | Absolute path to the root CA certificate (e.g. `/tmp/step-ca-root.crt`) |
|
|
||||||
| `BETTER_AUTH_SECRET` | Yes | Secret used to seal peer private keys at rest; already required for M1 |
|
|
||||||
|
|
||||||
Example environment setup:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
export STEP_CA_URL="https://localhost:9000"
|
|
||||||
export STEP_CA_PROVISIONER_KEY_JSON="$(cat /tmp/step-ca-provisioner.json)"
|
|
||||||
export STEP_CA_ROOT_CERT_PATH="/tmp/step-ca-root.crt"
|
|
||||||
export BETTER_AUTH_SECRET="<your-secret>"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
### Port conflicts
|
### Port conflicts
|
||||||
|
|||||||
@@ -63,21 +63,21 @@ Goal: Two federated-tier gateways stood up on Portainer at `mos-test-1.woltje.co
|
|||||||
|
|
||||||
Goal: An admin can create a federation grant; counterparty enrolls; cert is signed by Step-CA with SAN OIDs for `grantId` + `subjectUserId`. No runtime federation traffic flows yet (that's M3).
|
Goal: An admin can create a federation grant; counterparty enrolls; cert is signed by Step-CA with SAN OIDs for `grantId` + `subjectUserId`. No runtime federation traffic flows yet (that's M3).
|
||||||
|
|
||||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------------------- | ---------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
|
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------------------- | ---------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
| FED-M2-01 | done | DB migration: `federation_grants`, `federation_peers`, `federation_audit_log` tables + enum types (`grant_status`, `peer_state`). Drizzle schema + migration generation; migration tests. | #461 | sonnet | feat/federation-m2-schema | — | 5K | Shipped in PR #486. DESC indexes + reserved cols added after first review; migration tests green. |
|
| FED-M2-01 | needs-qa | DB migration: `federation_grants`, `federation_peers`, `federation_audit_log` tables + enum types (`grant_status`, `peer_state`). Drizzle schema + migration generation; migration tests. | #461 | sonnet | feat/federation-m2-schema | — | 5K | PR #486 open. First review NEEDS CHANGES (missing DESC indexes + reserved cols). Remediation subagent `a673dd9355dc26f82` in flight in worktree `agent-a4404ac1`. |
|
||||||
| FED-M2-02 | done | Add Step-CA sidecar to `docker-compose.federated.yml`: official `smallstep/step-ca` image, persistent CA volume, JWK provisioner config baked into init script. | #461 | sonnet | feat/federation-m2-stepca | DEPLOY-02 | 4K | Shipped in PR #494. Profile-gated under `federated`; CA password from secret; dev compose uses dev-only password file. |
|
| FED-M2-02 | not-started | Add Step-CA sidecar to `docker-compose.federated.yml`: official `smallstep/step-ca` image, persistent CA volume, JWK provisioner config baked into init script. | #461 | sonnet | feat/federation-m2-stepca | DEPLOY-02 | 4K | Profile-gated under `federated`. CA password from secret; dev compose uses dev-only password file. |
|
||||||
| FED-M2-03 | done | Scope JSON schema + validator: `resources` allowlist, `excluded_resources`, `include_teams`, `include_personal`, `max_rows_per_query`. Vitest unit tests for valid + invalid scopes. | #461 | sonnet | feat/federation-m2-scope-schema | — | 4K | Shipped in PR #496 (bundled with grants service). Validator independent of CA; reusable from grant CRUD + M3 scope enforcement. |
|
| FED-M2-03 | not-started | Scope JSON schema + validator: `resources` allowlist, `excluded_resources`, `include_teams`, `include_personal`, `max_rows_per_query`. Vitest unit tests for valid + invalid scopes. | #461 | sonnet | feat/federation-m2-scope-schema | — | 4K | Validator independent of CA — reusable from grant CRUD + (later) M3 scope enforcement. |
|
||||||
| FED-M2-04 | done | `apps/gateway/src/federation/ca.service.ts`: Step-CA client (CSR submission, OID-bearing cert retrieval). Mocked + integration tests against real Step-CA container. | #461 | sonnet | feat/federation-m2-ca-service | M2-02 | 6K | Shipped in PR #494. SAN OIDs 1.3.6.1.4.1.99999.1 (grantId) + 1.3.6.1.4.1.99999.2 (subjectUserId); integration test asserts both OIDs present in issued cert. |
|
| FED-M2-04 | not-started | `apps/gateway/src/federation/ca.service.ts`: Step-CA client (CSR submission, OID-bearing cert retrieval). Mocked + integration tests against real Step-CA container. | #461 | sonnet | feat/federation-m2-ca-service | M2-02 | 6K | SAN OIDs: `grantId` (custom OID 1.3.6.1.4.1.99999.1) + `subjectUserId` (1.3.6.1.4.1.99999.2). Document OID assignments in PRD/SETUP. **Acceptance**: must (a) wire `federation.tpl` template into `mosaic-fed` provisioner config and (b) include a unit/integration test asserting issued certs contain BOTH OIDs — fails-loud guard against silent OID stripping (carry-forward from M2-02 review). |
|
||||||
| FED-M2-05 | done | Sealed storage for `client_key_pem` reusing existing `provider_credentials` sealing key. Tests prove DB-at-rest is ciphertext, not PEM. Key rotation path documented (deferred impl). | #461 | sonnet | feat/federation-m2-key-sealing | M2-01 | 5K | Shipped in PR #495. Crypto seam isolated; tests confirm ciphertext-at-rest; key rotation deferred to M6. |
|
| FED-M2-05 | not-started | Sealed storage for `client_key_pem` reusing existing `provider_credentials` sealing key. Tests prove DB-at-rest is ciphertext, not PEM. Key rotation path documented (deferred impl). | #461 | sonnet | feat/federation-m2-key-sealing | M2-01 | 5K | Separate from M2-06 to keep crypto seam isolated; reviewer focus is sealing only. |
|
||||||
| FED-M2-06 | done | `grants.service.ts`: CRUD + status transitions (`pending` → `active` → `revoked`); integrates M2-03 (scope) + M2-05 (sealing). Unit tests cover all transitions including invalid ones. | #461 | sonnet | feat/federation-m2-grants-service | M2-03, M2-05 | 6K | Shipped in PR #496. All status transitions covered; invalid transition tests green; revocation handler deferred to M6. |
|
| FED-M2-06 | not-started | `grants.service.ts`: CRUD + status transitions (`pending` → `active` → `revoked`); integrates M2-03 (scope) + M2-05 (sealing). Unit tests cover all transitions including invalid ones. | #461 | sonnet | feat/federation-m2-grants-service | M2-03, M2-05 | 6K | Business logic only — CSR + cert work delegated to M2-04. Revocation handler is M6. |
|
||||||
| FED-M2-07 | done | `enrollment.controller.ts`: short-lived single-use token endpoint; CSR signing; updates grant `pending` → `active`; emits enrollment audit (table-only write, M4 tightens). | #461 | sonnet | feat/federation-m2-enrollment | M2-04, M2-06 | 6K | Shipped in PR #497. Tokens single-use with 410 on replay; TTL 15min; rate-limited at request layer. |
|
| FED-M2-07 | not-started | `enrollment.controller.ts`: short-lived single-use token endpoint; CSR signing; updates grant `pending` → `active`; emits enrollment audit (table-only write, M4 tightens). | #461 | sonnet | feat/federation-m2-enrollment | M2-04, M2-06 | 6K | Tokens single-use with 410 on replay; tokens TTL'd at 15min; rate-limited at request layer (M4 introduces guard, M2 uses simple lock). |
|
||||||
| FED-M2-08 | done | Admin CLI: `mosaic federation grant create/list/show` + `peer add/list`. Integration with grants.service (no API duplication). Help output + machine-readable JSON option. | #461 | sonnet | feat/federation-m2-cli | M2-06, M2-07 | 7K | Shipped in PR #498. `peer add <enrollment-url>` client-side flow; JSON output flag; admin REST controller co-shipped. |
|
| FED-M2-08 | not-started | Admin CLI: `mosaic federation grant create/list/show` + `peer add/list`. Integration with grants.service (no API duplication). Help output + machine-readable JSON option. | #461 | sonnet | feat/federation-m2-cli | M2-06, M2-07 | 7K | `peer add <enrollment-url>` is the client-side flow; resolves enrollment URL → CSR → store sealed key + cert. |
|
||||||
| FED-M2-09 | done | Integration tests covering MILESTONES.md M2 acceptance tests #1, #2, #3, #5, #7, #8 (single-gateway suite). Real Step-CA container; vitest profile gated by `FEDERATED_INTEGRATION=1`. | #461 | sonnet | feat/federation-m2-integration | M2-08 | 8K | Shipped in PR #499. All 6 acceptance tests green; gated by FEDERATED_INTEGRATION=1. |
|
| FED-M2-09 | not-started | Integration tests covering MILESTONES.md M2 acceptance tests #1, #2, #3, #5, #7, #8 (single-gateway suite). Real Step-CA container; vitest profile gated by `FEDERATED_INTEGRATION=1`. | #461 | sonnet | feat/federation-m2-integration | M2-08 | 8K | Tests #4 (cert OID match) + #6 (two-gateway peer-add) handled separately by M2-10 (E2E). |
|
||||||
| FED-M2-10 | done | E2E test against deployed mos-test-1 + mos-test-2 (or local two-gateway docker-compose if Portainer not ready): MILESTONES test #6 `peer add` yields `active` peer record with valid cert + key. | #461 | sonnet | feat/federation-m2-e2e | M2-08, DEPLOY-04 | 6K | Shipped in PR #500. Local two-gateway docker-compose path used; `peer add` yields active peer with valid cert + sealed key. |
|
| FED-M2-10 | not-started | E2E test against deployed mos-test-1 + mos-test-2 (or local two-gateway docker-compose if Portainer not ready): MILESTONES test #6 `peer add` yields `active` peer record with valid cert + key. | #461 | sonnet | feat/federation-m2-e2e | M2-08, DEPLOY-04 | 6K | Falls back to local docker-compose-two-gateways if remote test hosts not yet available. Documents both paths. |
|
||||||
| FED-M2-11 | done | Independent security review (sonnet, not author of M2-04/05/06/07): focus on single-use token replay, sealing leak surfaces, OID match enforcement, scope schema bypass paths. | #461 | sonnet | feat/federation-m2-security-review | M2-10 | 8K | Shipped in PR #501. Two-round review; enrollment-token replay, OID-spoofing CSR, and key leak in error messages all verified and hardened. |
|
| FED-M2-11 | not-started | Independent security review (sonnet, not author of M2-04/05/06/07): focus on single-use token replay, sealing leak surfaces, OID match enforcement, scope schema bypass paths. | #461 | sonnet | feat/federation-m2-security-review | M2-10 | 8K | Apply M1 two-round pattern. Reviewer should explicitly attempt enrollment-token replay, OID-spoofing CSR, and key leak in error messages. |
|
||||||
| FED-M2-12 | done | Docs update: `docs/federation/SETUP.md` Step-CA section; new `docs/federation/ADMIN-CLI.md` with grant/peer commands; scope schema reference; OID registration note. Runbook still M7-deferred. | #461 | haiku | feat/federation-m2-docs | M2-11 | 4K | Shipped in PR #502. SETUP.md CA bootstrap section added; ADMIN-CLI.md created; scope schema reference and OID note included. |
|
| FED-M2-12 | not-started | Docs update: `docs/federation/SETUP.md` Step-CA section; new `docs/federation/ADMIN-CLI.md` with grant/peer commands; scope schema reference; OID registration note. Runbook still M7-deferred. | #461 | haiku | feat/federation-m2-docs | M2-11 | 4K | Adds CA bootstrap section to SETUP.md with `docker compose --profile federated up step-ca` example. |
|
||||||
| FED-M2-13 | done | PR aggregate close, CI green, merge to main, close #461. Release tag `fed-v0.2.0-m2`. Mark deploy stream complete. Update mission manifest M2 row. | #461 | sonnet | chore/federation-m2-close | M2-12 | 3K | Release tag `fed-v0.2.0-m2` created; issue #461 closed; all M2 PRs #494–#502 merged to main. |
|
| FED-M2-13 | not-started | PR aggregate close, CI green, merge to main, close #461. Release tag `fed-v0.2.0-m2`. Mark deploy stream complete. Update mission manifest M2 row. | #461 | sonnet | feat/federation-m2-close | M2-12 | 3K | Same close pattern as M1-12; queue-guard before merge; tea release-create with notes including deploy-stream PRs. |
|
||||||
|
|
||||||
**M2 code workstream estimate:** ~72K tokens (vs MILESTONES.md 30K — same over-budget pattern as M1, where per-task breakdown including tests/review/docs catches the real cost).
|
**M2 code workstream estimate:** ~72K tokens (vs MILESTONES.md 30K — same over-budget pattern as M1, where per-task breakdown including tests/review/docs catches the real cost).
|
||||||
|
|
||||||
@@ -85,38 +85,7 @@ Goal: An admin can create a federation grant; counterparty enrolls; cert is sign
|
|||||||
|
|
||||||
## Milestone 3 — mTLS handshake + list/get + scope enforcement (FED-M3)
|
## Milestone 3 — mTLS handshake + list/get + scope enforcement (FED-M3)
|
||||||
|
|
||||||
Goal: Two federated gateways exchange real data over mTLS. Inbound requests pass through cert validation → grant lookup → scope enforcement → native RBAC → response. `list`, `get`, and `capabilities` verbs land. The federation E2E harness (`tools/federation-harness/`) is the new permanent test bed for M3+ and is gated on every milestone going forward.
|
_Deferred. Issue #462._
|
||||||
|
|
||||||
> **Critical trust boundary.** Every 401/403 path needs a test. Code review is non-negotiable; M3-12 budgets two review rounds.
|
|
||||||
>
|
|
||||||
> **Tracking issue:** #462.
|
|
||||||
|
|
||||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
|
||||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------------ | ---------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
|
||||||
| FED-M3-01 | not-started | `packages/types/src/federation/` — request/response DTOs for `list`, `get`, `capabilities` verbs. Wire-format zod schemas + inferred TS types. Includes `FederationRequest`, `FederationListResponse<T>`, `FederationGetResponse<T>`, `FederationCapabilitiesResponse`, error envelope, `_source` tag. | #462 | sonnet | feat/federation-m3-types | — | 4K | Reusable from gateway server + client + harness. Pure types — no I/O, no NestJS. |
|
|
||||||
| FED-M3-02 | not-started | `tools/federation-harness/` scaffold: `docker-compose.two-gateways.yml` (Server A + Server B + step-CA), `seed.ts` (provisions grants, peers, sample tasks/notes/credentials per scope variant), `harness.ts` helper (boots stack, returns typed clients). README documents harness use. | #462 | sonnet | feat/federation-m3-harness | DEPLOY-04 (soft) | 8K | Falls back to local docker-compose if `mos-test-1/-2` not yet redeployed (DEPLOY chain blocked on IMG-FIX). Permanent test infra used by M3+. |
|
|
||||||
| FED-M3-03 | not-started | `apps/gateway/src/federation/server/federation-auth.guard.ts` (NestJS guard). Validates inbound client cert from Fastify TLS context, extracts `grantId` + `subjectUserId` from custom OIDs, loads grant from DB, asserts `status='active'`, attaches `FederationContext` to request. | #462 | sonnet | feat/federation-m3-auth-guard | M3-01 | 8K | Reuses OID parsing logic mirrored from `ca.service.ts` post-issuance verification. 401 on malformed/missing OIDs; 403 on revoked/expired/missing grant. |
|
|
||||||
| FED-M3-04 | not-started | `apps/gateway/src/federation/server/scope.service.ts`. Pipeline: (1) resource allowlist + excluded check, (2) native RBAC eval as `subjectUserId`, (3) scope filter intersection (`include_teams`, `include_personal`), (4) `max_rows_per_query` cap. Pure service — DB calls injected. | #462 | sonnet | feat/federation-m3-scope-service | M3-01 | 10K | Hardest correctness target in M3. Reuses `parseFederationScope` (M2-03). Returns either `{ allowed: true, filter }` or structured deny reason for audit. |
|
|
||||||
| FED-M3-05 | not-started | `apps/gateway/src/federation/server/verbs/list.controller.ts`. Wires AuthGuard → ScopeService → tasks/notes/memory query layer; applies row cap; tags rows with `_source`. Resource selector via path param. | #462 | sonnet | feat/federation-m3-verb-list | M3-03, M3-04 | 6K | Routes: `POST /api/federation/v1/list/:resource`. No body persistence. Audit write deferred to M4. |
|
|
||||||
| FED-M3-06 | not-started | `apps/gateway/src/federation/server/verbs/get.controller.ts`. Single-resource fetch by id; same pipeline as list. 404 on not-found, 403 on RBAC/scope deny — both audited the same way. | #462 | sonnet | feat/federation-m3-verb-get | M3-03, M3-04 | 6K | `POST /api/federation/v1/get/:resource/:id`. Mirrors list controller patterns. |
|
|
||||||
| FED-M3-07 | not-started | `apps/gateway/src/federation/server/verbs/capabilities.controller.ts`. Read-only enumeration: returns `{ resources, excluded_resources, max_rows_per_query, supported_verbs }` derived from grant scope. Always allowed for an active grant — no RBAC eval. | #462 | sonnet | feat/federation-m3-verb-capabilities | M3-03 | 4K | `GET /api/federation/v1/capabilities`. Smallest verb; useful sanity check that mTLS + auth guard work end-to-end. |
|
|
||||||
| FED-M3-08 | not-started | `apps/gateway/src/federation/client/federation-client.service.ts`. Outbound mTLS dialer: picks `(certPem, sealed clientKey)` from `federation_peers`, unwraps key, builds undici Agent with mTLS, calls peer verb, parses typed response, wraps non-2xx into `FederationClientError`. | #462 | sonnet | feat/federation-m3-client | M3-01 | 8K | Independent of server stream — can land in parallel with M3-03/04. Cert/key cached per-peer; flushed by future M5/M6 logic. |
|
|
||||||
| FED-M3-09 | not-started | `apps/gateway/src/federation/client/query-source.service.ts`. Accepts `source: "local" \| "federated:<host>" \| "all"` from gateway query layer; for `"all"` fans out to local + each peer in parallel; merges results; tags every row with `_source`. | #462 | sonnet | feat/federation-m3-query-source | M3-08 | 8K | Per-peer failure surfaces as `_partial: true` in response, not hard failure (sets up M5 offline UX). M5 adds caching + circuit breaker on top. |
|
|
||||||
| FED-M3-10 | not-started | Integration tests for MILESTONES.md M3 acceptance #6 (malformed OIDs → 401; valid cert + revoked grant → 403) and #7 (`max_rows_per_query` cap). Real PG, mocked TLS context (Fastify req shim). | #462 | sonnet | feat/federation-m3-integration | M3-05, M3-06 | 8K | Vitest profile gated by `FEDERATED_INTEGRATION=1`. Single-gateway suite; no harness required. |
|
|
||||||
| FED-M3-11 | not-started | E2E tests for MILESTONES.md M3 acceptance #1, #2, #3, #4, #5, #8, #9, #10 (8 cases). Uses harness from M3-02; two real gateways, real Step-CA, real mTLS. Each test asserts both happy-path response and audit/no-persist invariants. | #462 | sonnet | feat/federation-m3-e2e | M3-02, M3-09 | 12K | Largest single task. Each acceptance gets its own `it(...)` for clear failure attribution. |
|
|
||||||
| FED-M3-12 | not-started | Independent security review (sonnet, not author of M3-03/04/05/06/07/08/09): focus on cert-SAN spoofing, OID extraction edge cases, scope-bypass via filter manipulation, RBAC-bypass via subjectUser swap, response leakage when scope deny. | #462 | sonnet | feat/federation-m3-security-review | M3-11 | 10K | Two review rounds budgeted. PRD requires explicit test for every 401/403 path — review verifies coverage. |
|
|
||||||
| FED-M3-13 | not-started | Docs update: `docs/federation/SETUP.md` mTLS handshake section, new `docs/federation/HARNESS.md` for federation-harness usage, OID reference table in SETUP.md, scope enforcement pipeline diagram. Runbook still M7-deferred. | #462 | haiku | feat/federation-m3-docs | M3-12 | 5K | One ASCII diagram for the auth-guard → scope → RBAC pipeline; helps future reviewers reason about denial paths. |
|
|
||||||
| FED-M3-14 | not-started | PR aggregate close, CI green, merge to main, close #462. Release tag `fed-v0.3.0-m3`. Update mission manifest M3 row → done; M4 row → in-progress when work begins. | #462 | sonnet | chore/federation-m3-close | M3-13 | 3K | Same close pattern as M1-12 / M2-13. |
|
|
||||||
|
|
||||||
**M3 estimate:** ~100K tokens (vs MILESTONES.md 40K — same per-task breakdown pattern as M1/M2: tests, review, and docs split out from implementation cost). Largest milestone in the federation mission.
|
|
||||||
|
|
||||||
**Parallelization opportunities:**
|
|
||||||
|
|
||||||
- M3-08 (client) can land in parallel with M3-03/M3-04 (server pipeline) — they only share DTOs from M3-01.
|
|
||||||
- M3-02 (harness) can land in parallel with everything except M3-11.
|
|
||||||
- M3-05/M3-06/M3-07 (verbs) are independent of each other once M3-03/M3-04 land.
|
|
||||||
|
|
||||||
**Test bed fallback:** If `mos-test-1.woltje.com` / `mos-test-2.woltje.com` are still blocked on `FED-M2-DEPLOY-IMG-FIX` when M3-11 is ready to run, the harness's local `docker-compose.two-gateways.yml` is a sufficient stand-in. Production-host validation moves to M7 acceptance suite (PRD AC-12).
|
|
||||||
|
|
||||||
## Milestone 4 — search + audit + rate limit (FED-M4)
|
## Milestone 4 — search + audit + rate limit (FED-M4)
|
||||||
|
|
||||||
|
|||||||
@@ -612,44 +612,3 @@ Independent security review surfaced three high-impact and four medium findings;
|
|||||||
7. DEPLOY-03/04 acceptance probes (`mosaic gateway doctor --json`, pgvector `vector(3)` round-trip)
|
7. DEPLOY-03/04 acceptance probes (`mosaic gateway doctor --json`, pgvector `vector(3)` round-trip)
|
||||||
8. DEPLOY-05: author `docs/federation/TEST-INFRA.md`
|
8. DEPLOY-05: author `docs/federation/TEST-INFRA.md`
|
||||||
9. M2-02 (Step-CA sidecar) kicks off after image health is green
|
9. M2-02 (Step-CA sidecar) kicks off after image health is green
|
||||||
|
|
||||||
### Session 23 — 2026-04-21 — M2 close + M3 decomposition
|
|
||||||
|
|
||||||
**Closed at compaction boundary:** all 13 M2 tasks done, PRs #494–#503 merged to `main`, tag `fed-v0.2.0-m2` published, Gitea release notes posted, issue #461 closed. Main at `4ece6dc6`.
|
|
||||||
|
|
||||||
**M2 hardening landed in PR #501** (security review remediation):
|
|
||||||
|
|
||||||
- CRIT-1: post-issuance OID verification in `ca.service.ts` (rejects cert if `mosaic_grant_id` / `mosaic_subject_user_id` extensions missing or mismatched)
|
|
||||||
- CRIT-2: atomic activation guard `WHERE status='pending'` on grant + `WHERE state='pending'` on peer; throws `ConflictException` if lost race
|
|
||||||
- HIGH-2: removed try/catch fallback in `extractCertNotAfter` — parse failures propagate as 500 (no silent 90-day default)
|
|
||||||
- HIGH-4: token slice for logging (`${token.slice(0, 8)}...`) — no full token in stdout
|
|
||||||
- HIGH-5: `redeem()` wrapped in try/catch with best-effort failure audit; uses `null` (not `'unknown'`) for nullable UUID FK fallback
|
|
||||||
- MED-3: `createToken` validates `grant.peerId === dto.peerId`; `BadRequestException` on mismatch
|
|
||||||
|
|
||||||
**Remaining M2 security findings deferred to M3+:**
|
|
||||||
|
|
||||||
- HIGH-1: peerId/subjectUserId tenancy validation on `createGrant` (M3 ScopeService work surfaces this)
|
|
||||||
- HIGH-3: Step-CA cert SHA-256 fingerprint pinning (M5 cert handling)
|
|
||||||
- MED-1: token entropy already 32 bytes — wontfix
|
|
||||||
- MED-2: per-route rate limit on enrollment endpoint (M4 rate limit work)
|
|
||||||
- MED-4: CSR CN binding to peer's commonName (M3 AuthGuard work)
|
|
||||||
|
|
||||||
**M3 decomposition landed in this session:**
|
|
||||||
|
|
||||||
- 14 tasks (M3-01..M3-14), ~100K estimate
|
|
||||||
- Structure mirrors M1/M2 pattern: foundation → server stream + client stream + harness in parallel → integration → E2E → security review → docs → close
|
|
||||||
- M3-02 ships local two-gateway docker-compose (`tools/federation-harness/`) so M3-11 E2E is not blocked on the Portainer test bed (which is still blocked on `FED-M2-DEPLOY-IMG-FIX`)
|
|
||||||
|
|
||||||
**Subagent doctrine retained from M2:**
|
|
||||||
|
|
||||||
- All worker subagents use `isolation: "worktree"` to prevent branch-race incidents
|
|
||||||
- Code review is independent (different subagent, no overlap with author of work)
|
|
||||||
- `tea pr create --repo mosaicstack/stack --login mosaicstack` is the working PR-create path; `pr-create.sh` has shell-quoting bugs (followup #45 if not already filed)
|
|
||||||
- Cost tier: foundational implementation = sonnet, docs = haiku, complex multi-file architecture (security review, scope service) = sonnet with two review rounds
|
|
||||||
|
|
||||||
**Next concrete step:**
|
|
||||||
|
|
||||||
1. PR for the M3 planning artifact (this commit) — branch `docs/federation-m3-planning`
|
|
||||||
2. After merge, kickoff M3-01 (DTOs) on `feat/federation-m3-types` with sonnet subagent in worktree
|
|
||||||
3. Once M3-01 lands, fan out: M3-02 (harness) || M3-03 (AuthGuard) → M3-04 (ScopeService) || M3-08 (FederationClient)
|
|
||||||
4. Re-converge at M3-10 (Integration) → M3-11 (E2E)
|
|
||||||
|
|||||||
@@ -30,7 +30,6 @@ export default tseslint.config(
|
|||||||
'apps/gateway/vitest.config.ts',
|
'apps/gateway/vitest.config.ts',
|
||||||
'packages/storage/vitest.config.ts',
|
'packages/storage/vitest.config.ts',
|
||||||
'packages/mosaic/__tests__/*.ts',
|
'packages/mosaic/__tests__/*.ts',
|
||||||
'tools/federation-harness/*.ts',
|
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -1,254 +0,0 @@
|
|||||||
# Federation Test Harness
|
|
||||||
|
|
||||||
Local two-gateway federation test infrastructure for Mosaic Stack M3+.
|
|
||||||
|
|
||||||
This harness boots two real gateway instances (`gateway-a`, `gateway-b`) on a
|
|
||||||
shared Docker bridge network, each backed by its own Postgres (pgvector) +
|
|
||||||
Valkey, sharing a single Step-CA. It is the test bed for all M3+ federation
|
|
||||||
E2E tests.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
- Docker with Compose v2 (`docker compose version` ≥ 2.20)
|
|
||||||
- pnpm (for running via repo scripts)
|
|
||||||
- `infra/step-ca/dev-password` must exist (copy from `infra/step-ca/dev-password.example`)
|
|
||||||
|
|
||||||
## Network Topology
|
|
||||||
|
|
||||||
```
|
|
||||||
Host machine
|
|
||||||
├── localhost:14001 → gateway-a (Server A — home / requesting)
|
|
||||||
├── localhost:14002 → gateway-b (Server B — work / serving)
|
|
||||||
├── localhost:15432 → postgres-a
|
|
||||||
├── localhost:15433 → postgres-b
|
|
||||||
├── localhost:16379 → valkey-a
|
|
||||||
├── localhost:16380 → valkey-b
|
|
||||||
└── localhost:19000 → step-ca (shared CA)
|
|
||||||
|
|
||||||
Docker network: fed-test-net (bridge)
|
|
||||||
gateway-a ←──── mTLS ────→ gateway-b
|
|
||||||
↘ ↗
|
|
||||||
step-ca
|
|
||||||
```
|
|
||||||
|
|
||||||
Ports are chosen to avoid collision with the base dev stack (5433, 6380, 14242, 9000).
|
|
||||||
|
|
||||||
## Starting the Harness
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# From repo root
|
|
||||||
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
|
|
||||||
|
|
||||||
# Wait for all services to be healthy (~60-90s on first boot due to NestJS cold start)
|
|
||||||
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml ps
|
|
||||||
```
|
|
||||||
|
|
||||||
## Seeding Test Data
|
|
||||||
|
|
||||||
The seed script provisions three grant scope variants (A, B, C) and walks the
|
|
||||||
full enrollment flow so Server A ends up with active peers pointing at Server B.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Assumes stack is already running
|
|
||||||
pnpm tsx tools/federation-harness/seed.ts
|
|
||||||
|
|
||||||
# Or boot + seed in one step
|
|
||||||
pnpm tsx tools/federation-harness/seed.ts --boot
|
|
||||||
```
|
|
||||||
|
|
||||||
### Scope Variants
|
|
||||||
|
|
||||||
| Variant | Resources | Filters | Excluded | Purpose |
|
|
||||||
| ------- | ------------------ | ---------------------------------- | ----------- | ------------------------------- |
|
|
||||||
| A | tasks, notes | include_personal: true | (none) | Personal data federation |
|
|
||||||
| B | tasks | include_teams: ['T1'], no personal | (none) | Team-scoped, no personal |
|
|
||||||
| C | tasks, credentials | include_personal: true | credentials | Sanity: excluded wins over list |
|
|
||||||
|
|
||||||
## Using from Vitest
|
|
||||||
|
|
||||||
```ts
|
|
||||||
import {
|
|
||||||
bootHarness,
|
|
||||||
tearDownHarness,
|
|
||||||
serverA,
|
|
||||||
serverB,
|
|
||||||
seed,
|
|
||||||
} from '../../tools/federation-harness/harness.js';
|
|
||||||
import type { HarnessHandle } from '../../tools/federation-harness/harness.js';
|
|
||||||
|
|
||||||
let handle: HarnessHandle;
|
|
||||||
|
|
||||||
beforeAll(async () => {
|
|
||||||
handle = await bootHarness();
|
|
||||||
}, 180_000); // allow 3 min for Docker pull + NestJS cold start
|
|
||||||
|
|
||||||
afterAll(async () => {
|
|
||||||
await tearDownHarness(handle);
|
|
||||||
});
|
|
||||||
|
|
||||||
test('variant A: list tasks returns personal tasks', async () => {
|
|
||||||
// NOTE: Only 'all' is supported for now — per-variant narrowing is M3-11.
|
|
||||||
const seedResult = await seed(handle, 'all');
|
|
||||||
const a = serverA(handle);
|
|
||||||
|
|
||||||
const res = await fetch(`${a.baseUrl}/api/federation/tasks`, {
|
|
||||||
headers: { 'x-federation-grant': seedResult.grants.variantA.id },
|
|
||||||
});
|
|
||||||
expect(res.status).toBe(200);
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note:** `seed()` bootstraps a fresh admin user on each gateway via
|
|
||||||
> `POST /api/bootstrap/setup`. Both gateways must have zero users (pristine DB).
|
|
||||||
> If either gateway already has users, `seed()` throws with a clear error.
|
|
||||||
> Reset state with `docker compose down -v`.
|
|
||||||
|
|
||||||
The `bootHarness()` function is **idempotent**: if both gateways are already
|
|
||||||
healthy, it reuses the running stack and returns `ownedStack: false`. Tests
|
|
||||||
should not call `tearDownHarness` when `ownedStack` is false unless they
|
|
||||||
explicitly want to shut down a shared stack.
|
|
||||||
|
|
||||||
## Vitest Config (pnpm test:federation)
|
|
||||||
|
|
||||||
Add to `vitest.config.ts` at repo root (or a dedicated config):
|
|
||||||
|
|
||||||
```ts
|
|
||||||
// vitest.federation.config.ts
|
|
||||||
import { defineConfig } from 'vitest/config';
|
|
||||||
|
|
||||||
export default defineConfig({
|
|
||||||
test: {
|
|
||||||
include: ['**/*.federation.test.ts'],
|
|
||||||
testTimeout: 60_000,
|
|
||||||
hookTimeout: 180_000,
|
|
||||||
reporters: ['verbose'],
|
|
||||||
},
|
|
||||||
});
|
|
||||||
```
|
|
||||||
|
|
||||||
Then add to root `package.json`:
|
|
||||||
|
|
||||||
```json
|
|
||||||
"test:federation": "vitest run --config vitest.federation.config.ts"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Nuking State
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Remove containers AND volumes (ephemeral state — CA keys, DBs, everything)
|
|
||||||
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v
|
|
||||||
```
|
|
||||||
|
|
||||||
On next `up`, Step-CA re-initialises from scratch and generates new CA keys.
|
|
||||||
|
|
||||||
## Step-CA Root Certificate
|
|
||||||
|
|
||||||
The CA root lives in the `fed-harness-step-ca` Docker volume at
|
|
||||||
`/home/step/certs/root_ca.crt`. To extract it to the host:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
docker run --rm \
|
|
||||||
-v fed-harness-step-ca:/home/step \
|
|
||||||
alpine cat /home/step/certs/root_ca.crt > /tmp/fed-harness-root-ca.crt
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Port conflicts
|
|
||||||
|
|
||||||
Default host ports: 14001, 14002, 15432, 15433, 16379, 16380, 19000.
|
|
||||||
Override via environment variables before `docker compose up`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
GATEWAY_A_HOST_PORT=14101 GATEWAY_B_HOST_PORT=14102 \
|
|
||||||
docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
|
|
||||||
```
|
|
||||||
|
|
||||||
### Image pull failures
|
|
||||||
|
|
||||||
The gateway image is digest-pinned to:
|
|
||||||
|
|
||||||
```
|
|
||||||
git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
|
|
||||||
```
|
|
||||||
|
|
||||||
(sha-9f1a081, post-#491 IMG-FIX)
|
|
||||||
|
|
||||||
If the registry is unreachable, Docker will use the locally cached image if
|
|
||||||
present. If no local image exists, the compose up will fail with a pull error.
|
|
||||||
In that case:
|
|
||||||
|
|
||||||
1. Ensure you can reach `git.mosaicstack.dev` (VPN, DNS, etc.).
|
|
||||||
2. Log in: `docker login git.mosaicstack.dev`
|
|
||||||
3. Pull manually: `docker pull git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02`
|
|
||||||
|
|
||||||
### NestJS cold start
|
|
||||||
|
|
||||||
Gateway containers take 40–60 seconds to become healthy on first boot (Node.js
|
|
||||||
module resolution + NestJS DI bootstrap). The `start_period: 60s` in the
|
|
||||||
compose healthcheck covers this. `bootHarness()` polls for up to 3 minutes.
|
|
||||||
|
|
||||||
### Step-CA startup
|
|
||||||
|
|
||||||
Step-CA initialises on first boot (generates CA keys). This takes ~5-10s.
|
|
||||||
The `start_period: 30s` in the healthcheck covers it. Both gateways wait for
|
|
||||||
Step-CA to be healthy before starting (`depends_on: step-ca: condition: service_healthy`).
|
|
||||||
|
|
||||||
### dev-password missing
|
|
||||||
|
|
||||||
The Step-CA container requires `infra/step-ca/dev-password` to be mounted.
|
|
||||||
Copy the example and set a local password:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cp infra/step-ca/dev-password.example infra/step-ca/dev-password
|
|
||||||
# Edit the file to set your preferred dev CA password
|
|
||||||
```
|
|
||||||
|
|
||||||
The file is `.gitignore`d — do not commit it.
|
|
||||||
|
|
||||||
## Image Digest Note
|
|
||||||
|
|
||||||
The gateway image is pinned to `sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02`
|
|
||||||
(sha-9f1a081). This is the digest promoted by PR #491 (IMG-FIX). The `latest`
|
|
||||||
tag is forbidden per Mosaic image policy. When a new gateway build is promoted,
|
|
||||||
update the digest in `docker-compose.two-gateways.yml` and in this file.
|
|
||||||
|
|
||||||
## Known Limitations
|
|
||||||
|
|
||||||
### BETTER_AUTH_URL enrollment URL bug (upstream production code — not yet fixed)
|
|
||||||
|
|
||||||
`apps/gateway/src/federation/federation.controller.ts:145` constructs the
|
|
||||||
enrollment URL using `process.env['BETTER_AUTH_URL'] ?? 'http://localhost:14242'`.
|
|
||||||
This is an upstream bug: `BETTER_AUTH_URL` is the Better Auth origin (typically
|
|
||||||
the web app), not the gateway's own base URL. In non-harness deployments this
|
|
||||||
produces an enrollment URL pointing to the wrong host or port.
|
|
||||||
|
|
||||||
**How the harness handles this:**
|
|
||||||
|
|
||||||
1. **In-cluster calls (container-to-container):** The compose file sets
|
|
||||||
`BETTER_AUTH_URL: 'http://gateway-b:3000'` so the enrollment URL returned by
|
|
||||||
the gateway uses the Docker internal hostname. This lets other containers in the
|
|
||||||
`fed-test-net` network resolve and reach Server B's enrollment endpoint.
|
|
||||||
|
|
||||||
2. **Host-side URL rewrite (seed script):** The `seed.ts` script runs on the host
|
|
||||||
machine where `gateway-b` is not a resolvable hostname. Before calling
|
|
||||||
`fetch(enrollmentUrl, ...)`, the seed script rewrites the URL: it extracts only
|
|
||||||
the token path segment from `enrollmentUrl` and reassembles the URL using the
|
|
||||||
host-accessible `serverBUrl` (default: `http://localhost:14002`). This lets the
|
|
||||||
seed script redeem enrollment tokens from the host without being affected by the
|
|
||||||
in-cluster hostname in the returned URL.
|
|
||||||
|
|
||||||
**TODO:** Fix `federation.controller.ts` to derive the enrollment URL from its own
|
|
||||||
listening address (e.g. `GATEWAY_BASE_URL` env var or a dedicated
|
|
||||||
`FEDERATION_ENROLLMENT_BASE_URL` env var) rather than reusing `BETTER_AUTH_URL`.
|
|
||||||
Tracked as a follow-up to PR #505 — do not bundle with harness changes.
|
|
||||||
|
|
||||||
## Permanent Infrastructure
|
|
||||||
|
|
||||||
This harness is designed to outlive M3 and be reused by M4+ milestone tests.
|
|
||||||
It is not a throwaway scaffold — treat it as production test infrastructure:
|
|
||||||
|
|
||||||
- Keep it idempotent.
|
|
||||||
- Do not hardcode test assumptions in the harness layer (put them in tests).
|
|
||||||
- Update the seed script when new scope variants are needed.
|
|
||||||
- The README and harness should be kept in sync as the federation API evolves.
|
|
||||||
@@ -1,247 +0,0 @@
|
|||||||
# tools/federation-harness/docker-compose.two-gateways.yml
|
|
||||||
#
|
|
||||||
# Two-gateway federation test harness — local-only, no Portainer/Swarm needed.
|
|
||||||
#
|
|
||||||
# USAGE (manual):
|
|
||||||
# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml up -d
|
|
||||||
# docker compose -f tools/federation-harness/docker-compose.two-gateways.yml down -v
|
|
||||||
#
|
|
||||||
# USAGE (from harness.ts):
|
|
||||||
# const handle = await bootHarness();
|
|
||||||
# ...
|
|
||||||
# await tearDownHarness(handle);
|
|
||||||
#
|
|
||||||
# TOPOLOGY:
|
|
||||||
# gateway-a — "home" instance (Server A, the requesting side)
|
|
||||||
# └── postgres-a (pgvector/pg17, port 15432)
|
|
||||||
# └── valkey-a (port 16379)
|
|
||||||
# gateway-b — "work" instance (Server B, the serving side)
|
|
||||||
# └── postgres-b (pgvector/pg17, port 15433)
|
|
||||||
# └── valkey-b (port 16380)
|
|
||||||
# step-ca — shared CA for both gateways (port 19000)
|
|
||||||
#
|
|
||||||
# All services share the `fed-test-net` bridge network.
|
|
||||||
# Host port ranges (15432-15433, 16379-16380, 14001-14002, 19000) are chosen
|
|
||||||
# to avoid collision with the base dev stack (5433, 6380, 14242, 9000).
|
|
||||||
#
|
|
||||||
# IMAGE:
|
|
||||||
# Pinned to the immutable digest sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
|
|
||||||
# (sha-9f1a081, post-#491 IMG-FIX, smoke-tested locally).
|
|
||||||
# Update this digest only after a new CI build is promoted to the registry.
|
|
||||||
#
|
|
||||||
# STEP-CA:
|
|
||||||
# Single shared Step-CA instance. Both gateways connect to it.
|
|
||||||
# CA volume is ephemeral per `docker compose down -v`; regenerated on next up.
|
|
||||||
# The harness seed script provisions the CA roots cross-trust after first boot.
|
|
||||||
|
|
||||||
services:
|
|
||||||
# ─── Shared Certificate Authority ────────────────────────────────────────────
|
|
||||||
step-ca:
|
|
||||||
image: smallstep/step-ca:0.27.4
|
|
||||||
container_name: fed-harness-step-ca
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${STEP_CA_HOST_PORT:-19000}:9000'
|
|
||||||
volumes:
|
|
||||||
- step_ca_data:/home/step
|
|
||||||
- ../../infra/step-ca/init.sh:/usr/local/bin/mosaic-step-ca-init.sh:ro
|
|
||||||
- ../../infra/step-ca/templates:/etc/step-ca-templates:ro
|
|
||||||
- ../../infra/step-ca/dev-password:/run/secrets/ca_password:ro
|
|
||||||
entrypoint: ['/bin/sh', '/usr/local/bin/mosaic-step-ca-init.sh']
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test:
|
|
||||||
[
|
|
||||||
'CMD',
|
|
||||||
'step',
|
|
||||||
'ca',
|
|
||||||
'health',
|
|
||||||
'--ca-url',
|
|
||||||
'https://localhost:9000',
|
|
||||||
'--root',
|
|
||||||
'/home/step/certs/root_ca.crt',
|
|
||||||
]
|
|
||||||
interval: 10s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 5
|
|
||||||
start_period: 30s
|
|
||||||
|
|
||||||
# ─── Server A — Home / Requesting Gateway ────────────────────────────────────
|
|
||||||
postgres-a:
|
|
||||||
image: pgvector/pgvector:pg17
|
|
||||||
container_name: fed-harness-postgres-a
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${PG_A_HOST_PORT:-15432}:5432'
|
|
||||||
environment:
|
|
||||||
POSTGRES_USER: mosaic
|
|
||||||
POSTGRES_PASSWORD: mosaic
|
|
||||||
POSTGRES_DB: mosaic
|
|
||||||
volumes:
|
|
||||||
- pg_a_data:/var/lib/postgresql/data
|
|
||||||
- ../../infra/pg-init:/docker-entrypoint-initdb.d:ro
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test: ['CMD-SHELL', 'pg_isready -U mosaic']
|
|
||||||
interval: 5s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 5
|
|
||||||
|
|
||||||
valkey-a:
|
|
||||||
image: valkey/valkey:8-alpine
|
|
||||||
container_name: fed-harness-valkey-a
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${VALKEY_A_HOST_PORT:-16379}:6379'
|
|
||||||
volumes:
|
|
||||||
- valkey_a_data:/data
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test: ['CMD', 'valkey-cli', 'ping']
|
|
||||||
interval: 5s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 5
|
|
||||||
|
|
||||||
gateway-a:
|
|
||||||
image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
|
|
||||||
# Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally)
|
|
||||||
container_name: fed-harness-gateway-a
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${GATEWAY_A_HOST_PORT:-14001}:3000'
|
|
||||||
environment:
|
|
||||||
MOSAIC_TIER: federated
|
|
||||||
DATABASE_URL: postgres://mosaic:mosaic@postgres-a:5432/mosaic
|
|
||||||
VALKEY_URL: redis://valkey-a:6379
|
|
||||||
GATEWAY_PORT: '3000'
|
|
||||||
GATEWAY_CORS_ORIGIN: 'http://localhost:14001'
|
|
||||||
BETTER_AUTH_SECRET: harness-secret-server-a-do-not-use-in-prod
|
|
||||||
BETTER_AUTH_URL: 'http://gateway-a:3000'
|
|
||||||
STEP_CA_URL: 'https://step-ca:9000'
|
|
||||||
FEDERATION_PEER_HOSTNAME: gateway-a
|
|
||||||
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
|
|
||||||
# the first admin user. Only valid on a pristine (zero-user) database.
|
|
||||||
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
|
|
||||||
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-a
|
|
||||||
depends_on:
|
|
||||||
postgres-a:
|
|
||||||
condition: service_healthy
|
|
||||||
valkey-a:
|
|
||||||
condition: service_healthy
|
|
||||||
step-ca:
|
|
||||||
condition: service_healthy
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test:
|
|
||||||
[
|
|
||||||
'CMD',
|
|
||||||
'node',
|
|
||||||
'-e',
|
|
||||||
"require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
|
|
||||||
]
|
|
||||||
interval: 10s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 5
|
|
||||||
start_period: 60s
|
|
||||||
|
|
||||||
# ─── Server B — Work / Serving Gateway ──────────────────────────────────────
|
|
||||||
postgres-b:
|
|
||||||
image: pgvector/pgvector:pg17
|
|
||||||
container_name: fed-harness-postgres-b
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${PG_B_HOST_PORT:-15433}:5432'
|
|
||||||
environment:
|
|
||||||
POSTGRES_USER: mosaic
|
|
||||||
POSTGRES_PASSWORD: mosaic
|
|
||||||
POSTGRES_DB: mosaic
|
|
||||||
volumes:
|
|
||||||
- pg_b_data:/var/lib/postgresql/data
|
|
||||||
- ../../infra/pg-init:/docker-entrypoint-initdb.d:ro
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test: ['CMD-SHELL', 'pg_isready -U mosaic']
|
|
||||||
interval: 5s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 5
|
|
||||||
|
|
||||||
valkey-b:
|
|
||||||
image: valkey/valkey:8-alpine
|
|
||||||
container_name: fed-harness-valkey-b
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${VALKEY_B_HOST_PORT:-16380}:6379'
|
|
||||||
volumes:
|
|
||||||
- valkey_b_data:/data
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test: ['CMD', 'valkey-cli', 'ping']
|
|
||||||
interval: 5s
|
|
||||||
timeout: 3s
|
|
||||||
retries: 5
|
|
||||||
|
|
||||||
gateway-b:
|
|
||||||
image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
|
|
||||||
# Tag for human reference: sha-9f1a081 (post-#491 IMG-FIX; smoke-tested locally)
|
|
||||||
container_name: fed-harness-gateway-b
|
|
||||||
restart: unless-stopped
|
|
||||||
ports:
|
|
||||||
- '${GATEWAY_B_HOST_PORT:-14002}:3000'
|
|
||||||
environment:
|
|
||||||
MOSAIC_TIER: federated
|
|
||||||
DATABASE_URL: postgres://mosaic:mosaic@postgres-b:5432/mosaic
|
|
||||||
VALKEY_URL: redis://valkey-b:6379
|
|
||||||
GATEWAY_PORT: '3000'
|
|
||||||
GATEWAY_CORS_ORIGIN: 'http://localhost:14002'
|
|
||||||
BETTER_AUTH_SECRET: harness-secret-server-b-do-not-use-in-prod
|
|
||||||
BETTER_AUTH_URL: 'http://gateway-b:3000'
|
|
||||||
STEP_CA_URL: 'https://step-ca:9000'
|
|
||||||
FEDERATION_PEER_HOSTNAME: gateway-b
|
|
||||||
# Bootstrap password for POST /api/bootstrap/setup — used by seed.ts to create
|
|
||||||
# the first admin user. Only valid on a pristine (zero-user) database.
|
|
||||||
# Not the same as ADMIN_API_KEY — there is no static API key in the gateway.
|
|
||||||
ADMIN_BOOTSTRAP_PASSWORD: harness-admin-password-b
|
|
||||||
depends_on:
|
|
||||||
postgres-b:
|
|
||||||
condition: service_healthy
|
|
||||||
valkey-b:
|
|
||||||
condition: service_healthy
|
|
||||||
step-ca:
|
|
||||||
condition: service_healthy
|
|
||||||
networks:
|
|
||||||
- fed-test-net
|
|
||||||
healthcheck:
|
|
||||||
test:
|
|
||||||
[
|
|
||||||
'CMD',
|
|
||||||
'node',
|
|
||||||
'-e',
|
|
||||||
"require('http').get('http://127.0.0.1:3000/api/health', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
|
|
||||||
]
|
|
||||||
interval: 10s
|
|
||||||
timeout: 5s
|
|
||||||
retries: 5
|
|
||||||
start_period: 60s
|
|
||||||
|
|
||||||
networks:
|
|
||||||
fed-test-net:
|
|
||||||
name: fed-test-net
|
|
||||||
driver: bridge
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
step_ca_data:
|
|
||||||
name: fed-harness-step-ca
|
|
||||||
pg_a_data:
|
|
||||||
name: fed-harness-pg-a
|
|
||||||
valkey_a_data:
|
|
||||||
name: fed-harness-valkey-a
|
|
||||||
pg_b_data:
|
|
||||||
name: fed-harness-pg-b
|
|
||||||
valkey_b_data:
|
|
||||||
name: fed-harness-valkey-b
|
|
||||||
@@ -1,290 +0,0 @@
|
|||||||
/**
|
|
||||||
* tools/federation-harness/harness.ts
|
|
||||||
*
|
|
||||||
* Vitest-consumable helpers for the two-gateway federation harness.
|
|
||||||
*
|
|
||||||
* USAGE (in a vitest test file):
|
|
||||||
*
|
|
||||||
* import { bootHarness, tearDownHarness, serverA, serverB, seed } from
|
|
||||||
* '../../tools/federation-harness/harness.js';
|
|
||||||
*
|
|
||||||
* let handle: HarnessHandle;
|
|
||||||
*
|
|
||||||
* beforeAll(async () => {
|
|
||||||
* handle = await bootHarness();
|
|
||||||
* }, 180_000);
|
|
||||||
*
|
|
||||||
* afterAll(async () => {
|
|
||||||
* await tearDownHarness(handle);
|
|
||||||
* });
|
|
||||||
*
|
|
||||||
* test('variant A — list tasks', async () => {
|
|
||||||
* const seedResult = await seed(handle, 'all');
|
|
||||||
* const a = serverA(handle);
|
|
||||||
* const res = await fetch(`${a.baseUrl}/api/federation/list/tasks`, {
|
|
||||||
* headers: { Authorization: `Bearer ${seedResult.adminTokenA}` },
|
|
||||||
* });
|
|
||||||
* expect(res.status).toBe(200);
|
|
||||||
* });
|
|
||||||
*
|
|
||||||
* NOTE: The `seed()` helper currently only supports scenario='all'. Passing any
|
|
||||||
* other value throws immediately. Per-variant narrowing is deferred to M3-11.
|
|
||||||
*
|
|
||||||
* ESM / NodeNext: all imports use .js extensions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { execSync, execFileSync } from 'node:child_process';
|
|
||||||
import { resolve, dirname } from 'node:path';
|
|
||||||
import { fileURLToPath } from 'node:url';
|
|
||||||
import { runSeed, type SeedResult } from './seed.js';
|
|
||||||
|
|
||||||
// ─── Types ───────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
export interface GatewayAccessor {
|
|
||||||
/** Base URL reachable from the host machine, e.g. http://localhost:14001 */
|
|
||||||
baseUrl: string;
|
|
||||||
/** Bootstrap password used for POST /api/bootstrap/setup on a pristine gateway */
|
|
||||||
bootstrapPassword: string;
|
|
||||||
/** Internal Docker network hostname (for container-to-container calls) */
|
|
||||||
internalHostname: string;
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface HarnessHandle {
|
|
||||||
/** Server A accessor */
|
|
||||||
a: GatewayAccessor;
|
|
||||||
/** Server B accessor */
|
|
||||||
b: GatewayAccessor;
|
|
||||||
/** Absolute path to the docker-compose file */
|
|
||||||
composeFile: string;
|
|
||||||
/** Whether this instance booted the stack (vs. reusing an existing one) */
|
|
||||||
ownedStack: boolean;
|
|
||||||
/** Optional seed result if seed() was called */
|
|
||||||
seedResult?: SeedResult;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Scenario to seed. Currently only 'all' is implemented; per-variant narrowing
|
|
||||||
* is tracked as M3-11. Passing any other value throws immediately with a clear
|
|
||||||
* error rather than silently over-seeding.
|
|
||||||
*/
|
|
||||||
export type SeedScenario = 'variantA' | 'variantB' | 'variantC' | 'all';
|
|
||||||
|
|
||||||
// ─── Constants ────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
||||||
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');
|
|
||||||
|
|
||||||
const GATEWAY_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
|
|
||||||
const GATEWAY_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';
|
|
||||||
const ADMIN_BOOTSTRAP_PASSWORD_A =
|
|
||||||
process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
|
|
||||||
const ADMIN_BOOTSTRAP_PASSWORD_B =
|
|
||||||
process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';
|
|
||||||
|
|
||||||
const READINESS_TIMEOUT_MS = 180_000;
|
|
||||||
const READINESS_POLL_MS = 3_000;
|
|
||||||
|
|
||||||
// ─── Internal helpers ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
async function isGatewayHealthy(baseUrl: string): Promise<boolean> {
|
|
||||||
try {
|
|
||||||
const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
|
|
||||||
return res.ok;
|
|
||||||
} catch {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Poll both gateways in parallel until both are healthy or the shared deadline
|
|
||||||
* expires. Polling in parallel (rather than sequentially) avoids the bug where
|
|
||||||
* a slow gateway-a consumes all of the readiness budget before gateway-b is
|
|
||||||
* checked.
|
|
||||||
*/
|
|
||||||
async function waitForStack(handle: HarnessHandle): Promise<void> {
|
|
||||||
const gateways: Array<{ label: string; url: string }> = [
|
|
||||||
{ label: 'gateway-a', url: handle.a.baseUrl },
|
|
||||||
{ label: 'gateway-b', url: handle.b.baseUrl },
|
|
||||||
];
|
|
||||||
|
|
||||||
await Promise.all(
|
|
||||||
gateways.map(async (gw) => {
|
|
||||||
// Each gateway gets its own independent deadline.
|
|
||||||
const deadline = Date.now() + READINESS_TIMEOUT_MS;
|
|
||||||
process.stdout.write(`[harness] Waiting for ${gw.label}...`);
|
|
||||||
|
|
||||||
while (Date.now() < deadline) {
|
|
||||||
if (await isGatewayHealthy(gw.url)) {
|
|
||||||
process.stdout.write(` ready\n`);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (Date.now() + READINESS_POLL_MS > deadline) {
|
|
||||||
throw new Error(
|
|
||||||
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
|
|
||||||
process.stdout.write('.');
|
|
||||||
}
|
|
||||||
|
|
||||||
throw new Error(
|
|
||||||
`[harness] ${gw.label} did not become healthy within ${READINESS_TIMEOUT_MS.toString()}ms`,
|
|
||||||
);
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
function isStackRunning(): boolean {
|
|
||||||
try {
|
|
||||||
const output = execFileSync(
|
|
||||||
'docker',
|
|
||||||
['compose', '-f', COMPOSE_FILE, 'ps', '--format', 'json'],
|
|
||||||
{ encoding: 'utf8', stdio: ['pipe', 'pipe', 'pipe'] },
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!output.trim()) return false;
|
|
||||||
|
|
||||||
// Parse JSON lines — each running service emits a JSON object per line
|
|
||||||
const lines = output.trim().split('\n').filter(Boolean);
|
|
||||||
const runningServices = lines.filter((line) => {
|
|
||||||
try {
|
|
||||||
const obj = JSON.parse(line) as { State?: string };
|
|
||||||
return obj.State === 'running';
|
|
||||||
} catch {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
// Expect at least gateway-a and gateway-b running
|
|
||||||
return runningServices.length >= 2;
|
|
||||||
} catch {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Public API ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Boot the harness stack.
|
|
||||||
*
|
|
||||||
* Idempotent: if the stack is already running and both gateways are healthy,
|
|
||||||
* this function reuses the existing stack and returns a handle with
|
|
||||||
* `ownedStack: false`. Callers that set `ownedStack: false` should NOT call
|
|
||||||
* `tearDownHarness` unless they explicitly want to tear down a pre-existing stack.
|
|
||||||
*
|
|
||||||
* If the stack is not running, it starts it with `docker compose up -d` and
|
|
||||||
* waits for both gateways to pass their /api/health probe.
|
|
||||||
*/
|
|
||||||
export async function bootHarness(): Promise<HarnessHandle> {
|
|
||||||
const handle: HarnessHandle = {
|
|
||||||
a: {
|
|
||||||
baseUrl: GATEWAY_A_URL,
|
|
||||||
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_A,
|
|
||||||
internalHostname: 'gateway-a',
|
|
||||||
},
|
|
||||||
b: {
|
|
||||||
baseUrl: GATEWAY_B_URL,
|
|
||||||
bootstrapPassword: ADMIN_BOOTSTRAP_PASSWORD_B,
|
|
||||||
internalHostname: 'gateway-b',
|
|
||||||
},
|
|
||||||
composeFile: COMPOSE_FILE,
|
|
||||||
ownedStack: false,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Check if both gateways are already healthy
|
|
||||||
const [aHealthy, bHealthy] = await Promise.all([
|
|
||||||
isGatewayHealthy(handle.a.baseUrl),
|
|
||||||
isGatewayHealthy(handle.b.baseUrl),
|
|
||||||
]);
|
|
||||||
|
|
||||||
if (aHealthy && bHealthy) {
|
|
||||||
console.log('[harness] Stack already running — reusing existing stack.');
|
|
||||||
handle.ownedStack = false;
|
|
||||||
return handle;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log('[harness] Starting federation harness stack...');
|
|
||||||
execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
|
|
||||||
handle.ownedStack = true;
|
|
||||||
|
|
||||||
await waitForStack(handle);
|
|
||||||
console.log('[harness] Stack is ready.');
|
|
||||||
|
|
||||||
return handle;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Tear down the harness stack.
|
|
||||||
*
|
|
||||||
* Runs `docker compose down -v` to remove containers AND volumes (ephemeral state).
|
|
||||||
* Only tears down if `handle.ownedStack` is true unless `force` is set.
|
|
||||||
*/
|
|
||||||
export async function tearDownHarness(
|
|
||||||
handle: HarnessHandle,
|
|
||||||
opts?: { force?: boolean },
|
|
||||||
): Promise<void> {
|
|
||||||
if (!handle.ownedStack && !opts?.force) {
|
|
||||||
console.log(
|
|
||||||
'[harness] Stack not owned by this handle — skipping teardown (pass force: true to override).',
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log('[harness] Tearing down federation harness stack...');
|
|
||||||
execSync(`docker compose -f "${handle.composeFile}" down -v`, { stdio: 'inherit' });
|
|
||||||
console.log('[harness] Stack torn down.');
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Return the Server A accessor from a harness handle.
|
|
||||||
* Convenience wrapper for test readability.
|
|
||||||
*/
|
|
||||||
export function serverA(handle: HarnessHandle): GatewayAccessor {
|
|
||||||
return handle.a;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Return the Server B accessor from a harness handle.
|
|
||||||
* Convenience wrapper for test readability.
|
|
||||||
*/
|
|
||||||
export function serverB(handle: HarnessHandle): GatewayAccessor {
|
|
||||||
return handle.b;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Seed the harness with test data for one or more scenarios.
|
|
||||||
*
|
|
||||||
* @param handle The harness handle returned by bootHarness().
|
|
||||||
* @param scenario Which scope variants to provision. Currently only 'all' is
|
|
||||||
* supported — passing any other value throws immediately with a
|
|
||||||
* clear error. Per-variant narrowing is tracked as M3-11.
|
|
||||||
*
|
|
||||||
* Returns a SeedResult with grant IDs, peer IDs, and admin tokens for each
|
|
||||||
* gateway, which test assertions can reference.
|
|
||||||
*
|
|
||||||
* IMPORTANT: The harness assumes a pristine database on both gateways. The seed
|
|
||||||
* bootstraps an admin user on each gateway via POST /api/bootstrap/setup. If
|
|
||||||
* either gateway already has users, seed() throws with a clear error message.
|
|
||||||
* Run 'docker compose down -v' to reset state.
|
|
||||||
*/
|
|
||||||
export async function seed(
|
|
||||||
handle: HarnessHandle,
|
|
||||||
scenario: SeedScenario = 'all',
|
|
||||||
): Promise<SeedResult> {
|
|
||||||
if (scenario !== 'all') {
|
|
||||||
throw new Error(
|
|
||||||
`seed: scenario narrowing not yet implemented; pass "all" for now. ` +
|
|
||||||
`Got: "${scenario}". Per-variant narrowing is tracked as M3-11.`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = await runSeed({
|
|
||||||
serverAUrl: handle.a.baseUrl,
|
|
||||||
serverBUrl: handle.b.baseUrl,
|
|
||||||
adminBootstrapPasswordA: handle.a.bootstrapPassword,
|
|
||||||
adminBootstrapPasswordB: handle.b.bootstrapPassword,
|
|
||||||
});
|
|
||||||
|
|
||||||
handle.seedResult = result;
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
@@ -1,603 +0,0 @@
|
|||||||
#!/usr/bin/env tsx
|
|
||||||
/**
|
|
||||||
* tools/federation-harness/seed.ts
|
|
||||||
*
|
|
||||||
* Provisions test data for the two-gateway federation harness.
|
|
||||||
* Run via: tsx tools/federation-harness/seed.ts
|
|
||||||
*
|
|
||||||
* What this script does:
|
|
||||||
* 1. (Optional) Boots the compose stack if --boot flag is passed.
|
|
||||||
* 2. Waits for both gateways to be healthy.
|
|
||||||
* 3. Bootstraps an admin user + token on each gateway via POST /api/bootstrap/setup.
|
|
||||||
* 4. Creates three grants on Server B matching the M3 acceptance test scenarios:
|
|
||||||
* - Scope variant A: tasks + notes, include_personal: true
|
|
||||||
* - Scope variant B: tasks only, include_teams: ['T1'], exclude T2
|
|
||||||
* - Scope variant C: tasks + credentials in resources, credentials excluded (sanity)
|
|
||||||
* 5. For each grant, walks the full enrollment flow:
|
|
||||||
* a. Server B creates a peer keypair (represents the requesting side).
|
|
||||||
* b. Server B creates the grant referencing that peer.
|
|
||||||
* c. Server B issues an enrollment token.
|
|
||||||
* d. Server A creates its own peer keypair (represents its view of B).
|
|
||||||
* e. Server A redeems the enrollment token at Server B's enrollment endpoint,
|
|
||||||
* submitting A's CSR → receives signed cert back.
|
|
||||||
* f. Server A stores the cert on its peer record → peer becomes active.
|
|
||||||
* 6. Inserts representative test tasks/notes/credentials on Server B.
|
|
||||||
*
|
|
||||||
* IMPORTANT: This script uses the real admin REST API — no direct DB writes.
|
|
||||||
* It exercises the full enrollment flow as M3 acceptance tests will.
|
|
||||||
*
|
|
||||||
* ESM / NodeNext: all imports use .js extensions.
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { execSync } from 'node:child_process';
|
|
||||||
import { resolve, dirname } from 'node:path';
|
|
||||||
import { fileURLToPath } from 'node:url';
|
|
||||||
|
|
||||||
// ─── Constants ───────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
// ESM has no CJS __dirname; derive it from import.meta.url so the compose
// file resolves relative to this script regardless of the CWD.
const __dirname = dirname(fileURLToPath(import.meta.url));
const COMPOSE_FILE = resolve(__dirname, 'docker-compose.two-gateways.yml');

/** Base URLs as seen from the host machine (mapped host ports). */
const SERVER_A_URL = process.env['GATEWAY_A_URL'] ?? 'http://localhost:14001';
const SERVER_B_URL = process.env['GATEWAY_B_URL'] ?? 'http://localhost:14002';

/**
 * Bootstrap passwords used when calling POST /api/bootstrap/setup on each
 * gateway. Each gateway starts with zero users and requires a one-time setup
 * call before any admin-guarded endpoints can be used.
 */
const ADMIN_BOOTSTRAP_PASSWORD_A =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_A'] ?? 'harness-admin-password-a';
const ADMIN_BOOTSTRAP_PASSWORD_B =
  process.env['ADMIN_BOOTSTRAP_PASSWORD_B'] ?? 'harness-admin-password-b';

// Health-probe budget: total wait time and the interval between polls.
const READINESS_TIMEOUT_MS = 120_000;
const READINESS_POLL_MS = 3_000;
|
|
||||||
|
|
||||||
// ─── Scope variant definitions (for M3 acceptance tests) ─────────────────────
|
|
||||||
|
|
||||||
/** Scope variant A — tasks + notes, personal data included. */
export const SCOPE_VARIANT_A = {
  resources: ['tasks', 'notes'],
  filters: {
    tasks: { include_personal: true },
    notes: { include_personal: true },
  },
  excluded_resources: [] as string[],
  // Row cap applied to every federated query under this grant.
  max_rows_per_query: 500,
};

/** Scope variant B — tasks only, team T1 only, no personal. */
export const SCOPE_VARIANT_B = {
  resources: ['tasks'],
  filters: {
    // Only team T1 is included; T2 (and all others) are implicitly excluded.
    tasks: { include_teams: ['T1'], include_personal: false },
  },
  excluded_resources: [] as string[],
  max_rows_per_query: 500,
};

/**
 * Scope variant C — tasks + credentials in resources list, but credentials
 * explicitly in excluded_resources. Sanity test: credentials must still be
 * inaccessible even though they appear in resources.
 */
export const SCOPE_VARIANT_C = {
  resources: ['tasks', 'credentials'],
  filters: {
    tasks: { include_personal: true },
  },
  // excluded_resources must win over resources — this is the point of the test.
  excluded_resources: ['credentials'],
  max_rows_per_query: 500,
};
|
|
||||||
|
|
||||||
// ─── Inline types (no import from packages/types — M3-01 branch not yet merged) ─
|
|
||||||
|
|
||||||
/** Options accepted by adminFetch(). */
interface AdminFetchOptions {
  /** HTTP method; defaults to 'GET' when omitted. */
  method?: string;
  /** Request payload; JSON-serialized when defined, omitted otherwise. */
  body?: unknown;
  /** Plaintext admin bearer token obtained from the bootstrap call. */
  adminToken: string;
}

/** Peer record as returned by POST /api/admin/federation/peers/keypair. */
interface PeerRecord {
  peerId: string;
  /** PEM-encoded certificate signing request for the freshly created keypair. */
  csrPem: string;
}

/** Grant record as returned by the federation grants admin endpoints. */
interface GrantRecord {
  id: string;
  /** Grant lifecycle status string as reported by the gateway. */
  status: string;
  /** Scope document; shape intentionally opaque to the seed script. */
  scope: unknown;
}

/** Result of POST /api/admin/federation/grants/:id/tokens. */
interface EnrollmentTokenResult {
  /** One-time enrollment token (the credential for the redeem call). */
  token: string;
  expiresAt: string;
  /** Redemption URL as built by the issuing gateway (in-cluster hostname). */
  enrollmentUrl: string;
}

/** Result of redeeming an enrollment token with a CSR. */
interface EnrollmentRedeemResult {
  /** Leaf certificate signed by the issuing gateway's CA. */
  certPem: string;
  certChainPem: string;
}

/** Result of bootstrapping an admin user on one gateway. */
interface BootstrapResult {
  adminUserId: string;
  /** Plaintext admin token (only returned once, at bootstrap time). */
  adminToken: string;
}

/** Everything runSeed() provisions, for use by test assertions. */
export interface SeedResult {
  serverAUrl: string;
  serverBUrl: string;
  adminTokenA: string;
  adminTokenB: string;
  adminUserIdA: string;
  adminUserIdB: string;
  /** One grant (created on Server B) per scope variant. */
  grants: {
    variantA: GrantRecord;
    variantB: GrantRecord;
    variantC: GrantRecord;
  };
  /** Server A's peer record per variant, tagged with the matching grant ID. */
  peers: {
    variantA: PeerRecord & { grantId: string };
    variantB: PeerRecord & { grantId: string };
    variantC: PeerRecord & { grantId: string };
  };
}
|
|
||||||
|
|
||||||
// ─── HTTP helpers ─────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Authenticated admin fetch. Sends `Authorization: Bearer <adminToken>` which
|
|
||||||
* is the only path supported by AdminGuard (DB-backed sha256 token lookup).
|
|
||||||
* No `x-admin-key` header path exists in the gateway.
|
|
||||||
*/
|
|
||||||
async function adminFetch<T>(baseUrl: string, path: string, opts: AdminFetchOptions): Promise<T> {
|
|
||||||
const url = `${baseUrl}${path}`;
|
|
||||||
const res = await fetch(url, {
|
|
||||||
method: opts.method ?? 'GET',
|
|
||||||
headers: {
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
Authorization: `Bearer ${opts.adminToken}`,
|
|
||||||
},
|
|
||||||
body: opts.body !== undefined ? JSON.stringify(opts.body) : undefined,
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!res.ok) {
|
|
||||||
const text = await res.text().catch(() => '(no body)');
|
|
||||||
throw new Error(`${opts.method ?? 'GET'} ${url} → ${res.status}: ${text}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
return res.json() as Promise<T>;
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Admin bootstrap ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Bootstrap an admin user on a pristine gateway.
|
|
||||||
*
|
|
||||||
* Steps:
|
|
||||||
* 1. GET /api/bootstrap/status — confirms needsSetup === true.
|
|
||||||
* 2. POST /api/bootstrap/setup with { name, email, password } — returns
|
|
||||||
* { user, token: { plaintext } }.
|
|
||||||
*
|
|
||||||
* The harness assumes a fresh DB. If needsSetup is false the harness fails
|
|
||||||
* fast with a clear error rather than proceeding with an unknown token.
|
|
||||||
*/
|
|
||||||
async function bootstrapAdmin(
|
|
||||||
baseUrl: string,
|
|
||||||
label: string,
|
|
||||||
password: string,
|
|
||||||
): Promise<BootstrapResult> {
|
|
||||||
console.log(`[seed] Bootstrapping admin on ${label} (${baseUrl})...`);
|
|
||||||
|
|
||||||
// 1. Check status
|
|
||||||
const statusRes = await fetch(`${baseUrl}/api/bootstrap/status`);
|
|
||||||
if (!statusRes.ok) {
|
|
||||||
throw new Error(`[seed] GET ${baseUrl}/api/bootstrap/status → ${statusRes.status.toString()}`);
|
|
||||||
}
|
|
||||||
const status = (await statusRes.json()) as { needsSetup: boolean };
|
|
||||||
|
|
||||||
if (!status.needsSetup) {
|
|
||||||
throw new Error(
|
|
||||||
`[seed] ${label} at ${baseUrl} already has users (needsSetup=false). ` +
|
|
||||||
`The harness requires a pristine database. Run 'docker compose down -v' to reset.`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Bootstrap
|
|
||||||
const setupRes = await fetch(`${baseUrl}/api/bootstrap/setup`, {
|
|
||||||
method: 'POST',
|
|
||||||
headers: { 'Content-Type': 'application/json' },
|
|
||||||
body: JSON.stringify({
|
|
||||||
name: `Harness Admin (${label})`,
|
|
||||||
email: `harness-admin-${label.toLowerCase().replace(/\s+/g, '-')}@example.invalid`,
|
|
||||||
password,
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
|
|
||||||
if (!setupRes.ok) {
|
|
||||||
const body = await setupRes.text().catch(() => '(no body)');
|
|
||||||
throw new Error(
|
|
||||||
`[seed] POST ${baseUrl}/api/bootstrap/setup → ${setupRes.status.toString()}: ${body}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
const result = (await setupRes.json()) as {
|
|
||||||
user: { id: string };
|
|
||||||
token: { plaintext: string };
|
|
||||||
};
|
|
||||||
|
|
||||||
console.log(`[seed] ${label} admin user: ${result.user.id}`);
|
|
||||||
console.log(`[seed] ${label} admin token: ${result.token.plaintext.slice(0, 8)}...`);
|
|
||||||
|
|
||||||
return {
|
|
||||||
adminUserId: result.user.id,
|
|
||||||
adminToken: result.token.plaintext,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Readiness probe ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
async function waitForGateway(baseUrl: string, label: string): Promise<void> {
|
|
||||||
const deadline = Date.now() + READINESS_TIMEOUT_MS;
|
|
||||||
let lastError: string = '';
|
|
||||||
|
|
||||||
while (Date.now() < deadline) {
|
|
||||||
try {
|
|
||||||
const res = await fetch(`${baseUrl}/api/health`, { signal: AbortSignal.timeout(5_000) });
|
|
||||||
if (res.ok) {
|
|
||||||
console.log(`[seed] ${label} is ready (${baseUrl})`);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
lastError = `HTTP ${res.status.toString()}`;
|
|
||||||
} catch (err) {
|
|
||||||
lastError = err instanceof Error ? err.message : String(err);
|
|
||||||
}
|
|
||||||
await new Promise((r) => setTimeout(r, READINESS_POLL_MS));
|
|
||||||
}
|
|
||||||
|
|
||||||
throw new Error(
|
|
||||||
`[seed] ${label} did not become ready within ${READINESS_TIMEOUT_MS.toString()}ms — last error: ${lastError}`,
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Enrollment flow ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
 * Walk the full enrollment flow for one grant.
 *
 * The correct two-sided flow (matching the data model's FK semantics):
 *
 * 1. On Server B: POST /api/admin/federation/peers/keypair
 *    → peerId_B (Server B's peer record representing the requesting side)
 * 2. On Server B: POST /api/admin/federation/grants with peerId: peerId_B
 *    → grant (FK to Server B's own federation_peers table — no violation)
 * 3. On Server B: POST /api/admin/federation/grants/:id/tokens
 *    → enrollmentUrl pointing back to Server B
 * 4. On Server A: POST /api/admin/federation/peers/keypair
 *    → peerId_A + csrPem_A (Server A's local record of Server B)
 * 5. Server A → Server B: POST enrollmentUrl with { csrPem: csrPem_A }
 *    → certPem signed by Server B's CA
 * 6. On Server A: PATCH /api/admin/federation/peers/:peerId_A/cert with certPem
 *    → Server A's peer record transitions to active
 *
 * Returns the activated grant (from Server B) and Server A's peer record.
 *
 * @throws Error if any HTTP step fails (adminFetch and the redeem call both
 *   throw on non-OK responses).
 */
async function enrollGrant(opts: {
  label: string;
  subjectUserId: string;
  scope: unknown;
  adminTokenA: string;
  adminTokenB: string;
  serverAUrl: string;
  serverBUrl: string;
}): Promise<{ grant: GrantRecord; peer: PeerRecord & { grantId: string } }> {
  const { label, subjectUserId, scope, adminTokenA, adminTokenB, serverAUrl, serverBUrl } = opts;
  console.log(`\n[seed] Enrolling grant for scope variant ${label}...`);

  // 1. Create peer keypair on Server B (represents the requesting peer from B's perspective)
  const peerB = await adminFetch<PeerRecord>(serverBUrl, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminToken: adminTokenB,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}-from-b`,
      displayName: `Harness Peer ${label} (Server A as seen from B)`,
      endpointUrl: serverAUrl,
    },
  });
  console.log(`[seed] Created peer on B: ${peerB.peerId}`);

  // 2. Create grant on Server B referencing B's own peer record
  const grant = await adminFetch<GrantRecord>(serverBUrl, '/api/admin/federation/grants', {
    method: 'POST',
    adminToken: adminTokenB,
    body: {
      peerId: peerB.peerId,
      subjectUserId,
      scope,
    },
  });
  console.log(`[seed] Created grant on B: ${grant.id} (status: ${grant.status})`);

  // 3. Generate enrollment token on Server B (short-lived: 15 minutes)
  const tokenResult = await adminFetch<EnrollmentTokenResult>(
    serverBUrl,
    `/api/admin/federation/grants/${grant.id}/tokens`,
    { method: 'POST', adminToken: adminTokenB, body: { ttlSeconds: 900 } },
  );
  console.log(`[seed] Enrollment token: ${tokenResult.token.slice(0, 8)}...`);
  console.log(`[seed] Enrollment URL: ${tokenResult.enrollmentUrl}`);

  // 4. Create peer keypair on Server A (Server A's local record of Server B)
  const peerA = await adminFetch<PeerRecord>(serverAUrl, '/api/admin/federation/peers/keypair', {
    method: 'POST',
    adminToken: adminTokenA,
    body: {
      commonName: `harness-peer-${label.toLowerCase()}-from-a`,
      displayName: `Harness Peer ${label} (Server B as seen from A)`,
      endpointUrl: serverBUrl,
    },
  });
  console.log(`[seed] Created peer on A: ${peerA.peerId}`);

  // 5. Redeem token at Server B's enrollment endpoint with A's CSR.
  // The enrollment endpoint is not admin-guarded — the one-time token IS the credential.
  //
  // The enrollmentUrl returned by the gateway is built using BETTER_AUTH_URL which
  // resolves to the in-cluster Docker hostname (gateway-b:3000). That URL is only
  // reachable from other containers, not from the host machine running this script.
  // We rewrite the host portion to use the host-accessible serverBUrl so the
  // seed script can reach the endpoint from the host.
  //
  // NOTE(review): the non-null assertion on pop() assumes the enrollment URL's
  // path always ends in the token segment — confirm against the gateway's URL
  // construction if that endpoint shape changes.
  const parsedEnrollment = new URL(tokenResult.enrollmentUrl);
  const tokenSegment = parsedEnrollment.pathname.split('/').pop()!;
  const redeemUrl = `${serverBUrl}/api/federation/enrollment/${tokenSegment}`;
  console.log(`[seed] Rewritten redeem URL (host-accessible): ${redeemUrl}`);
  const redeemRes = await fetch(redeemUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ csrPem: peerA.csrPem }),
  });

  if (!redeemRes.ok) {
    const body = await redeemRes.text().catch(() => '(no body)');
    throw new Error(`Enrollment redemption failed: ${redeemRes.status.toString()} — ${body}`);
  }

  const redeemResult = (await redeemRes.json()) as EnrollmentRedeemResult;
  console.log(`[seed] Cert issued (${redeemResult.certPem.length.toString()} bytes)`);

  // 6. Store cert on Server A's peer record → transitions to active
  await adminFetch<unknown>(serverAUrl, `/api/admin/federation/peers/${peerA.peerId}/cert`, {
    method: 'PATCH',
    adminToken: adminTokenA,
    body: { certPem: redeemResult.certPem },
  });
  console.log(`[seed] Cert stored on A — peer ${peerA.peerId} is now active`);

  // Verify grant flipped to active on B
  const activeGrant = await adminFetch<GrantRecord>(
    serverBUrl,
    `/api/admin/federation/grants/${grant.id}`,
    { adminToken: adminTokenB },
  );
  console.log(`[seed] Grant status on B: ${activeGrant.status}`);

  return { grant: activeGrant, peer: { ...peerA, grantId: grant.id } };
}
|
|
||||||
|
|
||||||
// ─── Test data insertion ──────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Insert representative test data on Server B via its admin APIs.
|
|
||||||
*
|
|
||||||
* NOTE: The gateway's task/note/credential APIs require an authenticated user
|
|
||||||
* session. For the harness, we seed via admin-level endpoints if available,
|
|
||||||
* or document the gap here for M3-11 to fill in with proper user session seeding.
|
|
||||||
*
|
|
||||||
* ASSUMPTION: Server B exposes POST /api/admin/tasks (or similar) for test data.
|
|
||||||
* If that endpoint does not yet exist, this function logs a warning and skips
|
|
||||||
* without failing — M3-11 will add the session-based seeding path.
|
|
||||||
*/
|
|
||||||
async function seedTestData(
|
|
||||||
subjectUserId: string,
|
|
||||||
scopeLabel: string,
|
|
||||||
serverBUrl: string,
|
|
||||||
adminTokenB: string,
|
|
||||||
): Promise<void> {
|
|
||||||
console.log(`\n[seed] Seeding test data on Server B for ${scopeLabel}...`);
|
|
||||||
|
|
||||||
const testTasks = [
|
|
||||||
{
|
|
||||||
title: `${scopeLabel} Task 1`,
|
|
||||||
description: 'Federation harness test task',
|
|
||||||
userId: subjectUserId,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
title: `${scopeLabel} Task 2`,
|
|
||||||
description: 'Team-scoped test task',
|
|
||||||
userId: subjectUserId,
|
|
||||||
teamId: 'T1',
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
const testNotes = [
|
|
||||||
{
|
|
||||||
title: `${scopeLabel} Note 1`,
|
|
||||||
content: 'Personal note for federation test',
|
|
||||||
userId: subjectUserId,
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
// Attempt to insert — tolerate 404 (endpoint not yet implemented)
|
|
||||||
for (const task of testTasks) {
|
|
||||||
try {
|
|
||||||
await adminFetch<unknown>(serverBUrl, '/api/admin/tasks', {
|
|
||||||
method: 'POST',
|
|
||||||
adminToken: adminTokenB,
|
|
||||||
body: task,
|
|
||||||
});
|
|
||||||
console.log(`[seed] Inserted task: "${task.title}"`);
|
|
||||||
} catch (err) {
|
|
||||||
const msg = err instanceof Error ? err.message : String(err);
|
|
||||||
if (msg.includes('404') || msg.includes('Cannot POST')) {
|
|
||||||
console.warn(
|
|
||||||
`[seed] WARN: /api/admin/tasks not found — skipping task insertion (expected until M3-11)`,
|
|
||||||
);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const note of testNotes) {
|
|
||||||
try {
|
|
||||||
await adminFetch<unknown>(serverBUrl, '/api/admin/notes', {
|
|
||||||
method: 'POST',
|
|
||||||
adminToken: adminTokenB,
|
|
||||||
body: note,
|
|
||||||
});
|
|
||||||
console.log(`[seed] Inserted note: "${note.title}"`);
|
|
||||||
} catch (err) {
|
|
||||||
const msg = err instanceof Error ? err.message : String(err);
|
|
||||||
if (msg.includes('404') || msg.includes('Cannot POST')) {
|
|
||||||
console.warn(
|
|
||||||
`[seed] WARN: /api/admin/notes not found — skipping note insertion (expected until M3-11)`,
|
|
||||||
);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
console.log(`[seed] Test data seeding for ${scopeLabel} complete.`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ─── Main entrypoint ──────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
/**
 * Provision the full harness data set against two running gateways.
 *
 * Orchestration order: wait for both gateways → bootstrap an admin on each
 * (pristine DBs required) → enroll all three scope-variant grants → seed test
 * rows on Server B → return the collected IDs/tokens as a SeedResult.
 *
 * @param opts Optional overrides for URLs, bootstrap passwords, and the
 *   subject user IDs each grant is scoped to. Anything omitted falls back to
 *   the module-level env-derived defaults (and, for subject IDs, to Server
 *   B's freshly bootstrapped admin user).
 * @throws Error if a gateway never becomes ready, a DB is not pristine, or
 *   any enrollment step fails.
 */
export async function runSeed(opts?: {
  serverAUrl?: string;
  serverBUrl?: string;
  adminBootstrapPasswordA?: string;
  adminBootstrapPasswordB?: string;
  subjectUserIds?: { variantA: string; variantB: string; variantC: string };
}): Promise<SeedResult> {
  const aUrl = opts?.serverAUrl ?? SERVER_A_URL;
  const bUrl = opts?.serverBUrl ?? SERVER_B_URL;
  const passwordA = opts?.adminBootstrapPasswordA ?? ADMIN_BOOTSTRAP_PASSWORD_A;
  const passwordB = opts?.adminBootstrapPasswordB ?? ADMIN_BOOTSTRAP_PASSWORD_B;

  // Use provided or default subject user IDs.
  // In a real run these would be real user UUIDs from Server B's DB.
  // For the harness, the admin bootstrap user on Server B is used as the subject.
  // These are overridden after bootstrap if opts.subjectUserIds is not provided.
  const subjectIds = opts?.subjectUserIds;

  console.log('[seed] Waiting for gateways to be ready...');
  await Promise.all([waitForGateway(aUrl, 'Server A'), waitForGateway(bUrl, 'Server B')]);

  // Bootstrap admin users on both gateways (requires pristine DBs).
  console.log('\n[seed] Bootstrapping admin accounts...');
  const [bootstrapA, bootstrapB] = await Promise.all([
    bootstrapAdmin(aUrl, 'Server A', passwordA),
    bootstrapAdmin(bUrl, 'Server B', passwordB),
  ]);

  // Default subject user IDs to the admin user on Server B (guaranteed to exist).
  const resolvedSubjectIds = subjectIds ?? {
    variantA: bootstrapB.adminUserId,
    variantB: bootstrapB.adminUserId,
    variantC: bootstrapB.adminUserId,
  };

  // Enroll all three scope variants sequentially to avoid race conditions on
  // the step-ca signing queue. Parallel enrollment would work too but
  // sequential is easier to debug when something goes wrong.
  console.log('\n[seed] Enrolling scope variants...');
  const resultA = await enrollGrant({
    label: 'A',
    subjectUserId: resolvedSubjectIds.variantA,
    scope: SCOPE_VARIANT_A,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });
  const resultB = await enrollGrant({
    label: 'B',
    subjectUserId: resolvedSubjectIds.variantB,
    scope: SCOPE_VARIANT_B,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });
  const resultC = await enrollGrant({
    label: 'C',
    subjectUserId: resolvedSubjectIds.variantC,
    scope: SCOPE_VARIANT_C,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    serverAUrl: aUrl,
    serverBUrl: bUrl,
  });

  // Seed test data on Server B for each scope variant (independent inserts —
  // safe to run in parallel).
  await Promise.all([
    seedTestData(resolvedSubjectIds.variantA, 'A', bUrl, bootstrapB.adminToken),
    seedTestData(resolvedSubjectIds.variantB, 'B', bUrl, bootstrapB.adminToken),
    seedTestData(resolvedSubjectIds.variantC, 'C', bUrl, bootstrapB.adminToken),
  ]);

  const result: SeedResult = {
    serverAUrl: aUrl,
    serverBUrl: bUrl,
    adminTokenA: bootstrapA.adminToken,
    adminTokenB: bootstrapB.adminToken,
    adminUserIdA: bootstrapA.adminUserId,
    adminUserIdB: bootstrapB.adminUserId,
    grants: {
      variantA: resultA.grant,
      variantB: resultB.grant,
      variantC: resultC.grant,
    },
    peers: {
      variantA: resultA.peer,
      variantB: resultB.peer,
      variantC: resultC.peer,
    },
  };

  console.log('\n[seed] Seed complete.');
  console.log('[seed] Summary:');
  console.log(`  Variant A grant: ${result.grants.variantA.id} (${result.grants.variantA.status})`);
  console.log(`  Variant B grant: ${result.grants.variantB.id} (${result.grants.variantB.status})`);
  console.log(`  Variant C grant: ${result.grants.variantC.id} (${result.grants.variantC.status})`);

  return result;
}
|
|
||||||
|
|
||||||
// ─── CLI entry ────────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
// Detect direct CLI invocation (e.g. `tsx tools/federation-harness/seed.ts`)
// vs being imported as a module by the harness.
// NOTE(review): this compares only the basename of argv[1] against this
// module's resolved path — any same-named script invoked elsewhere would also
// match, and Windows path separators are not handled. Confirm this is
// acceptable for the harness's supported environments.
const isCli =
  process.argv[1] != null &&
  fileURLToPath(import.meta.url).endsWith(process.argv[1]!.split('/').pop()!);

if (isCli) {
  const shouldBoot = process.argv.includes('--boot');

  // --boot starts the compose stack before seeding; without it the stack is
  // assumed to already be up (runSeed still waits for health).
  if (shouldBoot) {
    console.log('[seed] --boot flag detected — starting compose stack...');
    execSync(`docker compose -f "${COMPOSE_FILE}" up -d`, { stdio: 'inherit' });
  }

  // Exit 0 on success, 1 on any seeding failure.
  runSeed()
    .then(() => {
      process.exit(0);
    })
    .catch((err) => {
      console.error('[seed] Fatal:', err);
      process.exit(1);
    });
}
|
|
||||||
Reference in New Issue
Block a user