Compare commits
30 Commits
feat/feder
...
fix/federa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9b718d3e06 | ||
|
|
55c870f421 | ||
| 0ee5b14c68 | |||
| 3eee176cc3 | |||
| 74fe60d8d6 | |||
| 0bfaa56e9e | |||
| 01dd6b9fa1 | |||
| 1038ae76e1 | |||
| bf082d95a0 | |||
| bb24292cf7 | |||
| f2cda52e1a | |||
| 7d7cf012f0 | |||
| c56dda74aa | |||
| 9f1a08185e | |||
| d2e408656b | |||
| 54c278b871 | |||
| 4dbd429203 | |||
| b985d7bfe2 | |||
| 45e8f02c91 | |||
| 54c422ab06 | |||
|
|
b9fb8aab57 | ||
| 78841f228a | |||
| dc4afee848 | |||
| 1e2b8ac8de | |||
| 15d849c166 | |||
| 78251d4af8 | |||
| 1a4b1ebbf1 | |||
| ccad30dd27 | |||
| 4c2b177eab | |||
| 58169f9979 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -9,3 +9,6 @@ coverage
|
||||
*.tsbuildinfo
|
||||
.pnpm-store
|
||||
docs/reports/
|
||||
|
||||
# Step-CA dev password — real file is gitignored; commit only the .example
|
||||
infra/step-ca/dev-password
|
||||
|
||||
@@ -80,6 +80,8 @@ If you already have a gateway account but no token, use `mosaic gateway config r
|
||||
|
||||
### Configuration
|
||||
|
||||
Mosaic supports three storage tiers: `local` (PGlite, single-host), `standalone` (PostgreSQL, single-host), and `federated` (PostgreSQL + pgvector + Valkey, multi-host). See [Federated Tier Setup](docs/federation/SETUP.md) for multi-user and production deployments, or [Migrating to Federated](docs/guides/migrate-tier.md) to upgrade from existing tiers.
|
||||
|
||||
```bash
|
||||
mosaic config show # Print full config as JSON
|
||||
mosaic config get <key> # Read a specific key
|
||||
|
||||
@@ -56,6 +56,7 @@
|
||||
"@opentelemetry/sdk-metrics": "^2.6.0",
|
||||
"@opentelemetry/sdk-node": "^0.213.0",
|
||||
"@opentelemetry/semantic-conventions": "^1.40.0",
|
||||
"@peculiar/x509": "^2.0.0",
|
||||
"@sinclair/typebox": "^0.34.48",
|
||||
"better-auth": "^1.5.5",
|
||||
"bullmq": "^5.71.0",
|
||||
@@ -63,8 +64,11 @@
|
||||
"class-validator": "^0.15.1",
|
||||
"dotenv": "^17.3.1",
|
||||
"fastify": "^5.0.0",
|
||||
"ioredis": "^5.10.0",
|
||||
"jose": "^6.2.2",
|
||||
"node-cron": "^4.2.1",
|
||||
"openai": "^6.32.0",
|
||||
"postgres": "^3.4.8",
|
||||
"reflect-metadata": "^0.2.0",
|
||||
"rxjs": "^7.8.0",
|
||||
"socket.io": "^4.8.0",
|
||||
|
||||
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* Test B — Gateway boot refuses (fail-fast) when PG is unreachable.
|
||||
*
|
||||
* Prereq: docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
* (Valkey must be running; only PG is intentionally misconfigured.)
|
||||
* Run: FEDERATED_INTEGRATION=1 pnpm --filter @mosaicstack/gateway test src/__tests__/integration/federated-boot.pg-unreachable.integration.test.ts
|
||||
*
|
||||
* Skipped when FEDERATED_INTEGRATION !== '1'.
|
||||
*/
|
||||
|
||||
import net from 'node:net';
|
||||
import { beforeAll, describe, expect, it } from 'vitest';
|
||||
import { TierDetectionError, detectAndAssertTier } from '@mosaicstack/storage';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
|
||||
const VALKEY_URL = 'redis://localhost:6380';
|
||||
|
||||
/**
|
||||
* Reserves a guaranteed-closed port at runtime by binding to an ephemeral OS
|
||||
* port (port 0) and immediately releasing it. The OS will not reassign the
|
||||
* port during the TIME_WAIT window, so it remains closed for the duration of
|
||||
* this test.
|
||||
*/
|
||||
async function reserveClosedPort(): Promise<number> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const server = net.createServer();
|
||||
server.listen(0, '127.0.0.1', () => {
|
||||
const addr = server.address();
|
||||
if (typeof addr !== 'object' || !addr) return reject(new Error('no addr'));
|
||||
const port = addr.port;
|
||||
server.close(() => resolve(port));
|
||||
});
|
||||
server.on('error', reject);
|
||||
});
|
||||
}
|
||||
|
||||
describe.skipIf(!run)('federated boot — PG unreachable', () => {
|
||||
let badPgUrl: string;
|
||||
|
||||
beforeAll(async () => {
|
||||
const closedPort = await reserveClosedPort();
|
||||
badPgUrl = `postgresql://mosaic:mosaic@localhost:${closedPort}/mosaic`;
|
||||
});
|
||||
|
||||
it('detectAndAssertTier throws TierDetectionError with service: postgres when PG is down', async () => {
|
||||
const brokenConfig = {
|
||||
tier: 'federated' as const,
|
||||
storage: {
|
||||
type: 'postgres' as const,
|
||||
url: badPgUrl,
|
||||
enableVector: true,
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: VALKEY_URL,
|
||||
},
|
||||
};
|
||||
|
||||
await expect(detectAndAssertTier(brokenConfig)).rejects.toSatisfy(
|
||||
(err: unknown) => err instanceof TierDetectionError && err.service === 'postgres',
|
||||
);
|
||||
}, 10_000);
|
||||
});
|
||||
@@ -0,0 +1,50 @@
|
||||
/**
|
||||
* Test A — Gateway boot succeeds when federated services are up.
|
||||
*
|
||||
* Prereq: docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
* Run: FEDERATED_INTEGRATION=1 pnpm --filter @mosaicstack/gateway test src/__tests__/integration/federated-boot.success.integration.test.ts
|
||||
*
|
||||
* Skipped when FEDERATED_INTEGRATION !== '1'.
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { afterAll, describe, expect, it } from 'vitest';
|
||||
import { detectAndAssertTier } from '@mosaicstack/storage';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
|
||||
const PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
const VALKEY_URL = 'redis://localhost:6380';
|
||||
|
||||
const federatedConfig = {
|
||||
tier: 'federated' as const,
|
||||
storage: {
|
||||
type: 'postgres' as const,
|
||||
url: PG_URL,
|
||||
enableVector: true,
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: VALKEY_URL,
|
||||
},
|
||||
};
|
||||
|
||||
describe.skipIf(!run)('federated boot — success path', () => {
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
|
||||
afterAll(async () => {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
});
|
||||
|
||||
it('detectAndAssertTier resolves without throwing when federated services are up', async () => {
|
||||
await expect(detectAndAssertTier(federatedConfig)).resolves.toBeUndefined();
|
||||
}, 10_000);
|
||||
|
||||
it('pgvector extension is registered (pg_extension row exists)', async () => {
|
||||
sql = postgres(PG_URL, { max: 1, connect_timeout: 5, idle_timeout: 5 });
|
||||
const rows = await sql`SELECT * FROM pg_extension WHERE extname = 'vector'`;
|
||||
expect(rows).toHaveLength(1);
|
||||
}, 10_000);
|
||||
});
|
||||
@@ -0,0 +1,43 @@
|
||||
/**
|
||||
* Test C — pgvector extension is functional end-to-end.
|
||||
*
|
||||
* Creates a temp table with a vector(3) column, inserts a row, and queries it
|
||||
* back — confirming the extension is not just registered but operational.
|
||||
*
|
||||
* Prereq: docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
* Run: FEDERATED_INTEGRATION=1 pnpm --filter @mosaicstack/gateway test src/__tests__/integration/federated-pgvector.integration.test.ts
|
||||
*
|
||||
* Skipped when FEDERATED_INTEGRATION !== '1'.
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { afterAll, describe, expect, it } from 'vitest';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
|
||||
const PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
|
||||
afterAll(async () => {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
});
|
||||
|
||||
describe.skipIf(!run)('federated pgvector — functional end-to-end', () => {
|
||||
it('vector ops round-trip: INSERT [1,2,3] and SELECT returns [1,2,3]', async () => {
|
||||
sql = postgres(PG_URL, { max: 1, connect_timeout: 5, idle_timeout: 5 });
|
||||
|
||||
await sql`CREATE TEMP TABLE t (id int, embedding vector(3))`;
|
||||
await sql`INSERT INTO t VALUES (1, '[1,2,3]')`;
|
||||
const rows = await sql`SELECT embedding FROM t`;
|
||||
|
||||
expect(rows).toHaveLength(1);
|
||||
// The postgres driver returns vector columns as strings like '[1,2,3]'.
|
||||
// Normalise by parsing the string representation.
|
||||
const raw = rows[0]?.['embedding'] as string;
|
||||
const parsed = JSON.parse(raw) as number[];
|
||||
expect(parsed).toEqual([1, 2, 3]);
|
||||
}, 10_000);
|
||||
});
|
||||
@@ -0,0 +1,243 @@
|
||||
/**
|
||||
* Federation M2 E2E test — peer-add enrollment flow (FED-M2-10).
|
||||
*
|
||||
* Covers MILESTONES.md acceptance test #6:
|
||||
* "`peer add <url>` on Server A yields an `active` peer record with a valid cert + key"
|
||||
*
|
||||
* This test simulates two gateways using a single bootstrapped NestJS app:
|
||||
* - "Server A": the admin API that generates a keypair and stores the cert
|
||||
* - "Server B": the enrollment endpoint that signs the CSR
|
||||
* Both share the same DB + Step-CA in the test environment.
|
||||
*
|
||||
* Prerequisites:
|
||||
* docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
*
|
||||
* Run:
|
||||
* FEDERATED_INTEGRATION=1 STEP_CA_AVAILABLE=1 \
|
||||
* STEP_CA_URL=https://localhost:9000 \
|
||||
* STEP_CA_PROVISIONER_KEY_JSON="$(docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json)" \
|
||||
* STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt \
|
||||
* pnpm --filter @mosaicstack/gateway test \
|
||||
* src/__tests__/integration/federation-m2-e2e.integration.test.ts
|
||||
*
|
||||
* Obtaining Step-CA credentials:
|
||||
* # Extract provisioner key from running container:
|
||||
* # docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json
|
||||
* # Copy root cert from container:
|
||||
* # docker cp $(docker ps -qf name=step-ca):/home/step/certs/root_ca.crt /tmp/step-ca-root.crt
|
||||
* # Then: export STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt
|
||||
*
|
||||
* Skipped unless both FEDERATED_INTEGRATION=1 and STEP_CA_AVAILABLE=1 are set.
|
||||
*/
|
||||
|
||||
import * as crypto from 'node:crypto';
|
||||
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
|
||||
import { Test } from '@nestjs/testing';
|
||||
import { ValidationPipe } from '@nestjs/common';
|
||||
import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify';
|
||||
import supertest from 'supertest';
|
||||
import {
|
||||
createDb,
|
||||
type Db,
|
||||
type DbHandle,
|
||||
federationPeers,
|
||||
federationGrants,
|
||||
federationEnrollmentTokens,
|
||||
inArray,
|
||||
eq,
|
||||
} from '@mosaicstack/db';
|
||||
import * as schema from '@mosaicstack/db';
|
||||
import { DB } from '../../database/database.module.js';
|
||||
import { AdminGuard } from '../../admin/admin.guard.js';
|
||||
import { FederationModule } from '../../federation/federation.module.js';
|
||||
import { GrantsService } from '../../federation/grants.service.js';
|
||||
import { EnrollmentService } from '../../federation/enrollment.service.js';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
const stepCaRun =
|
||||
run &&
|
||||
process.env['STEP_CA_AVAILABLE'] === '1' &&
|
||||
!!process.env['STEP_CA_URL'] &&
|
||||
!!process.env['STEP_CA_PROVISIONER_KEY_JSON'] &&
|
||||
!!process.env['STEP_CA_ROOT_CERT_PATH'];
|
||||
|
||||
const PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
|
||||
const RUN_ID = crypto.randomUUID();
|
||||
|
||||
describe.skipIf(!stepCaRun)('federation M2 E2E — peer add enrollment flow', () => {
  // Shared fixtures, all initialised in beforeAll.
  let handle: DbHandle;
  let db: Db;
  let app: NestFastifyApplication;
  let agent: ReturnType<typeof supertest>;
  let grantsService: GrantsService;
  let enrollmentService: EnrollmentService;

  // Row IDs created by the test, recorded so afterAll can delete exactly
  // what this run inserted (the DB is shared with other runs/suites).
  const createdTokenGrantIds: string[] = [];
  const createdGrantIds: string[] = [];
  const createdPeerIds: string[] = [];
  const createdUserIds: string[] = [];

  beforeAll(async () => {
    // Default sealing key for local runs; ??= keeps any externally-set value.
    process.env['BETTER_AUTH_SECRET'] ??= 'test-e2e-sealing-key';

    handle = createDb(PG_URL);
    db = handle.db;

    // Boot FederationModule with the real DB injected; AdminGuard is stubbed
    // to always allow so admin endpoints can be exercised without auth setup.
    const moduleRef = await Test.createTestingModule({
      imports: [FederationModule],
      providers: [{ provide: DB, useValue: db }],
    })
      .overrideGuard(AdminGuard)
      .useValue({ canActivate: () => true })
      .compile();

    app = moduleRef.createNestApplication<NestFastifyApplication>(new FastifyAdapter());
    app.useGlobalPipes(new ValidationPipe({ whitelist: true, transform: true }));
    await app.init();
    // Fastify must be fully ready before supertest can drive its HTTP server.
    await app.getHttpAdapter().getInstance().ready();

    agent = supertest(app.getHttpServer());

    grantsService = moduleRef.get(GrantsService);
    enrollmentService = moduleRef.get(EnrollmentService);
  }, 30_000);

  afterAll(async () => {
    // Delete dependents before their parents (tokens → grants → peers →
    // users); each step logs-and-continues so one failure doesn't leak the rest.
    if (db && createdTokenGrantIds.length > 0) {
      await db
        .delete(federationEnrollmentTokens)
        .where(inArray(federationEnrollmentTokens.grantId, createdTokenGrantIds))
        .catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
    }
    if (db && createdGrantIds.length > 0) {
      await db
        .delete(federationGrants)
        .where(inArray(federationGrants.id, createdGrantIds))
        .catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
    }
    if (db && createdPeerIds.length > 0) {
      await db
        .delete(federationPeers)
        .where(inArray(federationPeers.id, createdPeerIds))
        .catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
    }
    if (db && createdUserIds.length > 0) {
      await db
        .delete(schema.users)
        .where(inArray(schema.users.id, createdUserIds))
        .catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
    }
    if (app)
      await app.close().catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
    if (handle)
      await handle.close().catch((e: unknown) => console.error('[federation-m2-e2e cleanup]', e));
  });

  // -------------------------------------------------------------------------
  // #6 — peer add: keypair → enrollment → cert storage → active peer record
  // -------------------------------------------------------------------------
  it('#6 — peer add flow: keypair → enrollment → cert storage → active peer record', async () => {
    // Create a subject user to satisfy FK on federation_grants.subject_user_id
    const userId = crypto.randomUUID();
    await db
      .insert(schema.users)
      .values({
        id: userId,
        name: `e2e-user-${RUN_ID}`,
        email: `e2e-${RUN_ID}@federation-test.invalid`,
        emailVerified: false,
      })
      .onConflictDoNothing();
    createdUserIds.push(userId);

    // ── Step A: "Server B" setup ─────────────────────────────────────────
    // Server B admin creates a grant and generates an enrollment token to
    // share out-of-band with Server A's operator.

    // Insert a placeholder peer on "Server B" to satisfy the grant FK
    const serverBPeerId = crypto.randomUUID();
    await db
      .insert(federationPeers)
      .values({
        id: serverBPeerId,
        commonName: `server-b-peer-${RUN_ID}`,
        displayName: 'Server B Placeholder',
        certPem: '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n',
        certSerial: `serial-b-${serverBPeerId}`,
        // One year out — far enough in the future to never expire mid-test.
        certNotAfter: new Date(Date.now() + 365 * 24 * 60 * 60 * 1000),
        state: 'pending',
      })
      .onConflictDoNothing();
    createdPeerIds.push(serverBPeerId);

    const grant = await grantsService.createGrant({
      subjectUserId: userId,
      scope: { resources: ['tasks'], excluded_resources: [], max_rows_per_query: 100 },
      peerId: serverBPeerId,
    });
    // Track the grant for both the grants table and its enrollment tokens.
    createdGrantIds.push(grant.id);
    createdTokenGrantIds.push(grant.id);

    const { token } = await enrollmentService.createToken({
      grantId: grant.id,
      peerId: serverBPeerId,
      ttlSeconds: 900,
    });

    // ── Step B: "Server A" generates keypair ─────────────────────────────
    const keypairRes = await agent
      .post('/api/admin/federation/peers/keypair')
      .send({
        commonName: `e2e-peer-${RUN_ID.slice(0, 8)}`,
        displayName: 'E2E Test Peer',
        endpointUrl: 'https://test.invalid',
      })
      .set('Content-Type', 'application/json');

    expect(keypairRes.status).toBe(201);
    const { peerId, csrPem } = keypairRes.body as { peerId: string; csrPem: string };
    expect(typeof peerId).toBe('string');
    expect(csrPem).toContain('-----BEGIN CERTIFICATE REQUEST-----');
    createdPeerIds.push(peerId);

    // ── Step C: Enrollment (simulates Server A sending CSR to Server B) ──
    const enrollRes = await agent
      .post(`/api/federation/enrollment/${token}`)
      .send({ csrPem })
      .set('Content-Type', 'application/json');

    expect(enrollRes.status).toBe(200);
    const { certPem, certChainPem } = enrollRes.body as {
      certPem: string;
      certChainPem: string;
    };
    expect(certPem).toContain('-----BEGIN CERTIFICATE-----');
    expect(certChainPem).toContain('-----BEGIN CERTIFICATE-----');

    // ── Step D: "Server A" stores the cert ───────────────────────────────
    const storeRes = await agent
      .patch(`/api/admin/federation/peers/${peerId}/cert`)
      .send({ certPem })
      .set('Content-Type', 'application/json');

    expect(storeRes.status).toBe(200);

    // ── Step E: Verify peer record in DB ─────────────────────────────────
    const [peer] = await db
      .select()
      .from(federationPeers)
      .where(eq(federationPeers.id, peerId))
      .limit(1);

    expect(peer).toBeDefined();
    expect(peer?.state).toBe('active');
    expect(peer?.certPem).toContain('-----BEGIN CERTIFICATE-----');
    expect(typeof peer?.certSerial).toBe('string');
    expect((peer?.certSerial ?? '').length).toBeGreaterThan(0);
    // clientKeyPem is a sealed ciphertext — must not be a raw PEM
    expect(peer?.clientKeyPem?.startsWith('-----BEGIN')).toBe(false);
    // certNotAfter must be in the future
    expect(peer?.certNotAfter?.getTime()).toBeGreaterThan(Date.now());
  }, 60_000);
});
|
||||
@@ -0,0 +1,483 @@
|
||||
/**
|
||||
* Federation M2 integration tests (FED-M2-09).
|
||||
*
|
||||
* Covers MILESTONES.md acceptance tests #1, #2, #3, #5, #7, #8.
|
||||
*
|
||||
* Prerequisites:
|
||||
* docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
*
|
||||
* Run DB-only tests (no Step-CA):
|
||||
* FEDERATED_INTEGRATION=1 BETTER_AUTH_SECRET=test-secret pnpm --filter @mosaicstack/gateway test \
|
||||
* src/__tests__/integration/federation-m2.integration.test.ts
|
||||
*
|
||||
* Run all tests including Step-CA-dependent ones:
|
||||
* FEDERATED_INTEGRATION=1 STEP_CA_AVAILABLE=1 \
|
||||
* STEP_CA_URL=https://localhost:9000 \
|
||||
* STEP_CA_PROVISIONER_KEY_JSON="$(docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json)" \
|
||||
* STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt \
|
||||
* pnpm --filter @mosaicstack/gateway test \
|
||||
* src/__tests__/integration/federation-m2.integration.test.ts
|
||||
*
|
||||
* Obtaining Step-CA credentials:
|
||||
* # Extract provisioner key from running container:
|
||||
* # docker exec $(docker ps -qf name=step-ca) cat /home/step/secrets/mosaic-fed.json
|
||||
* # Copy root cert from container:
|
||||
* # docker cp $(docker ps -qf name=step-ca):/home/step/certs/root_ca.crt /tmp/step-ca-root.crt
|
||||
* # Then: export STEP_CA_ROOT_CERT_PATH=/tmp/step-ca-root.crt
|
||||
*/
|
||||
|
||||
import * as crypto from 'node:crypto';
|
||||
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
|
||||
import { Test } from '@nestjs/testing';
|
||||
import { GoneException } from '@nestjs/common';
|
||||
import { Pkcs10CertificateRequestGenerator, X509Certificate as PeculiarX509 } from '@peculiar/x509';
|
||||
import {
|
||||
createDb,
|
||||
type Db,
|
||||
type DbHandle,
|
||||
federationPeers,
|
||||
federationGrants,
|
||||
federationEnrollmentTokens,
|
||||
inArray,
|
||||
eq,
|
||||
} from '@mosaicstack/db';
|
||||
import * as schema from '@mosaicstack/db';
|
||||
import { seal } from '@mosaicstack/auth';
|
||||
import { DB } from '../../database/database.module.js';
|
||||
import { GrantsService } from '../../federation/grants.service.js';
|
||||
import { EnrollmentService } from '../../federation/enrollment.service.js';
|
||||
import { CaService } from '../../federation/ca.service.js';
|
||||
import { FederationScopeError } from '../../federation/scope-schema.js';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
const stepCaRun = run && process.env['STEP_CA_AVAILABLE'] === '1';
|
||||
|
||||
const PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers for test data isolation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Unique run prefix to identify rows created by this test run. */
|
||||
const RUN_ID = crypto.randomUUID();
|
||||
|
||||
/** Insert a minimal user row to satisfy the FK on federation_grants.subject_user_id. */
|
||||
async function insertTestUser(db: Db, id: string): Promise<void> {
|
||||
await db
|
||||
.insert(schema.users)
|
||||
.values({
|
||||
id,
|
||||
name: `test-user-${id}`,
|
||||
email: `test-${id}@federation-test.invalid`,
|
||||
emailVerified: false,
|
||||
})
|
||||
.onConflictDoNothing();
|
||||
}
|
||||
|
||||
/** Insert a minimal peer row to satisfy the FK on federation_grants.peer_id. */
|
||||
async function insertTestPeer(db: Db, id: string, suffix: string = ''): Promise<void> {
|
||||
await db
|
||||
.insert(federationPeers)
|
||||
.values({
|
||||
id,
|
||||
commonName: `test-peer-${RUN_ID}-${suffix}`,
|
||||
displayName: `Test Peer ${suffix}`,
|
||||
certPem: '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n',
|
||||
certSerial: `test-serial-${id}`,
|
||||
certNotAfter: new Date(Date.now() + 365 * 24 * 60 * 60 * 1000),
|
||||
state: 'pending',
|
||||
})
|
||||
.onConflictDoNothing();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DB-only test module (CaService mocked so env vars not required)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function buildDbModule(db: Db) {
|
||||
return Test.createTestingModule({
|
||||
providers: [
|
||||
{ provide: DB, useValue: db },
|
||||
GrantsService,
|
||||
{
|
||||
provide: CaService,
|
||||
useValue: {
|
||||
issueCert: async () => {
|
||||
throw new Error('CaService.issueCert should not be called in DB-only tests');
|
||||
},
|
||||
},
|
||||
},
|
||||
EnrollmentService,
|
||||
],
|
||||
}).compile();
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Test suite — DB-only (no Step-CA)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe.skipIf(!run)('federation M2 — DB-only tests', () => {
  // Shared fixtures, initialised in beforeAll.
  let handle: DbHandle;
  let db: Db;
  let grantsService: GrantsService;

  /** IDs created during this run — cleaned up in afterAll. */
  const createdGrantIds: string[] = [];
  const createdPeerIds: string[] = [];
  const createdUserIds: string[] = [];

  beforeAll(async () => {
    // Default sealing key for local runs; ??= keeps any externally-set value.
    process.env['BETTER_AUTH_SECRET'] ??= 'test-integration-sealing-key-not-for-prod';

    handle = createDb(PG_URL);
    db = handle.db;

    // CaService is stubbed inside buildDbModule — no Step-CA needed here.
    const moduleRef = await buildDbModule(db);
    grantsService = moduleRef.get(GrantsService);
  });

  afterAll(async () => {
    // Clean up in FK-safe order: tokens → grants → peers → users
    // Each delete logs-and-continues so one failure doesn't leak the rest.
    if (db && createdGrantIds.length > 0) {
      await db
        .delete(federationEnrollmentTokens)
        .where(inArray(federationEnrollmentTokens.grantId, createdGrantIds))
        .catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
      await db
        .delete(federationGrants)
        .where(inArray(federationGrants.id, createdGrantIds))
        .catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
    }
    if (db && createdPeerIds.length > 0) {
      await db
        .delete(federationPeers)
        .where(inArray(federationPeers.id, createdPeerIds))
        .catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
    }
    if (db && createdUserIds.length > 0) {
      await db
        .delete(schema.users)
        .where(inArray(schema.users.id, createdUserIds))
        .catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
    }
    if (handle)
      await handle.close().catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
  });

  // -------------------------------------------------------------------------
  // #1 — grant create writes a pending row
  // -------------------------------------------------------------------------
  it('#1 — createGrant writes a pending row to DB', async () => {
    const userId = crypto.randomUUID();
    const peerId = crypto.randomUUID();
    const validScope = {
      resources: ['tasks'],
      excluded_resources: [],
      max_rows_per_query: 100,
    };

    // Satisfy FK targets before creating the grant.
    await insertTestUser(db, userId);
    await insertTestPeer(db, peerId, 'test1');
    createdUserIds.push(userId);
    createdPeerIds.push(peerId);

    const grant = await grantsService.createGrant({
      subjectUserId: userId,
      scope: validScope,
      peerId,
    });

    createdGrantIds.push(grant.id);

    // Verify the row exists in DB with correct shape
    const [row] = await db
      .select()
      .from(federationGrants)
      .where(eq(federationGrants.id, grant.id))
      .limit(1);

    expect(row).toBeDefined();
    expect(row?.status).toBe('pending');
    expect(row?.peerId).toBe(peerId);
    expect(row?.subjectUserId).toBe(userId);
    // Scope is stored as JSON — check the key fields survived the round-trip.
    const storedScope = row?.scope as Record<string, unknown>;
    expect(storedScope['resources']).toEqual(['tasks']);
    expect(storedScope['max_rows_per_query']).toBe(100);
  }, 15_000);

  // -------------------------------------------------------------------------
  // #7 — scope with unknown resource type rejected
  // -------------------------------------------------------------------------
  it('#7 — createGrant rejects scope with unknown resource type', async () => {
    const userId = crypto.randomUUID();
    const peerId = crypto.randomUUID();
    const invalidScope = {
      resources: ['totally_unknown_resource'],
      excluded_resources: [],
      max_rows_per_query: 100,
    };

    await insertTestUser(db, userId);
    await insertTestPeer(db, peerId, 'test7');
    createdUserIds.push(userId);
    createdPeerIds.push(peerId);

    // Validation must fail before any grant row is written.
    await expect(
      grantsService.createGrant({
        subjectUserId: userId,
        scope: invalidScope,
        peerId,
      }),
    ).rejects.toThrow(FederationScopeError);
  }, 15_000);

  // -------------------------------------------------------------------------
  // #8 — listGrants returns accurate status for grants in various states
  // -------------------------------------------------------------------------
  it('#8 — listGrants returns accurate status for grants in various states', async () => {
    const userId = crypto.randomUUID();
    const peerId = crypto.randomUUID();
    const validScope = {
      resources: ['notes'],
      excluded_resources: [],
      max_rows_per_query: 50,
    };

    await insertTestUser(db, userId);
    await insertTestPeer(db, peerId, 'test8');
    createdUserIds.push(userId);
    createdPeerIds.push(peerId);

    // Create two pending grants via GrantsService
    const grantA = await grantsService.createGrant({
      subjectUserId: userId,
      scope: validScope,
      peerId,
    });
    const grantB = await grantsService.createGrant({
      subjectUserId: userId,
      scope: { resources: ['tasks'], excluded_resources: [], max_rows_per_query: 50 },
      peerId,
    });
    createdGrantIds.push(grantA.id, grantB.id);

    // Insert a third grant directly in 'revoked' state to test status variety
    const [grantC] = await db
      .insert(federationGrants)
      .values({
        id: crypto.randomUUID(),
        subjectUserId: userId,
        peerId,
        scope: validScope,
        status: 'revoked',
        revokedAt: new Date(),
      })
      .returning();
    createdGrantIds.push(grantC!.id);

    // List all grants for this peer
    const allForPeer = await grantsService.listGrants({ peerId });

    // Filter to just this test's rows — the shared DB may hold other grants.
    const ourGrantIds = new Set([grantA.id, grantB.id, grantC!.id]);
    const ourGrants = allForPeer.filter((g) => ourGrantIds.has(g.id));
    expect(ourGrants).toHaveLength(3);

    const pendingGrants = ourGrants.filter((g) => g.status === 'pending');
    const revokedGrants = ourGrants.filter((g) => g.status === 'revoked');
    expect(pendingGrants).toHaveLength(2);
    expect(revokedGrants).toHaveLength(1);

    // Status-filtered query
    const pendingOnly = await grantsService.listGrants({ peerId, status: 'pending' });
    const ourPending = pendingOnly.filter((g) => ourGrantIds.has(g.id));
    expect(ourPending.every((g) => g.status === 'pending')).toBe(true);

    // Verify peer list from DB also shows the peer rows with correct state
    const peers = await db.select().from(federationPeers).where(eq(federationPeers.id, peerId));
    expect(peers).toHaveLength(1);
    expect(peers[0]?.state).toBe('pending');
  }, 15_000);

  // -------------------------------------------------------------------------
  // #5 — client_key_pem encrypted at rest
  // -------------------------------------------------------------------------
  it('#5 — clientKeyPem stored in DB is a sealed ciphertext (not a valid PEM)', async () => {
    const peerId = crypto.randomUUID();
    const rawPem = '-----BEGIN PRIVATE KEY-----\nMOCK\n-----END PRIVATE KEY-----\n';
    // seal() uses BETTER_AUTH_SECRET (defaulted in beforeAll).
    const sealed = seal(rawPem);

    await db.insert(federationPeers).values({
      id: peerId,
      commonName: `test-peer-${RUN_ID}-sealed`,
      displayName: 'Sealed Key Test Peer',
      certPem: '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n',
      certSerial: `test-serial-sealed-${peerId}`,
      certNotAfter: new Date(Date.now() + 365 * 24 * 60 * 60 * 1000),
      state: 'pending',
      clientKeyPem: sealed,
    });
    createdPeerIds.push(peerId);

    const [row] = await db
      .select()
      .from(federationPeers)
      .where(eq(federationPeers.id, peerId))
      .limit(1);

    expect(row).toBeDefined();
    // The stored value must NOT be a valid PEM — it's a sealed ciphertext blob
    expect(row?.clientKeyPem).toBeDefined();
    expect(row?.clientKeyPem?.startsWith('-----BEGIN')).toBe(false);
    // The sealed value should be non-trivial (at least 20 chars)
    expect((row?.clientKeyPem ?? '').length).toBeGreaterThan(20);
  }, 15_000);
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Test suite — Step-CA gated
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe.skipIf(!stepCaRun)('federation M2 — Step-CA tests', () => {
|
||||
let handle: DbHandle;
|
||||
let db: Db;
|
||||
let grantsService: GrantsService;
|
||||
let enrollmentService: EnrollmentService;
|
||||
|
||||
const createdGrantIds: string[] = [];
|
||||
const createdPeerIds: string[] = [];
|
||||
const createdUserIds: string[] = [];
|
||||
|
||||
beforeAll(async () => {
|
||||
handle = createDb(PG_URL);
|
||||
db = handle.db;
|
||||
|
||||
// Use real CaService — env vars (STEP_CA_URL, STEP_CA_PROVISIONER_KEY_JSON,
|
||||
// STEP_CA_ROOT_CERT_PATH) must be set when STEP_CA_AVAILABLE=1
|
||||
const moduleRef = await Test.createTestingModule({
|
||||
providers: [{ provide: DB, useValue: db }, CaService, GrantsService, EnrollmentService],
|
||||
}).compile();
|
||||
|
||||
grantsService = moduleRef.get(GrantsService);
|
||||
enrollmentService = moduleRef.get(EnrollmentService);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
if (db && createdGrantIds.length > 0) {
|
||||
await db
|
||||
.delete(federationEnrollmentTokens)
|
||||
.where(inArray(federationEnrollmentTokens.grantId, createdGrantIds))
|
||||
.catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
|
||||
await db
|
||||
.delete(federationGrants)
|
||||
.where(inArray(federationGrants.id, createdGrantIds))
|
||||
.catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
|
||||
}
|
||||
if (db && createdPeerIds.length > 0) {
|
||||
await db
|
||||
.delete(federationPeers)
|
||||
.where(inArray(federationPeers.id, createdPeerIds))
|
||||
.catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
|
||||
}
|
||||
if (db && createdUserIds.length > 0) {
|
||||
await db
|
||||
.delete(schema.users)
|
||||
.where(inArray(schema.users.id, createdUserIds))
|
||||
.catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
|
||||
}
|
||||
if (handle)
|
||||
await handle.close().catch((e: unknown) => console.error('[federation-m2-test cleanup]', e));
|
||||
});
|
||||
|
||||
/** Generate a P-256 key pair and PKCS#10 CSR, returning the CSR as PEM. */
|
||||
async function generateCsrPem(cn: string): Promise<string> {
|
||||
const alg = { name: 'ECDSA', namedCurve: 'P-256', hash: 'SHA-256' };
|
||||
const keyPair = await crypto.subtle.generateKey(alg, true, ['sign', 'verify']);
|
||||
const csr = await Pkcs10CertificateRequestGenerator.create({
|
||||
name: `CN=${cn}`,
|
||||
keys: keyPair,
|
||||
signingAlgorithm: alg,
|
||||
});
|
||||
return csr.toString('pem');
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// #2 — enrollment signs CSR and returns cert
|
||||
// -------------------------------------------------------------------------
|
||||
it('#2 — redeem returns a certPem containing a valid PEM certificate', async () => {
|
||||
const userId = crypto.randomUUID();
|
||||
const peerId = crypto.randomUUID();
|
||||
const validScope = {
|
||||
resources: ['tasks'],
|
||||
excluded_resources: [],
|
||||
max_rows_per_query: 100,
|
||||
};
|
||||
|
||||
await insertTestUser(db, userId);
|
||||
await insertTestPeer(db, peerId, 'ca-test2');
|
||||
createdUserIds.push(userId);
|
||||
createdPeerIds.push(peerId);
|
||||
|
||||
const grant = await grantsService.createGrant({
|
||||
subjectUserId: userId,
|
||||
scope: validScope,
|
||||
peerId,
|
||||
});
|
||||
createdGrantIds.push(grant.id);
|
||||
|
||||
const { token } = await enrollmentService.createToken({
|
||||
grantId: grant.id,
|
||||
peerId,
|
||||
ttlSeconds: 900,
|
||||
});
|
||||
|
||||
const csrPem = await generateCsrPem(`gateway-test-${RUN_ID.slice(0, 8)}`);
|
||||
const result = await enrollmentService.redeem(token, csrPem);
|
||||
|
||||
expect(result.certPem).toContain('-----BEGIN CERTIFICATE-----');
|
||||
expect(result.certChainPem).toContain('-----BEGIN CERTIFICATE-----');
|
||||
|
||||
// Verify the issued cert parses cleanly
|
||||
const cert = new PeculiarX509(result.certPem);
|
||||
expect(cert.serialNumber).toBeTruthy();
|
||||
}, 30_000);
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// #3 — token single-use; second attempt returns GoneException
|
||||
// -------------------------------------------------------------------------
|
||||
it('#3 — second redeem of the same token throws GoneException', async () => {
|
||||
const userId = crypto.randomUUID();
|
||||
const peerId = crypto.randomUUID();
|
||||
const validScope = {
|
||||
resources: ['notes'],
|
||||
excluded_resources: [],
|
||||
max_rows_per_query: 50,
|
||||
};
|
||||
|
||||
await insertTestUser(db, userId);
|
||||
await insertTestPeer(db, peerId, 'ca-test3');
|
||||
createdUserIds.push(userId);
|
||||
createdPeerIds.push(peerId);
|
||||
|
||||
const grant = await grantsService.createGrant({
|
||||
subjectUserId: userId,
|
||||
scope: validScope,
|
||||
peerId,
|
||||
});
|
||||
createdGrantIds.push(grant.id);
|
||||
|
||||
const { token } = await enrollmentService.createToken({
|
||||
grantId: grant.id,
|
||||
peerId,
|
||||
ttlSeconds: 900,
|
||||
});
|
||||
|
||||
const csrPem = await generateCsrPem(`gateway-test-replay-${RUN_ID.slice(0, 8)}`);
|
||||
|
||||
// First redeem must succeed
|
||||
const result = await enrollmentService.redeem(token, csrPem);
|
||||
expect(result.certPem).toContain('-----BEGIN CERTIFICATE-----');
|
||||
|
||||
// Second redeem with the same token must be rejected
|
||||
await expect(enrollmentService.redeem(token, csrPem)).rejects.toThrow(GoneException);
|
||||
}, 30_000);
|
||||
});
|
||||
@@ -1,62 +1,10 @@
|
||||
import { Inject, Injectable, Logger } from '@nestjs/common';
|
||||
import { createCipheriv, createDecipheriv, createHash, randomBytes } from 'node:crypto';
|
||||
import { seal, unseal } from '@mosaicstack/auth';
|
||||
import type { Db } from '@mosaicstack/db';
|
||||
import { providerCredentials, eq, and } from '@mosaicstack/db';
|
||||
import { DB } from '../database/database.module.js';
|
||||
import type { ProviderCredentialSummaryDto } from './provider-credentials.dto.js';
|
||||
|
||||
const ALGORITHM = 'aes-256-gcm';
|
||||
const IV_LENGTH = 12; // 96-bit IV for GCM
|
||||
const TAG_LENGTH = 16; // 128-bit auth tag
|
||||
|
||||
/**
|
||||
* Derive a 32-byte AES-256 key from BETTER_AUTH_SECRET using SHA-256.
|
||||
* The secret is assumed to be set in the environment.
|
||||
*/
|
||||
function deriveEncryptionKey(): Buffer {
|
||||
const secret = process.env['BETTER_AUTH_SECRET'];
|
||||
if (!secret) {
|
||||
throw new Error('BETTER_AUTH_SECRET is not set — cannot derive encryption key');
|
||||
}
|
||||
return createHash('sha256').update(secret).digest();
|
||||
}
|
||||
|
||||
/**
|
||||
* Encrypt a plain-text value using AES-256-GCM.
|
||||
* Output format: base64(iv + authTag + ciphertext)
|
||||
*/
|
||||
function encrypt(plaintext: string): string {
|
||||
const key = deriveEncryptionKey();
|
||||
const iv = randomBytes(IV_LENGTH);
|
||||
const cipher = createCipheriv(ALGORITHM, key, iv);
|
||||
|
||||
const encrypted = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);
|
||||
const authTag = cipher.getAuthTag();
|
||||
|
||||
// Combine iv (12) + authTag (16) + ciphertext and base64-encode
|
||||
const combined = Buffer.concat([iv, authTag, encrypted]);
|
||||
return combined.toString('base64');
|
||||
}
|
||||
|
||||
/**
|
||||
* Decrypt a value encrypted by `encrypt()`.
|
||||
* Throws on authentication failure (tampered data).
|
||||
*/
|
||||
function decrypt(encoded: string): string {
|
||||
const key = deriveEncryptionKey();
|
||||
const combined = Buffer.from(encoded, 'base64');
|
||||
|
||||
const iv = combined.subarray(0, IV_LENGTH);
|
||||
const authTag = combined.subarray(IV_LENGTH, IV_LENGTH + TAG_LENGTH);
|
||||
const ciphertext = combined.subarray(IV_LENGTH + TAG_LENGTH);
|
||||
|
||||
const decipher = createDecipheriv(ALGORITHM, key, iv);
|
||||
decipher.setAuthTag(authTag);
|
||||
|
||||
const decrypted = Buffer.concat([decipher.update(ciphertext), decipher.final()]);
|
||||
return decrypted.toString('utf8');
|
||||
}
|
||||
|
||||
@Injectable()
|
||||
export class ProviderCredentialsService {
|
||||
private readonly logger = new Logger(ProviderCredentialsService.name);
|
||||
@@ -74,7 +22,7 @@ export class ProviderCredentialsService {
|
||||
value: string,
|
||||
metadata?: Record<string, unknown>,
|
||||
): Promise<void> {
|
||||
const encryptedValue = encrypt(value);
|
||||
const encryptedValue = seal(value);
|
||||
|
||||
await this.db
|
||||
.insert(providerCredentials)
|
||||
@@ -122,7 +70,7 @@ export class ProviderCredentialsService {
|
||||
}
|
||||
|
||||
try {
|
||||
return decrypt(row.encryptedValue);
|
||||
return unseal(row.encryptedValue);
|
||||
} catch (err) {
|
||||
this.logger.error(
|
||||
`Failed to decrypt credential for user=${userId} provider=${provider}`,
|
||||
|
||||
@@ -24,6 +24,7 @@ import { GCModule } from './gc/gc.module.js';
|
||||
import { ReloadModule } from './reload/reload.module.js';
|
||||
import { WorkspaceModule } from './workspace/workspace.module.js';
|
||||
import { QueueModule } from './queue/queue.module.js';
|
||||
import { FederationModule } from './federation/federation.module.js';
|
||||
import { ThrottlerGuard, ThrottlerModule } from '@nestjs/throttler';
|
||||
|
||||
@Module({
|
||||
@@ -52,6 +53,7 @@ import { ThrottlerGuard, ThrottlerModule } from '@nestjs/throttler';
|
||||
QueueModule,
|
||||
ReloadModule,
|
||||
WorkspaceModule,
|
||||
FederationModule,
|
||||
],
|
||||
controllers: [HealthController],
|
||||
providers: [
|
||||
|
||||
373
apps/gateway/src/federation/__tests__/enrollment.service.spec.ts
Normal file
373
apps/gateway/src/federation/__tests__/enrollment.service.spec.ts
Normal file
@@ -0,0 +1,373 @@
|
||||
/**
|
||||
* Unit tests for EnrollmentService — federation enrollment token flow (FED-M2-07).
|
||||
*
|
||||
* Coverage:
|
||||
* createToken:
|
||||
* - inserts token row with correct grantId, peerId, and future expiresAt
|
||||
* - returns { token, expiresAt } with a 64-char hex token
|
||||
* - clamps ttlSeconds to 900
|
||||
*
|
||||
* redeem — error paths:
|
||||
* - NotFoundException when token row not found
|
||||
* - GoneException when token already used (usedAt set)
|
||||
* - GoneException when token expired (expiresAt < now)
|
||||
* - GoneException when grant status is not pending
|
||||
*
|
||||
* redeem — success path:
|
||||
* - atomically claims token BEFORE cert issuance (claim → issueCert → tx)
|
||||
* - calls CaService.issueCert with correct args
|
||||
* - activates grant + updates peer + writes audit log inside a transaction
|
||||
* - returns { certPem, certChainPem }
|
||||
*
|
||||
* redeem — replay protection:
|
||||
* - GoneException when claim UPDATE returns empty array (concurrent request won)
|
||||
*/
|
||||
|
||||
import 'reflect-metadata';
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { GoneException, NotFoundException } from '@nestjs/common';
|
||||
import type { Db } from '@mosaicstack/db';
|
||||
import { EnrollmentService } from '../enrollment.service.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Test constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const GRANT_ID = 'g1111111-1111-1111-1111-111111111111';
|
||||
const PEER_ID = 'p2222222-2222-2222-2222-222222222222';
|
||||
const USER_ID = 'u3333333-3333-3333-3333-333333333333';
|
||||
const TOKEN = 'a'.repeat(64); // 64-char hex
|
||||
|
||||
const MOCK_CERT_PEM = '-----BEGIN CERTIFICATE-----\nMOCK\n-----END CERTIFICATE-----\n';
|
||||
const MOCK_CHAIN_PEM = MOCK_CERT_PEM + MOCK_CERT_PEM;
|
||||
const MOCK_SERIAL = 'ABCD1234';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Factory helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeTokenRow(overrides: Partial<Record<string, unknown>> = {}) {
|
||||
return {
|
||||
token: TOKEN,
|
||||
grantId: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
expiresAt: new Date(Date.now() + 60_000), // 1 min from now
|
||||
usedAt: null,
|
||||
createdAt: new Date(),
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeGrant(overrides: Partial<Record<string, unknown>> = {}) {
|
||||
return {
|
||||
id: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
subjectUserId: USER_ID,
|
||||
scope: { resources: ['tasks'], excluded_resources: [], max_rows_per_query: 100 },
|
||||
status: 'pending',
|
||||
expiresAt: null,
|
||||
createdAt: new Date(),
|
||||
revokedAt: null,
|
||||
revokedReason: null,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mock DB builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeDb({
|
||||
tokenRows = [makeTokenRow()],
|
||||
// claimedRows is returned by the .returning() on the token-claim UPDATE.
|
||||
// Empty array = concurrent request won the race (GoneException).
|
||||
claimedRows = [{ token: TOKEN }],
|
||||
}: {
|
||||
tokenRows?: unknown[];
|
||||
claimedRows?: unknown[];
|
||||
} = {}) {
|
||||
// insert().values() — for createToken (outer db, not tx)
|
||||
const insertValues = vi.fn().mockResolvedValue(undefined);
|
||||
const insertMock = vi.fn().mockReturnValue({ values: insertValues });
|
||||
|
||||
// select().from().where().limit() — for fetching the token row
|
||||
const limitSelect = vi.fn().mockResolvedValue(tokenRows);
|
||||
const whereSelect = vi.fn().mockReturnValue({ limit: limitSelect });
|
||||
const fromSelect = vi.fn().mockReturnValue({ where: whereSelect });
|
||||
const selectMock = vi.fn().mockReturnValue({ from: fromSelect });
|
||||
|
||||
// update().set().where().returning() — for the atomic token claim (outer db)
|
||||
const returningMock = vi.fn().mockResolvedValue(claimedRows);
|
||||
const whereClaimUpdate = vi.fn().mockReturnValue({ returning: returningMock });
|
||||
const setClaimMock = vi.fn().mockReturnValue({ where: whereClaimUpdate });
|
||||
const claimUpdateMock = vi.fn().mockReturnValue({ set: setClaimMock });
|
||||
|
||||
// transaction(cb) — cb receives txMock; txMock has update + insert
|
||||
const txInsertValues = vi.fn().mockResolvedValue(undefined);
|
||||
const txInsertMock = vi.fn().mockReturnValue({ values: txInsertValues });
|
||||
const txWhereUpdate = vi.fn().mockResolvedValue(undefined);
|
||||
const txSetMock = vi.fn().mockReturnValue({ where: txWhereUpdate });
|
||||
const txUpdateMock = vi.fn().mockReturnValue({ set: txSetMock });
|
||||
const txMock = { update: txUpdateMock, insert: txInsertMock };
|
||||
const transactionMock = vi
|
||||
.fn()
|
||||
.mockImplementation(async (cb: (tx: typeof txMock) => Promise<void>) => cb(txMock));
|
||||
|
||||
return {
|
||||
insert: insertMock,
|
||||
select: selectMock,
|
||||
update: claimUpdateMock,
|
||||
transaction: transactionMock,
|
||||
_mocks: {
|
||||
insertValues,
|
||||
insertMock,
|
||||
limitSelect,
|
||||
whereSelect,
|
||||
fromSelect,
|
||||
selectMock,
|
||||
returningMock,
|
||||
whereClaimUpdate,
|
||||
setClaimMock,
|
||||
claimUpdateMock,
|
||||
txInsertValues,
|
||||
txInsertMock,
|
||||
txWhereUpdate,
|
||||
txSetMock,
|
||||
txUpdateMock,
|
||||
txMock,
|
||||
transactionMock,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mock CaService
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeCaService() {
|
||||
return {
|
||||
issueCert: vi.fn().mockResolvedValue({
|
||||
certPem: MOCK_CERT_PEM,
|
||||
certChainPem: MOCK_CHAIN_PEM,
|
||||
serialNumber: MOCK_SERIAL,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mock GrantsService
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeGrantsService(grantOverrides: Partial<Record<string, unknown>> = {}) {
|
||||
return {
|
||||
getGrant: vi.fn().mockResolvedValue(makeGrant(grantOverrides)),
|
||||
activateGrant: vi.fn().mockResolvedValue(makeGrant({ status: 'active' })),
|
||||
};
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helper: build service under test
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function buildService({
|
||||
db = makeDb(),
|
||||
caService = makeCaService(),
|
||||
grantsService = makeGrantsService(),
|
||||
}: {
|
||||
db?: ReturnType<typeof makeDb>;
|
||||
caService?: ReturnType<typeof makeCaService>;
|
||||
grantsService?: ReturnType<typeof makeGrantsService>;
|
||||
} = {}) {
|
||||
return new EnrollmentService(db as unknown as Db, caService as never, grantsService as never);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests: createToken
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('EnrollmentService.createToken', () => {
|
||||
it('inserts a token row and returns { token, expiresAt }', async () => {
|
||||
const db = makeDb();
|
||||
const service = buildService({ db });
|
||||
|
||||
const result = await service.createToken({
|
||||
grantId: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
ttlSeconds: 900,
|
||||
});
|
||||
|
||||
expect(result.token).toHaveLength(64); // 32 bytes hex
|
||||
expect(result.expiresAt).toBeDefined();
|
||||
expect(new Date(result.expiresAt).getTime()).toBeGreaterThan(Date.now());
|
||||
expect(db._mocks.insertValues).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ grantId: GRANT_ID, peerId: PEER_ID }),
|
||||
);
|
||||
});
|
||||
|
||||
it('clamps ttlSeconds to 900', async () => {
|
||||
const db = makeDb();
|
||||
const service = buildService({ db });
|
||||
|
||||
const before = Date.now();
|
||||
const result = await service.createToken({
|
||||
grantId: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
ttlSeconds: 9999,
|
||||
});
|
||||
const after = Date.now();
|
||||
|
||||
const expiresMs = new Date(result.expiresAt).getTime();
|
||||
// Should be at most 900s from now
|
||||
expect(expiresMs - before).toBeLessThanOrEqual(900_000 + 100);
|
||||
expect(expiresMs - after).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests: redeem — error paths
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('EnrollmentService.redeem — error paths', () => {
|
||||
it('throws NotFoundException when token row not found', async () => {
|
||||
const db = makeDb({ tokenRows: [] });
|
||||
const service = buildService({ db });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(NotFoundException);
|
||||
});
|
||||
|
||||
it('throws GoneException when usedAt is set (already redeemed)', async () => {
|
||||
const db = makeDb({ tokenRows: [makeTokenRow({ usedAt: new Date(Date.now() - 1000) })] });
|
||||
const service = buildService({ db });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(GoneException);
|
||||
});
|
||||
|
||||
it('throws GoneException when token has expired', async () => {
|
||||
const db = makeDb({ tokenRows: [makeTokenRow({ expiresAt: new Date(Date.now() - 1000) })] });
|
||||
const service = buildService({ db });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(GoneException);
|
||||
});
|
||||
|
||||
it('throws GoneException when grant status is not pending', async () => {
|
||||
const db = makeDb();
|
||||
const grantsService = makeGrantsService({ status: 'active' });
|
||||
const service = buildService({ db, grantsService });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(GoneException);
|
||||
});
|
||||
|
||||
it('throws GoneException when token claim UPDATE returns empty array (concurrent replay)', async () => {
|
||||
const db = makeDb({ claimedRows: [] });
|
||||
const caService = makeCaService();
|
||||
const grantsService = makeGrantsService();
|
||||
const service = buildService({ db, caService, grantsService });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(GoneException);
|
||||
});
|
||||
|
||||
it('does NOT call issueCert when token claim fails (no double minting)', async () => {
|
||||
const db = makeDb({ claimedRows: [] });
|
||||
const caService = makeCaService();
|
||||
const service = buildService({ db, caService });
|
||||
|
||||
await expect(service.redeem(TOKEN, '---CSR---')).rejects.toBeInstanceOf(GoneException);
|
||||
expect(caService.issueCert).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests: redeem — success path
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('EnrollmentService.redeem — success path', () => {
|
||||
let db: ReturnType<typeof makeDb>;
|
||||
let caService: ReturnType<typeof makeCaService>;
|
||||
let grantsService: ReturnType<typeof makeGrantsService>;
|
||||
let service: EnrollmentService;
|
||||
|
||||
beforeEach(() => {
|
||||
db = makeDb();
|
||||
caService = makeCaService();
|
||||
grantsService = makeGrantsService();
|
||||
service = buildService({ db, caService, grantsService });
|
||||
});
|
||||
|
||||
it('claims token BEFORE calling issueCert (prevents double minting)', async () => {
|
||||
const callOrder: string[] = [];
|
||||
db._mocks.returningMock.mockImplementation(async () => {
|
||||
callOrder.push('claim');
|
||||
return [{ token: TOKEN }];
|
||||
});
|
||||
caService.issueCert.mockImplementation(async () => {
|
||||
callOrder.push('issueCert');
|
||||
return { certPem: MOCK_CERT_PEM, certChainPem: MOCK_CHAIN_PEM, serialNumber: MOCK_SERIAL };
|
||||
});
|
||||
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(callOrder).toEqual(['claim', 'issueCert']);
|
||||
});
|
||||
|
||||
it('calls CaService.issueCert with grantId, subjectUserId, csrPem, ttlSeconds=300', async () => {
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(caService.issueCert).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
grantId: GRANT_ID,
|
||||
subjectUserId: USER_ID,
|
||||
csrPem: MOCK_CERT_PEM,
|
||||
ttlSeconds: 300,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('runs activate grant + peer update + audit inside a transaction', async () => {
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(db._mocks.transactionMock).toHaveBeenCalledOnce();
|
||||
// tx.update called twice: activate grant + update peer
|
||||
expect(db._mocks.txUpdateMock).toHaveBeenCalledTimes(2);
|
||||
// tx.insert called once: audit log
|
||||
expect(db._mocks.txInsertMock).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('activates grant (sets status=active) inside the transaction', async () => {
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(db._mocks.txSetMock).toHaveBeenCalledWith(expect.objectContaining({ status: 'active' }));
|
||||
});
|
||||
|
||||
it('updates the federationPeers row with certPem, certSerial, state=active inside the transaction', async () => {
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(db._mocks.txSetMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
certPem: MOCK_CERT_PEM,
|
||||
certSerial: MOCK_SERIAL,
|
||||
state: 'active',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('inserts an audit log row inside the transaction', async () => {
|
||||
await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(db._mocks.txInsertValues).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
peerId: PEER_ID,
|
||||
grantId: GRANT_ID,
|
||||
verb: 'enrollment',
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('returns { certPem, certChainPem } from CaService', async () => {
|
||||
const result = await service.redeem(TOKEN, MOCK_CERT_PEM);
|
||||
|
||||
expect(result).toEqual({
|
||||
certPem: MOCK_CERT_PEM,
|
||||
certChainPem: MOCK_CHAIN_PEM,
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,212 @@
|
||||
/**
|
||||
* Unit tests for FederationController (FED-M2-08).
|
||||
*
|
||||
* Coverage:
|
||||
* - listGrants: delegates to GrantsService with query params
|
||||
* - createGrant: delegates to GrantsService, validates body
|
||||
* - generateToken: returns enrollmentUrl containing the token
|
||||
* - listPeers: returns DB rows
|
||||
*/
|
||||
|
||||
import 'reflect-metadata';
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { NotFoundException } from '@nestjs/common';
|
||||
import type { Db } from '@mosaicstack/db';
|
||||
import { FederationController } from '../federation.controller.js';
|
||||
import type { GrantsService } from '../grants.service.js';
|
||||
import type { EnrollmentService } from '../enrollment.service.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const GRANT_ID = 'g1111111-1111-1111-1111-111111111111';
|
||||
const PEER_ID = 'p2222222-2222-2222-2222-222222222222';
|
||||
const USER_ID = 'u3333333-3333-3333-3333-333333333333';
|
||||
|
||||
const MOCK_GRANT = {
|
||||
id: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
subjectUserId: USER_ID,
|
||||
scope: { resources: ['tasks'], operations: ['list'] },
|
||||
status: 'pending' as const,
|
||||
expiresAt: null,
|
||||
createdAt: new Date('2026-01-01T00:00:00Z'),
|
||||
revokedAt: null,
|
||||
revokedReason: null,
|
||||
};
|
||||
|
||||
const MOCK_PEER = {
|
||||
id: PEER_ID,
|
||||
commonName: 'test-peer',
|
||||
displayName: 'Test Peer',
|
||||
certPem: '',
|
||||
certSerial: 'pending',
|
||||
certNotAfter: new Date(0),
|
||||
clientKeyPem: null,
|
||||
state: 'pending' as const,
|
||||
endpointUrl: null,
|
||||
createdAt: new Date('2026-01-01T00:00:00Z'),
|
||||
updatedAt: new Date('2026-01-01T00:00:00Z'),
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DB mock builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeDbMock(rows: unknown[] = []) {
|
||||
const orderBy = vi.fn().mockResolvedValue(rows);
|
||||
const where = vi.fn().mockReturnValue({ orderBy });
|
||||
const from = vi.fn().mockReturnValue({ where, orderBy });
|
||||
const select = vi.fn().mockReturnValue({ from });
|
||||
|
||||
return {
|
||||
select,
|
||||
from,
|
||||
where,
|
||||
orderBy,
|
||||
insert: vi.fn(),
|
||||
update: vi.fn(),
|
||||
delete: vi.fn(),
|
||||
} as unknown as Db;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('FederationController', () => {
|
||||
let db: Db;
|
||||
let grantsService: GrantsService;
|
||||
let enrollmentService: EnrollmentService;
|
||||
let controller: FederationController;
|
||||
|
||||
beforeEach(() => {
|
||||
db = makeDbMock([MOCK_PEER]);
|
||||
|
||||
grantsService = {
|
||||
createGrant: vi.fn().mockResolvedValue(MOCK_GRANT),
|
||||
getGrant: vi.fn().mockResolvedValue(MOCK_GRANT),
|
||||
listGrants: vi.fn().mockResolvedValue([MOCK_GRANT]),
|
||||
revokeGrant: vi.fn().mockResolvedValue({ ...MOCK_GRANT, status: 'revoked' }),
|
||||
activateGrant: vi.fn(),
|
||||
expireGrant: vi.fn(),
|
||||
} as unknown as GrantsService;
|
||||
|
||||
enrollmentService = {
|
||||
createToken: vi.fn().mockResolvedValue({
|
||||
token: 'abc123def456abc123def456abc123def456abc123def456abc123def456ab12',
|
||||
expiresAt: '2026-01-01T00:15:00.000Z',
|
||||
}),
|
||||
redeem: vi.fn(),
|
||||
} as unknown as EnrollmentService;
|
||||
|
||||
controller = new FederationController(db, grantsService, enrollmentService);
|
||||
});
|
||||
|
||||
// ─── Grant management ──────────────────────────────────────────────────
|
||||
|
||||
describe('listGrants', () => {
|
||||
it('delegates to GrantsService with provided query params', async () => {
|
||||
const query = { peerId: PEER_ID, status: 'pending' as const };
|
||||
const result = await controller.listGrants(query);
|
||||
|
||||
expect(grantsService.listGrants).toHaveBeenCalledWith(query);
|
||||
expect(result).toEqual([MOCK_GRANT]);
|
||||
});
|
||||
|
||||
it('delegates to GrantsService with empty filters', async () => {
|
||||
const result = await controller.listGrants({});
|
||||
|
||||
expect(grantsService.listGrants).toHaveBeenCalledWith({});
|
||||
expect(result).toEqual([MOCK_GRANT]);
|
||||
});
|
||||
});
|
||||
|
||||
describe('createGrant', () => {
|
||||
it('delegates to GrantsService and returns created grant', async () => {
|
||||
const body = {
|
||||
peerId: PEER_ID,
|
||||
subjectUserId: USER_ID,
|
||||
scope: { resources: ['tasks'], operations: ['list'] },
|
||||
};
|
||||
|
||||
const result = await controller.createGrant(body);
|
||||
|
||||
expect(grantsService.createGrant).toHaveBeenCalledWith(body);
|
||||
expect(result).toEqual(MOCK_GRANT);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getGrant', () => {
|
||||
it('delegates to GrantsService with provided ID', async () => {
|
||||
const result = await controller.getGrant(GRANT_ID);
|
||||
|
||||
expect(grantsService.getGrant).toHaveBeenCalledWith(GRANT_ID);
|
||||
expect(result).toEqual(MOCK_GRANT);
|
||||
});
|
||||
});
|
||||
|
||||
describe('revokeGrant', () => {
|
||||
it('delegates to GrantsService with id and reason', async () => {
|
||||
const result = await controller.revokeGrant(GRANT_ID, { reason: 'test reason' });
|
||||
|
||||
expect(grantsService.revokeGrant).toHaveBeenCalledWith(GRANT_ID, 'test reason');
|
||||
expect(result).toMatchObject({ status: 'revoked' });
|
||||
});
|
||||
|
||||
it('delegates without reason when omitted', async () => {
|
||||
await controller.revokeGrant(GRANT_ID, {});
|
||||
|
||||
expect(grantsService.revokeGrant).toHaveBeenCalledWith(GRANT_ID, undefined);
|
||||
});
|
||||
});
|
||||
|
||||
  describe('generateToken', () => {
    it('returns enrollmentUrl containing the token', async () => {
      // 64-hex-char token shaped like EnrollmentService output.
      const token = 'abc123def456abc123def456abc123def456abc123def456abc123def456ab12';
      // Seed only this call: mockResolvedValueOnce keeps other tests on the default mock.
      vi.mocked(enrollmentService.createToken).mockResolvedValueOnce({
        token,
        expiresAt: '2026-01-01T00:15:00.000Z',
      });

      const result = await controller.generateToken(GRANT_ID, { ttlSeconds: 900 });

      // The controller embeds the raw token into the enrollment URL path.
      expect(result.token).toBe(token);
      expect(result.enrollmentUrl).toContain(token);
      expect(result.enrollmentUrl).toContain('/api/federation/enrollment/');
    });

    it('creates token via EnrollmentService with correct grantId and peerId', async () => {
      await controller.generateToken(GRANT_ID, { ttlSeconds: 300 });

      // peerId comes from the grant looked up by the controller, not the request body.
      expect(enrollmentService.createToken).toHaveBeenCalledWith({
        grantId: GRANT_ID,
        peerId: PEER_ID,
        ttlSeconds: 300,
      });
    });

    it('throws NotFoundException when grant does not exist', async () => {
      // The grant lookup failure must propagate unchanged to the caller.
      vi.mocked(grantsService.getGrant).mockRejectedValueOnce(
        new NotFoundException(`Grant ${GRANT_ID} not found`),
      );

      await expect(controller.generateToken(GRANT_ID, { ttlSeconds: 900 })).rejects.toThrow(
        NotFoundException,
      );
    });
  });
|
||||
|
||||
// ─── Peer management ───────────────────────────────────────────────────
|
||||
|
||||
describe('listPeers', () => {
|
||||
it('returns DB rows ordered by commonName', async () => {
|
||||
const result = await controller.listPeers();
|
||||
|
||||
expect(db.select).toHaveBeenCalled();
|
||||
// The DB mock resolves with [MOCK_PEER]
|
||||
expect(result).toEqual([MOCK_PEER]);
|
||||
});
|
||||
});
|
||||
});
|
||||
351
apps/gateway/src/federation/__tests__/grants.service.spec.ts
Normal file
351
apps/gateway/src/federation/__tests__/grants.service.spec.ts
Normal file
@@ -0,0 +1,351 @@
|
||||
/**
|
||||
* Unit tests for GrantsService — federation grants CRUD + status transitions (FED-M2-06).
|
||||
*
|
||||
* Coverage:
|
||||
* - createGrant: validates scope via parseFederationScope
|
||||
* - createGrant: inserts with status 'pending'
|
||||
* - getGrant: returns grant when found
|
||||
* - getGrant: throws NotFoundException when not found
|
||||
* - listGrants: no filters returns all grants
|
||||
* - listGrants: filters by peerId
|
||||
* - listGrants: filters by subjectUserId
|
||||
* - listGrants: filters by status
|
||||
* - listGrants: multiple filters combined
|
||||
* - activateGrant: pending → active works
|
||||
* - activateGrant: non-pending throws ConflictException
|
||||
* - revokeGrant: active → revoked works, sets revokedAt
|
||||
* - revokeGrant: non-active throws ConflictException
|
||||
* - expireGrant: active → expired works
|
||||
* - expireGrant: non-active throws ConflictException
|
||||
*/
|
||||
|
||||
import 'reflect-metadata';
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { ConflictException, NotFoundException } from '@nestjs/common';
|
||||
import type { Db } from '@mosaicstack/db';
|
||||
import { GrantsService } from '../grants.service.js';
|
||||
import { FederationScopeError } from '../scope-schema.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
// Minimal valid federation scope for testing
// ---------------------------------------------------------------------------
// Smallest scope object that parseFederationScope accepts (see the
// "rejects an invalid scope" test below for the failing counterpart).
const VALID_SCOPE = {
  resources: ['tasks'] as const,
  excluded_resources: [],
  max_rows_per_query: 100,
};

// Fixed UUID-shaped identifiers reused across every test in this file.
const PEER_ID = 'a1111111-1111-1111-1111-111111111111';
const USER_ID = 'u2222222-2222-2222-2222-222222222222';
const GRANT_ID = 'g3333333-3333-3333-3333-333333333333';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Build a mock DB that mimics chained Drizzle query builder calls
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function makeMockGrant(overrides: Partial<Record<string, unknown>> = {}) {
|
||||
return {
|
||||
id: GRANT_ID,
|
||||
peerId: PEER_ID,
|
||||
subjectUserId: USER_ID,
|
||||
scope: VALID_SCOPE,
|
||||
status: 'pending',
|
||||
expiresAt: null,
|
||||
createdAt: new Date('2026-01-01T00:00:00Z'),
|
||||
revokedAt: null,
|
||||
revokedReason: null,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
/**
 * Builds a mock Drizzle `Db` whose insert/select/update chains resolve to the
 * given rows. Each chain link is an individual vi.fn exposed under `_mocks`
 * so tests can assert on intermediate calls (e.g. the `values(...)` payload).
 */
function makeDb(
  overrides: {
    insertReturning?: unknown[];
    selectRows?: unknown[];
    updateReturning?: unknown[];
  } = {},
) {
  const insertReturning = overrides.insertReturning ?? [makeMockGrant()];
  const selectRows = overrides.selectRows ?? [makeMockGrant()];
  const updateReturning = overrides.updateReturning ?? [makeMockGrant({ status: 'active' })];

  // Drizzle returns a chainable builder; we need to mock the full chain.
  // insert().values().returning()
  const returningInsert = vi.fn().mockResolvedValue(insertReturning);
  const valuesInsert = vi.fn().mockReturnValue({ returning: returningInsert });
  const insertMock = vi.fn().mockReturnValue({ values: valuesInsert });

  // select().from().where().limit()
  const limitSelect = vi.fn().mockResolvedValue(selectRows);
  const whereSelect = vi.fn().mockReturnValue({ limit: limitSelect });
  // NOTE(review): where()'s return value is not itself thenable, so awaiting
  // select().from().where() without .limit() resolves to the builder object,
  // not selectRows — the filtered listGrants tests below only assert the call,
  // never the result, so this never bites. Confirm if result assertions are added.
  // from returns something that is both thenable (for full-table select) and has .where()
  const fromSelect = vi.fn().mockReturnValue({
    where: whereSelect,
    limit: limitSelect,
    // Make it thenable for listGrants with no filters (await db.select().from(federationGrants))
    then: (resolve: (v: unknown) => unknown) => resolve(selectRows),
  });
  const selectMock = vi.fn().mockReturnValue({ from: fromSelect });

  // update().set().where().returning()
  const returningUpdate = vi.fn().mockResolvedValue(updateReturning);
  const whereUpdate = vi.fn().mockReturnValue({ returning: returningUpdate });
  const setMock = vi.fn().mockReturnValue({ where: whereUpdate });
  const updateMock = vi.fn().mockReturnValue({ set: setMock });

  return {
    insert: insertMock,
    select: selectMock,
    update: updateMock,
    // Expose internals for assertions
    _mocks: {
      insertReturning,
      valuesInsert,
      insertMock,
      limitSelect,
      whereSelect,
      fromSelect,
      selectMock,
      returningUpdate,
      whereUpdate,
      setMock,
      updateMock,
    },
  };
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('GrantsService', () => {
  let db: ReturnType<typeof makeDb>;
  let service: GrantsService;

  beforeEach(() => {
    // Default fixture: every chain resolves to a pending grant.
    // Tests needing other statuses rebuild db + service locally.
    db = makeDb();
    service = new GrantsService(db as unknown as Db);
  });

  // ─── createGrant ──────────────────────────────────────────────────────────

  describe('createGrant', () => {
    it('calls parseFederationScope — rejects an invalid scope', async () => {
      // Empty resources + zero row cap violates the scope schema.
      const invalidScope = { resources: [], max_rows_per_query: 0 };
      await expect(
        service.createGrant({ peerId: PEER_ID, subjectUserId: USER_ID, scope: invalidScope }),
      ).rejects.toBeInstanceOf(FederationScopeError);
    });

    it('inserts a grant with status pending and returns it', async () => {
      const result = await service.createGrant({
        peerId: PEER_ID,
        subjectUserId: USER_ID,
        scope: VALID_SCOPE,
      });

      // The service must hard-code status 'pending' on insert.
      expect(db._mocks.valuesInsert).toHaveBeenCalledWith(
        expect.objectContaining({ status: 'pending', peerId: PEER_ID, subjectUserId: USER_ID }),
      );
      expect(result.status).toBe('pending');
    });

    it('passes expiresAt as a Date when provided', async () => {
      await service.createGrant({
        peerId: PEER_ID,
        subjectUserId: USER_ID,
        scope: VALID_SCOPE,
        expiresAt: '2027-01-01T00:00:00Z',
      });

      // ISO string input must be converted to a Date before insert.
      expect(db._mocks.valuesInsert).toHaveBeenCalledWith(
        expect.objectContaining({ expiresAt: expect.any(Date) }),
      );
    });

    it('sets expiresAt to null when not provided', async () => {
      await service.createGrant({ peerId: PEER_ID, subjectUserId: USER_ID, scope: VALID_SCOPE });

      expect(db._mocks.valuesInsert).toHaveBeenCalledWith(
        expect.objectContaining({ expiresAt: null }),
      );
    });
  });

  // ─── getGrant ─────────────────────────────────────────────────────────────

  describe('getGrant', () => {
    it('returns the grant when found', async () => {
      const result = await service.getGrant(GRANT_ID);
      expect(result.id).toBe(GRANT_ID);
    });

    it('throws NotFoundException when no rows returned', async () => {
      // Rebuild with an empty select result to simulate a missing row.
      db = makeDb({ selectRows: [] });
      service = new GrantsService(db as unknown as Db);
      await expect(service.getGrant(GRANT_ID)).rejects.toBeInstanceOf(NotFoundException);
    });
  });

  // ─── listGrants ───────────────────────────────────────────────────────────

  describe('listGrants', () => {
    it('queries without where clause when no filters provided', async () => {
      // No filters → the thenable from() resolves directly (full-table select).
      const result = await service.listGrants({});
      expect(Array.isArray(result)).toBe(true);
    });

    it('applies peerId filter', async () => {
      await service.listGrants({ peerId: PEER_ID });
      expect(db._mocks.whereSelect).toHaveBeenCalled();
    });

    it('applies subjectUserId filter', async () => {
      await service.listGrants({ subjectUserId: USER_ID });
      expect(db._mocks.whereSelect).toHaveBeenCalled();
    });

    it('applies status filter', async () => {
      await service.listGrants({ status: 'active' });
      expect(db._mocks.whereSelect).toHaveBeenCalled();
    });

    it('applies multiple filters combined', async () => {
      await service.listGrants({ peerId: PEER_ID, status: 'pending' });
      expect(db._mocks.whereSelect).toHaveBeenCalled();
    });
  });

  // ─── activateGrant ────────────────────────────────────────────────────────

  describe('activateGrant', () => {
    it('transitions pending → active and returns updated grant', async () => {
      db = makeDb({
        selectRows: [makeMockGrant({ status: 'pending' })],
        updateReturning: [makeMockGrant({ status: 'active' })],
      });
      service = new GrantsService(db as unknown as Db);

      const result = await service.activateGrant(GRANT_ID);

      expect(db._mocks.setMock).toHaveBeenCalledWith({ status: 'active' });
      expect(result.status).toBe('active');
    });

    // Only 'pending' may activate; every other status must conflict.

    it('throws ConflictException when grant is already active', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'active' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.activateGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is revoked', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'revoked' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.activateGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is expired', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'expired' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.activateGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });
  });

  // ─── revokeGrant ──────────────────────────────────────────────────────────

  describe('revokeGrant', () => {
    it('transitions active → revoked and sets revokedAt', async () => {
      const revokedAt = new Date();
      db = makeDb({
        selectRows: [makeMockGrant({ status: 'active' })],
        updateReturning: [makeMockGrant({ status: 'revoked', revokedAt })],
      });
      service = new GrantsService(db as unknown as Db);

      const result = await service.revokeGrant(GRANT_ID, 'test reason');

      expect(db._mocks.setMock).toHaveBeenCalledWith(
        expect.objectContaining({
          status: 'revoked',
          revokedAt: expect.any(Date),
          revokedReason: 'test reason',
        }),
      );
      expect(result.status).toBe('revoked');
    });

    it('sets revokedReason to null when not provided', async () => {
      db = makeDb({
        selectRows: [makeMockGrant({ status: 'active' })],
        updateReturning: [makeMockGrant({ status: 'revoked', revokedAt: new Date() })],
      });
      service = new GrantsService(db as unknown as Db);

      await service.revokeGrant(GRANT_ID);

      expect(db._mocks.setMock).toHaveBeenCalledWith(
        expect.objectContaining({ revokedReason: null }),
      );
    });

    // Only 'active' may revoke; every other status must conflict.

    it('throws ConflictException when grant is pending', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'pending' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.revokeGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is already revoked', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'revoked' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.revokeGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is expired', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'expired' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.revokeGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });
  });

  // ─── expireGrant ──────────────────────────────────────────────────────────

  describe('expireGrant', () => {
    it('transitions active → expired and returns updated grant', async () => {
      db = makeDb({
        selectRows: [makeMockGrant({ status: 'active' })],
        updateReturning: [makeMockGrant({ status: 'expired' })],
      });
      service = new GrantsService(db as unknown as Db);

      const result = await service.expireGrant(GRANT_ID);

      expect(db._mocks.setMock).toHaveBeenCalledWith({ status: 'expired' });
      expect(result.status).toBe('expired');
    });

    // Only 'active' may expire; every other status must conflict.

    it('throws ConflictException when grant is pending', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'pending' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.expireGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is already expired', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'expired' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.expireGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });

    it('throws ConflictException when grant is revoked', async () => {
      db = makeDb({ selectRows: [makeMockGrant({ status: 'revoked' })] });
      service = new GrantsService(db as unknown as Db);

      await expect(service.expireGrant(GRANT_ID)).rejects.toBeInstanceOf(ConflictException);
    });
  });
});
|
||||
63
apps/gateway/src/federation/__tests__/peer-key.spec.ts
Normal file
63
apps/gateway/src/federation/__tests__/peer-key.spec.ts
Normal file
@@ -0,0 +1,63 @@
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import { sealClientKey, unsealClientKey } from '../peer-key.util.js';
|
||||
|
||||
// Test-only sealing secret; the 'missing secret' test below shows that
// unsealClientKey reads it from BETTER_AUTH_SECRET.
const TEST_SECRET = 'test-secret-for-peer-key-unit-tests-only';

// Truncated PKCS#8 PEM — structurally PEM-shaped plaintext; seal/unseal treat
// it as an opaque string, so it does not need to be a parseable key.
const TEST_PEM = `-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC7o4qne60TB3wo
pCOW8QqstpxEBpnFo37JxLYEJbpE3gUlJajsHv9UWRQ7m5B7n+MBXwTCQqMEY8Wl
kHv9tGgz1YGwzBjNKxPJXE6pPTXQ1Oa0VB9l3qHdqF5HtZoJzE0c6dO8HJ5YUVL
-----END PRIVATE KEY-----`;

// Snapshot of the pre-test env value so the suite never leaks state.
let savedSecret: string | undefined;

beforeEach(() => {
  savedSecret = process.env['BETTER_AUTH_SECRET'];
  process.env['BETTER_AUTH_SECRET'] = TEST_SECRET;
});

afterEach(() => {
  // Restore exactly: delete when it was absent, otherwise reinstate the value
  // (tests may also have deleted it themselves, e.g. the 'missing secret' case).
  if (savedSecret === undefined) {
    delete process.env['BETTER_AUTH_SECRET'];
  } else {
    process.env['BETTER_AUTH_SECRET'] = savedSecret;
  }
});
|
||||
|
||||
describe('peer-key seal/unseal', () => {
|
||||
it('round-trip: unsealClientKey(sealClientKey(pem)) returns original pem', () => {
|
||||
const sealed = sealClientKey(TEST_PEM);
|
||||
const roundTripped = unsealClientKey(sealed);
|
||||
expect(roundTripped).toBe(TEST_PEM);
|
||||
});
|
||||
|
||||
it('non-determinism: sealClientKey produces different ciphertext each call', () => {
|
||||
const sealed1 = sealClientKey(TEST_PEM);
|
||||
const sealed2 = sealClientKey(TEST_PEM);
|
||||
expect(sealed1).not.toBe(sealed2);
|
||||
});
|
||||
|
||||
it('at-rest: sealed output does not contain plaintext PEM content', () => {
|
||||
const sealed = sealClientKey(TEST_PEM);
|
||||
expect(sealed).not.toContain('PRIVATE KEY');
|
||||
expect(sealed).not.toContain(
|
||||
'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC7o4qne60TB3wo',
|
||||
);
|
||||
});
|
||||
|
||||
it('tamper: flipping a byte in the sealed payload causes unseal to throw', () => {
|
||||
const sealed = sealClientKey(TEST_PEM);
|
||||
const buf = Buffer.from(sealed, 'base64');
|
||||
// Flip a byte in the middle of the buffer (past IV and authTag)
|
||||
const midpoint = Math.floor(buf.length / 2);
|
||||
buf[midpoint] = buf[midpoint]! ^ 0xff;
|
||||
const tampered = buf.toString('base64');
|
||||
expect(() => unsealClientKey(tampered)).toThrow();
|
||||
});
|
||||
|
||||
it('missing secret: unsealClientKey throws when BETTER_AUTH_SECRET is unset', () => {
|
||||
const sealed = sealClientKey(TEST_PEM);
|
||||
delete process.env['BETTER_AUTH_SECRET'];
|
||||
expect(() => unsealClientKey(sealed)).toThrow('BETTER_AUTH_SECRET is not set');
|
||||
});
|
||||
});
|
||||
57
apps/gateway/src/federation/ca.dto.ts
Normal file
57
apps/gateway/src/federation/ca.dto.ts
Normal file
@@ -0,0 +1,57 @@
|
||||
/**
|
||||
* DTOs for the Step-CA client service (FED-M2-04).
|
||||
*
|
||||
* IssueCertRequestDto — input to CaService.issueCert()
|
||||
* IssuedCertDto — output from CaService.issueCert()
|
||||
*/
|
||||
|
||||
import { IsInt, IsNotEmpty, IsOptional, IsString, IsUUID, Max, Min } from 'class-validator';
|
||||
|
||||
export class IssueCertRequestDto {
  /**
   * PEM-encoded PKCS#10 Certificate Signing Request.
   * The CSR must already include the desired SANs.
   */
  @IsString()
  @IsNotEmpty()
  csrPem!: string;

  /**
   * UUID of the federation_grants row this certificate is being issued for.
   * Embedded as the `mosaic_grant_id` custom OID extension.
   */
  @IsUUID()
  grantId!: string;

  /**
   * UUID of the local user on whose behalf the cert is being issued.
   * Embedded as the `mosaic_subject_user_id` custom OID extension.
   */
  @IsUUID()
  subjectUserId!: string;

  /**
   * Requested certificate validity in seconds.
   * Hard cap: 900 s (15 minutes). Default: 300 s (5 minutes).
   * The service will always clamp to 900 s regardless of this value.
   * Validator range (60–900 s) mirrors that cap so invalid requests fail fast.
   */
  @IsOptional()
  @IsInt()
  @Min(60)
  @Max(15 * 60)
  ttlSeconds: number = 300;
}
|
||||
|
||||
/** Result of a successful CaService.issueCert() call. Output-only; no validators. */
export class IssuedCertDto {
  /** PEM-encoded leaf certificate returned by step-ca. */
  certPem!: string;

  /**
   * PEM-encoded full certificate chain (leaf + intermediates + root).
   * Falls back to `certPem` when step-ca returns no `certChain` field.
   */
  certChainPem!: string;

  /** Decimal serial number string of the issued certificate. */
  serialNumber!: string;
}
|
||||
577
apps/gateway/src/federation/ca.service.spec.ts
Normal file
577
apps/gateway/src/federation/ca.service.spec.ts
Normal file
@@ -0,0 +1,577 @@
|
||||
/**
|
||||
* Unit tests for CaService — Step-CA client (FED-M2-04).
|
||||
*
|
||||
* Coverage:
|
||||
* - Happy path: returns IssuedCertDto with certPem, certChainPem, serialNumber
|
||||
* - certChainPem fallback: falls back to certPem when certChain absent
|
||||
* - certChainPem from ca field: uses crt+ca when certChain absent but ca present
|
||||
* - HTTP 401: throws CaServiceError with cause + remediation
|
||||
* - HTTP non-401 error: throws CaServiceError
|
||||
* - Malformed CSR: throws before HTTP call (INVALID_CSR)
|
||||
* - Non-JSON response: throws CaServiceError
|
||||
* - HTTPS connection error: throws CaServiceError
|
||||
* - JWT custom claims: mosaic_grant_id and mosaic_subject_user_id present in OTT payload
|
||||
* verified with jose.jwtVerify (real signature check)
|
||||
* - CaServiceError: has cause + remediation properties
|
||||
* - Missing crt in response: throws CaServiceError
|
||||
* - Real CSR validation: valid P-256 CSR passes; malformed CSR fails with INVALID_CSR
|
||||
* - provisionerPassword never appears in CaServiceError messages
|
||||
* - HTTPS-only enforcement: http:// URL throws in constructor
|
||||
*/
|
||||
|
||||
import 'reflect-metadata';
|
||||
import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest';
|
||||
import { jwtVerify, exportJWK, generateKeyPair } from 'jose';
|
||||
import { Pkcs10CertificateRequestGenerator } from '@peculiar/x509';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Mock node:https BEFORE importing CaService so the mock is in place when
|
||||
// the module is loaded. Vitest/ESM require vi.mock at the top level.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
vi.mock('node:https', () => {
  const mockRequest = vi.fn();
  const mockAgent = vi.fn().mockImplementation(() => ({}));
  // Expose the same fns as both default and named exports so either
  // import style in ca.service resolves to the identical mocks.
  return {
    default: { request: mockRequest, Agent: mockAgent },
    request: mockRequest,
    Agent: mockAgent,
  };
});
|
||||
|
||||
vi.mock('node:fs', () => {
  // Any file read (e.g. the root cert at STEP_CA_ROOT_CERT_PATH) yields a
  // placeholder PEM — the service never needs the real root in unit tests.
  const mockReadFileSync = vi
    .fn()
    .mockReturnValue('-----BEGIN CERTIFICATE-----\nFAKEROOT\n-----END CERTIFICATE-----\n');
  return {
    default: { readFileSync: mockReadFileSync },
    readFileSync: mockReadFileSync,
  };
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Real self-signed EC P-256 certificate generated with openssl for testing.
// openssl req -x509 -newkey ec -pkeyopt ec_paramgen_curve:P-256 -nodes -keyout /dev/null \
//   -out /dev/stdout -subj "/CN=test" -days 1
const FAKE_CERT_PEM = `-----BEGIN CERTIFICATE-----
MIIBdDCCARmgAwIBAgIUM+iUJSayN+PwXkyVN6qwSY7sr6gwCgYIKoZIzj0EAwIw
DzENMAsGA1UEAwwEdGVzdDAeFw0yNjA0MjIwMzE5MTlaFw0yNjA0MjMwMzE5MTla
MA8xDTALBgNVBAMMBHRlc3QwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAR21kHL
n1GmFQ4TEBw3EA53pD+2McIBf5WcoHE+x0eMz5DpRKJe0ksHwOVN5Yev5d57kb+4
MvG1LhbHCB/uQo8So1MwUTAdBgNVHQ4EFgQUPq0pdIGiQ7pLBRXICS8GTliCrLsw
HwYDVR0jBBgwFoAUPq0pdIGiQ7pLBRXICS8GTliCrLswDwYDVR0TAQH/BAUwAwEB
/zAKBggqhkjOPQQDAgNJADBGAiEAypJqyC6S77aQ3eEXokM6sgAsD7Oa3tJbCbVm
zG3uJb0CIQC1w+GE+Ad0OTR5Quja46R1RjOo8ydpzZ7Fh4rouAiwEw==
-----END CERTIFICATE-----
`;

// Use a second copy of the same cert for the CA field in tests.
// (Chain-building tests only check string containment, so identity is fine.)
const FAKE_CA_PEM = FAKE_CERT_PEM;

// UUID-shaped identifiers embedded into the OTT claims by CaService.
const GRANT_ID = 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11';
const SUBJECT_USER_ID = 'b1ffcd00-0d1c-5f09-cc7e-7cc0ce491b22';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Generate a real EC P-256 key pair and CSR for integration-style tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// We generate this once at module level so it's available to all tests.
|
||||
// The key pair and CSR PEM are populated asynchronously in the test that needs them.
|
||||
|
||||
let realCsrPem: string;
|
||||
|
||||
async function generateRealCsr(): Promise<string> {
|
||||
const { privateKey, publicKey } = await generateKeyPair('ES256');
|
||||
// Export public key JWK for potential verification (not used here but confirms key is exportable)
|
||||
await exportJWK(publicKey);
|
||||
|
||||
// Use @peculiar/x509 to build a proper CSR
|
||||
const csr = await Pkcs10CertificateRequestGenerator.create({
|
||||
name: 'CN=test.federation.local',
|
||||
signingAlgorithm: { name: 'ECDSA', hash: 'SHA-256' },
|
||||
keys: { privateKey, publicKey },
|
||||
});
|
||||
|
||||
return csr.toString('pem');
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Setup env before importing service
|
||||
// We use an EC P-256 key pair here so the JWK-based signing works.
|
||||
// The key pair is generated once and stored in module-level vars.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Real EC P-256 test JWK (test-only, never used in production).
// Generated with node webcrypto for use in unit tests.
// Private half: loaded into STEP_CA_PROVISIONER_KEY_JSON below so CaService
// can actually sign the provisioner OTT.
const TEST_EC_PRIVATE_JWK = {
  key_ops: ['sign'],
  ext: true,
  kty: 'EC',
  x: 'Xq2RjZctcPcUMU14qfjs3MtZTmFk8z1lFGQyypgXZOU',
  y: 't8w9Cbt4RVmR47Wnb_i5cLwefEnMcvwse049zu9Rl_E',
  crv: 'P-256',
  d: 'TM6N79w1HE-PiML5Td4mbXfJaLHEaZrVyVrrwlJv7q8',
  kid: 'test-ec-kid',
};

// Public half of the same key pair.
// NOTE(review): not referenced by any test visible in this chunk — presumably
// kept for jwtVerify-based OTT claim checks; confirm before removing.
const TEST_EC_PUBLIC_JWK = {
  key_ops: ['verify'],
  ext: true,
  kty: 'EC',
  x: 'Xq2RjZctcPcUMU14qfjs3MtZTmFk8z1lFGQyypgXZOU',
  y: 't8w9Cbt4RVmR47Wnb_i5cLwefEnMcvwse049zu9Rl_E',
  crv: 'P-256',
  kid: 'test-ec-kid',
};

// Env must be populated BEFORE CaService is imported (the imports follow below).
process.env['STEP_CA_URL'] = 'https://step-ca:9000';
process.env['STEP_CA_PROVISIONER_KEY_JSON'] = JSON.stringify(TEST_EC_PRIVATE_JWK);
process.env['STEP_CA_ROOT_CERT_PATH'] = '/fake/root.pem';
|
||||
|
||||
// Import AFTER env is set and mocks are registered
|
||||
import * as httpsModule from 'node:https';
|
||||
import { CaService, CaServiceError } from './ca.service.js';
|
||||
import type { IssueCertRequestDto } from './ca.dto.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helper to build a mock https.request that simulates step-ca
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Scripts the mocked https.request to simulate one step-ca exchange.
 *
 * @param statusCode HTTP status the fake response reports.
 * @param body       Response payload; strings pass through, objects are JSON-stringified.
 * @param errorMsg   When set, no response is delivered — instead the request
 *                   emits an 'error' event with this message (connection failure).
 */
function makeHttpsMock(statusCode: number, body: unknown, errorMsg?: string): void {
  const mockReq = {
    write: vi.fn(),
    end: vi.fn(),
    on: vi.fn(),
    setTimeout: vi.fn(),
  };

  (httpsModule.request as unknown as Mock).mockImplementation(
    (
      _options: unknown,
      callback: (res: {
        statusCode: number;
        on: (event: string, cb: (chunk?: Buffer) => void) => void;
      }) => void,
    ) => {
      // Fake IncomingMessage: invokes 'data' (once, if a body exists) and 'end'
      // synchronously as the service registers each listener.
      const mockRes = {
        statusCode,
        on: (event: string, cb: (chunk?: Buffer) => void) => {
          if (event === 'data') {
            if (body !== undefined) {
              cb(Buffer.from(typeof body === 'string' ? body : JSON.stringify(body)));
            }
          }
          if (event === 'end') {
            cb();
          }
        },
      };

      if (errorMsg) {
        // Simulate a connection error via the req.on('error') handler
        // (the response callback is never invoked in this branch).
        mockReq.on.mockImplementation((event: string, cb: (err: Error) => void) => {
          if (event === 'error') {
            setImmediate(() => cb(new Error(errorMsg)));
          }
        });
      } else {
        // Normal flow: call the response callback
        setImmediate(() => callback(mockRes));
      }

      return mockReq;
    },
  );
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe('CaService', () => {
|
||||
  let service: CaService;

  beforeEach(() => {
    // Reset https.request scripting between tests, then build a fresh service
    // (constructor reads the env configured at module load).
    vi.clearAllMocks();
    service = new CaService();
  });
|
||||
|
||||
function makeReq(overrides: Partial<IssueCertRequestDto> = {}): IssueCertRequestDto {
|
||||
// Use a real CSR if available; fall back to a minimal placeholder
|
||||
const defaultCsr = realCsrPem ?? makeFakeCsr();
|
||||
return {
|
||||
csrPem: defaultCsr,
|
||||
grantId: GRANT_ID,
|
||||
subjectUserId: SUBJECT_USER_ID,
|
||||
ttlSeconds: 300,
|
||||
...overrides,
|
||||
};
|
||||
}
|
||||
|
||||
function makeFakeCsr(): string {
|
||||
// A structurally valid-looking CSR header/footer (body will fail crypto verify)
|
||||
return `-----BEGIN CERTIFICATE REQUEST-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0000000000000000AAAA\n-----END CERTIFICATE REQUEST-----\n`;
|
||||
}
|
||||
|
||||
  // -------------------------------------------------------------------------
  // Real CSR generation — runs once and populates realCsrPem
  // -------------------------------------------------------------------------

  it('generates a real P-256 CSR that passes validateCsr', async () => {
    // Side effect: later tests reuse realCsrPem via makeReq().
    realCsrPem = await generateRealCsr();
    expect(realCsrPem).toMatch(/BEGIN CERTIFICATE REQUEST/);

    // Now test that the service's validateCsr accepts it.
    // We call it indirectly via issueCert with a successful mock.
    makeHttpsMock(200, { crt: FAKE_CERT_PEM, certChain: [FAKE_CERT_PEM, FAKE_CA_PEM] });
    const result = await service.issueCert(makeReq({ csrPem: realCsrPem }));
    expect(result.certPem).toBe(FAKE_CERT_PEM);
  });

  it('throws INVALID_CSR for a malformed PEM-shaped CSR', async () => {
    // Valid base64 inside valid framing, but not a parseable PKCS#10 structure.
    const malformedCsr =
      '-----BEGIN CERTIFICATE REQUEST-----\nTm90QVJlYWxDU1I=\n-----END CERTIFICATE REQUEST-----\n';

    await expect(service.issueCert(makeReq({ csrPem: malformedCsr }))).rejects.toSatisfy(
      (err: unknown) => {
        if (!(err instanceof CaServiceError)) return false;
        expect(err.code).toBe('INVALID_CSR');
        return true;
      },
    );
  });
|
||||
|
||||
  // -------------------------------------------------------------------------
  // Happy path
  // -------------------------------------------------------------------------

  it('returns IssuedCertDto on success (certChain present)', async () => {
    // Guard: this test may run before the CSR-generation test above.
    if (!realCsrPem) realCsrPem = await generateRealCsr();
    makeHttpsMock(200, {
      crt: FAKE_CERT_PEM,
      certChain: [FAKE_CERT_PEM, FAKE_CA_PEM],
    });

    const result = await service.issueCert(makeReq());

    expect(result.certPem).toBe(FAKE_CERT_PEM);
    expect(result.certChainPem).toContain(FAKE_CERT_PEM);
    expect(result.certChainPem).toContain(FAKE_CA_PEM);
    expect(typeof result.serialNumber).toBe('string');
  });
|
||||
|
||||
  // -------------------------------------------------------------------------
  // certChainPem fallback — certChain absent, ca field present
  // -------------------------------------------------------------------------

  it('builds certChainPem from crt+ca when certChain is absent', async () => {
    if (!realCsrPem) realCsrPem = await generateRealCsr();
    // step-ca response variant: only `crt` and `ca`, no `certChain` array.
    makeHttpsMock(200, {
      crt: FAKE_CERT_PEM,
      ca: FAKE_CA_PEM,
    });

    const result = await service.issueCert(makeReq());

    expect(result.certPem).toBe(FAKE_CERT_PEM);
    expect(result.certChainPem).toContain(FAKE_CERT_PEM);
    expect(result.certChainPem).toContain(FAKE_CA_PEM);
  });
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// certChainPem fallback — no certChain, no ca field
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('falls back to certPem alone when certChain and ca are absent', async () => {
|
||||
if (!realCsrPem) realCsrPem = await generateRealCsr();
|
||||
makeHttpsMock(200, { crt: FAKE_CERT_PEM });
|
||||
|
||||
const result = await service.issueCert(makeReq());
|
||||
|
||||
expect(result.certPem).toBe(FAKE_CERT_PEM);
|
||||
expect(result.certChainPem).toBe(FAKE_CERT_PEM);
|
||||
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// HTTP 401
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('throws CaServiceError on HTTP 401', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();
  makeHttpsMock(401, { message: 'Unauthorized' });

  await expect(service.issueCert(makeReq())).rejects.toSatisfy((err: unknown) => {
    if (!(err instanceof CaServiceError)) return false;
    expect(err.message).toMatch(/401/);
    expect(err.remediation).toBeTruthy();
    return true;
  });
});

// -------------------------------------------------------------------------
// HTTP non-401 error (e.g. 422)
// -------------------------------------------------------------------------

it('throws CaServiceError on HTTP 422', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();
  makeHttpsMock(422, { message: 'Unprocessable Entity' });

  await expect(service.issueCert(makeReq())).rejects.toBeInstanceOf(CaServiceError);
});

// -------------------------------------------------------------------------
// Malformed CSR — throws before HTTP call
// -------------------------------------------------------------------------

it('throws CaServiceError for malformed CSR without making HTTP call', async () => {
  // Spy on https.request to prove CSR validation short-circuits before I/O.
  const requestSpy = vi.spyOn(httpsModule, 'request');

  await expect(service.issueCert(makeReq({ csrPem: 'not-a-valid-csr' }))).rejects.toBeInstanceOf(
    CaServiceError,
  );

  expect(requestSpy).not.toHaveBeenCalled();
});

// -------------------------------------------------------------------------
// Non-JSON response
// -------------------------------------------------------------------------

it('throws CaServiceError when step-ca returns non-JSON', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();
  makeHttpsMock(200, 'this is not json');

  await expect(service.issueCert(makeReq())).rejects.toSatisfy((err: unknown) => {
    if (!(err instanceof CaServiceError)) return false;
    expect(err.message).toMatch(/non-JSON/);
    return true;
  });
});

// -------------------------------------------------------------------------
// HTTPS connection error
// -------------------------------------------------------------------------

it('throws CaServiceError on HTTPS connection error', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();
  // makeHttpsMock with an error message simulates a socket-level failure.
  makeHttpsMock(0, undefined, 'connect ECONNREFUSED 127.0.0.1:9000');

  await expect(service.issueCert(makeReq())).rejects.toSatisfy((err: unknown) => {
    if (!(err instanceof CaServiceError)) return false;
    expect(err.message).toMatch(/HTTPS connection/);
    expect(err.cause).toBeInstanceOf(Error);
    return true;
  });
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// JWT custom claims: mosaic_grant_id and mosaic_subject_user_id
|
||||
// Verified with jose.jwtVerify for real signature verification (M6)
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('OTT contains mosaic_grant_id, mosaic_subject_user_id, and jti; signature verifies with jose', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();

  // Capture the JSON body the service writes to the mocked https request so
  // the OTT can be inspected without any real network traffic.
  let capturedBody: Record<string, unknown> | undefined;

  const mockReq = {
    write: vi.fn((data: string) => {
      capturedBody = JSON.parse(data) as Record<string, unknown>;
    }),
    end: vi.fn(),
    on: vi.fn(),
    setTimeout: vi.fn(),
  };

  (httpsModule.request as unknown as Mock).mockImplementation(
    (
      _options: unknown,
      callback: (res: {
        statusCode: number;
        on: (event: string, cb: (chunk?: Buffer) => void) => void;
      }) => void,
    ) => {
      const mockRes = {
        statusCode: 200,
        on: (event: string, cb: (chunk?: Buffer) => void) => {
          if (event === 'data') {
            cb(Buffer.from(JSON.stringify({ crt: FAKE_CERT_PEM })));
          }
          if (event === 'end') {
            cb();
          }
        },
      };
      // Deliver the response asynchronously, like a real socket would.
      setImmediate(() => callback(mockRes));
      return mockReq;
    },
  );

  await service.issueCert(makeReq({ csrPem: realCsrPem }));

  expect(capturedBody).toBeDefined();
  const ott = capturedBody!['ott'] as string;
  expect(typeof ott).toBe('string');

  // Verify JWT structure
  const parts = ott.split('.');
  expect(parts).toHaveLength(3);

  // Decode payload without signature check first
  const payloadJson = Buffer.from(parts[1]!, 'base64url').toString('utf8');
  const payload = JSON.parse(payloadJson) as Record<string, unknown>;

  expect(payload['mosaic_grant_id']).toBe(GRANT_ID);
  expect(payload['mosaic_subject_user_id']).toBe(SUBJECT_USER_ID);
  expect(typeof payload['jti']).toBe('string'); // M2: jti present
  expect(payload['jti']).toMatch(/^[0-9a-f-]{36}$/); // UUID format

  // M3: top-level sha should NOT be present; step.sha should be present
  expect(payload['sha']).toBeUndefined();
  const step = payload['step'] as Record<string, unknown> | undefined;
  expect(step?.['sha']).toBeDefined();

  // M6: Verify signature with jose.jwtVerify using the public key
  const { importJWK: importJose } = await import('jose');
  const publicKey = await importJose(TEST_EC_PUBLIC_JWK, 'ES256');
  const verified = await jwtVerify(ott, publicKey);
  expect(verified.payload['mosaic_grant_id']).toBe(GRANT_ID);
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// CaServiceError has cause + remediation
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('CaServiceError carries cause and remediation', () => {
  const cause = new Error('original error');
  const err = new CaServiceError('something went wrong', 'fix it like this', cause);

  expect(err).toBeInstanceOf(Error);
  expect(err).toBeInstanceOf(CaServiceError);
  expect(err.message).toBe('something went wrong');
  expect(err.remediation).toBe('fix it like this');
  expect(err.cause).toBe(cause);
  expect(err.name).toBe('CaServiceError');
});

// -------------------------------------------------------------------------
// Missing crt in response
// -------------------------------------------------------------------------

it('throws CaServiceError when response is missing the crt field', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();
  // A 200 response that lacks the leaf cert must fail loudly, never return
  // a partial result.
  makeHttpsMock(200, { ca: FAKE_CA_PEM });

  await expect(service.issueCert(makeReq())).rejects.toSatisfy((err: unknown) => {
    if (!(err instanceof CaServiceError)) return false;
    expect(err.message).toMatch(/missing the "crt" field/);
    return true;
  });
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// M6: provisionerPassword must never appear in CaServiceError messages
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('provisionerPassword does not appear in any CaServiceError message', async () => {
|
||||
// Temporarily set a recognizable password to test against
|
||||
const originalPassword = process.env['STEP_CA_PROVISIONER_PASSWORD'];
|
||||
process.env['STEP_CA_PROVISIONER_PASSWORD'] = 'super-secret-password-12345';
|
||||
|
||||
// Generate a bad CSR to trigger an error path
|
||||
const caughtErrors: CaServiceError[] = [];
|
||||
try {
|
||||
await service.issueCert(makeReq({ csrPem: 'not-a-csr' }));
|
||||
} catch (err) {
|
||||
if (err instanceof CaServiceError) {
|
||||
caughtErrors.push(err);
|
||||
}
|
||||
}
|
||||
|
||||
// Also try HTTP 401 path
|
||||
if (!realCsrPem) realCsrPem = await generateRealCsr();
|
||||
makeHttpsMock(401, { message: 'Unauthorized' });
|
||||
try {
|
||||
await service.issueCert(makeReq({ csrPem: realCsrPem }));
|
||||
} catch (err) {
|
||||
if (err instanceof CaServiceError) {
|
||||
caughtErrors.push(err);
|
||||
}
|
||||
}
|
||||
|
||||
for (const err of caughtErrors) {
|
||||
expect(err.message).not.toContain('super-secret-password-12345');
|
||||
if (err.remediation) {
|
||||
expect(err.remediation).not.toContain('super-secret-password-12345');
|
||||
}
|
||||
}
|
||||
|
||||
process.env['STEP_CA_PROVISIONER_PASSWORD'] = originalPassword;
|
||||
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// M7: HTTPS-only enforcement in constructor
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('throws in constructor if STEP_CA_URL uses http://', () => {
|
||||
const originalUrl = process.env['STEP_CA_URL'];
|
||||
process.env['STEP_CA_URL'] = 'http://step-ca:9000';
|
||||
|
||||
expect(() => new CaService()).toThrow(CaServiceError);
|
||||
|
||||
process.env['STEP_CA_URL'] = originalUrl;
|
||||
});
|
||||
|
||||
// -------------------------------------------------------------------------
|
||||
// TTL clamp: ttlSeconds is clamped to 900 s (15 min) maximum
|
||||
// -------------------------------------------------------------------------
|
||||
|
||||
it('clamps ttlSeconds to 900 s regardless of input', async () => {
  if (!realCsrPem) realCsrPem = await generateRealCsr();

  // Capture the POSTed body so the validity duration can be inspected.
  let capturedBody: Record<string, unknown> | undefined;

  const mockReq = {
    write: vi.fn((data: string) => {
      capturedBody = JSON.parse(data) as Record<string, unknown>;
    }),
    end: vi.fn(),
    on: vi.fn(),
    setTimeout: vi.fn(),
  };

  (httpsModule.request as unknown as Mock).mockImplementation(
    (
      _options: unknown,
      callback: (res: {
        statusCode: number;
        on: (event: string, cb: (chunk?: Buffer) => void) => void;
      }) => void,
    ) => {
      const mockRes = {
        statusCode: 200,
        on: (event: string, cb: (chunk?: Buffer) => void) => {
          if (event === 'data') {
            cb(Buffer.from(JSON.stringify({ crt: FAKE_CERT_PEM })));
          }
          if (event === 'end') {
            cb();
          }
        },
      };
      // Deliver the response asynchronously, like a real socket would.
      setImmediate(() => callback(mockRes));
      return mockReq;
    },
  );

  // Request 86400 s — should be clamped to 900
  await service.issueCert(makeReq({ ttlSeconds: 86400 }));

  expect(capturedBody).toBeDefined();
  const validity = capturedBody!['validity'] as Record<string, unknown>;
  expect(validity['duration']).toBe('900s');
});
|
||||
});
|
||||
680
apps/gateway/src/federation/ca.service.ts
Normal file
680
apps/gateway/src/federation/ca.service.ts
Normal file
@@ -0,0 +1,680 @@
|
||||
/**
|
||||
* CaService — Step-CA client for federation grant certificate issuance.
|
||||
*
|
||||
* Responsibilities:
|
||||
* 1. Build a JWK-provisioner One-Time Token (OTT) signed with the provisioner
|
||||
* private key (ES256/ES384/RS256 per JWK kty/crv) carrying Mosaic-specific
|
||||
* claims (`mosaic_grant_id`, `mosaic_subject_user_id`, `step.sha`) per the
|
||||
* step-ca JWK provisioner protocol.
|
||||
* 2. POST the CSR + OTT to the step-ca `/1.0/sign` endpoint over HTTPS,
|
||||
* pinning the trust to the CA root cert supplied via env.
|
||||
* 3. Return an IssuedCertDto containing the leaf cert, full chain, and
|
||||
* serial number.
|
||||
*
|
||||
* Environment variables (all required at runtime — validated in constructor):
|
||||
* STEP_CA_URL https://step-ca:9000
|
||||
* STEP_CA_PROVISIONER_KEY_JSON JWK provisioner private key (JSON)
|
||||
* STEP_CA_ROOT_CERT_PATH Absolute path to the CA root PEM
|
||||
*
|
||||
* Optional (only used for JWK PBES2 decrypt at startup if key is encrypted):
|
||||
* STEP_CA_PROVISIONER_PASSWORD JWK provisioner password (raw string)
|
||||
*
|
||||
* Custom OID registry (PRD §6, docs/federation/SETUP.md):
|
||||
* 1.3.6.1.4.1.99999.1 — mosaic_grant_id
|
||||
* 1.3.6.1.4.1.99999.2 — mosaic_subject_user_id
|
||||
*
|
||||
* Fail-loud contract:
|
||||
* Every error path throws CaServiceError with a human-readable `remediation`
|
||||
* field. Silent OID-stripping is NEVER allowed — if the sign response does
|
||||
* not include the cert, we throw rather than return a cert that may be
|
||||
* missing the custom extensions.
|
||||
*/
|
||||
|
||||
import { Injectable, Logger } from '@nestjs/common';
|
||||
import * as crypto from 'node:crypto';
|
||||
import * as fs from 'node:fs';
|
||||
import * as https from 'node:https';
|
||||
import { SignJWT, importJWK } from 'jose';
|
||||
import { Pkcs10CertificateRequest, X509Certificate } from '@peculiar/x509';
|
||||
import type { IssueCertRequestDto } from './ca.dto.js';
|
||||
import { IssuedCertDto } from './ca.dto.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Custom error class
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class CaServiceError extends Error {
|
||||
readonly cause: unknown;
|
||||
readonly remediation: string;
|
||||
readonly code?: string;
|
||||
|
||||
constructor(message: string, remediation: string, cause?: unknown, code?: string) {
|
||||
super(message);
|
||||
this.name = 'CaServiceError';
|
||||
this.cause = cause;
|
||||
this.remediation = remediation;
|
||||
this.code = code;
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Internal types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Parsed body of a step-ca /1.0/sign response. */
interface StepSignResponse {
  crt: string; // leaf certificate PEM — required; its absence is a hard error
  ca?: string; // issuing CA PEM — fallback chain source when certChain is absent
  certChain?: string[]; // full chain PEMs, leaf first
}

/**
 * Minimal JWK shape for the provisioner private key. Only the fields this
 * service reads (kty/crv/kid/alg) are typed explicitly; the index signature
 * lets the full object pass through to jose's importJWK untouched.
 */
interface JwkKey {
  kty: string;
  kid?: string;
  use?: string;
  alg?: string;
  k?: string; // symmetric
  n?: string; // RSA
  e?: string;
  d?: string;
  x?: string; // EC
  y?: string;
  crv?: string;
  [key: string]: unknown;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** UUID regex for validation */
|
||||
const UUID_RE = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
|
||||
|
||||
/**
|
||||
* Derive the JWT algorithm string from a JWK's kty/crv fields.
|
||||
* EC P-256 → ES256, EC P-384 → ES384, RSA → RS256.
|
||||
*/
|
||||
function algFromJwk(jwk: JwkKey): string {
|
||||
if (jwk.alg) return jwk.alg;
|
||||
if (jwk.kty === 'EC') {
|
||||
if (jwk.crv === 'P-384') return 'ES384';
|
||||
return 'ES256'; // default for P-256 and Ed25519-style EC keys
|
||||
}
|
||||
if (jwk.kty === 'RSA') return 'RS256';
|
||||
throw new CaServiceError(
|
||||
`Unsupported JWK kty: ${jwk.kty}`,
|
||||
'STEP_CA_PROVISIONER_KEY_JSON must be an EC (P-256/P-384) or RSA JWK private key.',
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute SHA-256 fingerprint of the DER-encoded CSR body.
|
||||
* step-ca uses this as the `step.sha` claim to bind the OTT to a specific CSR.
|
||||
*/
|
||||
function csrFingerprint(csrPem: string): string {
|
||||
// Strip PEM headers and decode base64 body
|
||||
const b64 = csrPem
|
||||
.replace(/-----BEGIN CERTIFICATE REQUEST-----/, '')
|
||||
.replace(/-----END CERTIFICATE REQUEST-----/, '')
|
||||
.replace(/\s+/g, '');
|
||||
|
||||
let derBuf: Buffer;
|
||||
try {
|
||||
derBuf = Buffer.from(b64, 'base64');
|
||||
} catch (err) {
|
||||
throw new CaServiceError(
|
||||
'Failed to base64-decode the CSR PEM body',
|
||||
'Verify that csrPem is a valid PKCS#10 PEM-encoded certificate request.',
|
||||
err,
|
||||
);
|
||||
}
|
||||
|
||||
if (derBuf.length === 0) {
|
||||
throw new CaServiceError(
|
||||
'CSR PEM decoded to empty buffer — malformed input',
|
||||
'Provide a valid non-empty PKCS#10 PEM-encoded certificate request.',
|
||||
);
|
||||
}
|
||||
|
||||
return crypto.createHash('sha256').update(derBuf).digest('hex');
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a JSON POST to the step-ca sign endpoint.
|
||||
* Returns the parsed response body or throws CaServiceError.
|
||||
*/
|
||||
function httpsPost(url: string, body: unknown, agent: https.Agent): Promise<StepSignResponse> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const bodyStr = JSON.stringify(body);
|
||||
const parsed = new URL(url);
|
||||
|
||||
const options: https.RequestOptions = {
|
||||
hostname: parsed.hostname,
|
||||
port: parsed.port ? parseInt(parsed.port, 10) : 443,
|
||||
path: parsed.pathname,
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Length': Buffer.byteLength(bodyStr),
|
||||
},
|
||||
agent,
|
||||
timeout: 5000,
|
||||
};
|
||||
|
||||
const req = https.request(options, (res) => {
|
||||
const chunks: Buffer[] = [];
|
||||
res.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
res.on('end', () => {
|
||||
const raw = Buffer.concat(chunks).toString('utf8');
|
||||
|
||||
if (res.statusCode === 401) {
|
||||
reject(
|
||||
new CaServiceError(
|
||||
`step-ca returned HTTP 401 — invalid or expired OTT`,
|
||||
'Check STEP_CA_PROVISIONER_KEY_JSON. Ensure the mosaic-fed provisioner is configured in the CA.',
|
||||
),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (res.statusCode && res.statusCode >= 400) {
|
||||
reject(
|
||||
new CaServiceError(
|
||||
`step-ca returned HTTP ${res.statusCode}: ${raw.slice(0, 256)}`,
|
||||
`Review the step-ca logs. Status ${res.statusCode} may indicate a CSR policy violation or misconfigured provisioner.`,
|
||||
),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let parsed: unknown;
|
||||
try {
|
||||
parsed = JSON.parse(raw) as unknown;
|
||||
} catch (err) {
|
||||
reject(
|
||||
new CaServiceError(
|
||||
'step-ca returned a non-JSON response',
|
||||
'Verify STEP_CA_URL points to a running step-ca instance and that TLS is properly configured.',
|
||||
err,
|
||||
),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
resolve(parsed as StepSignResponse);
|
||||
});
|
||||
});
|
||||
|
||||
req.setTimeout(5000, () => {
|
||||
req.destroy(new Error('Request timed out after 5000ms'));
|
||||
});
|
||||
|
||||
req.on('error', (err: Error) => {
|
||||
reject(
|
||||
new CaServiceError(
|
||||
`HTTPS connection to step-ca failed: ${err.message}`,
|
||||
'Ensure STEP_CA_URL is reachable and STEP_CA_ROOT_CERT_PATH points to the correct CA root certificate.',
|
||||
err,
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
req.write(bodyStr);
|
||||
req.end();
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract a decimal serial number from a PEM certificate.
|
||||
* Throws CaServiceError on failure — never silently returns 'unknown'.
|
||||
*/
|
||||
function extractSerial(certPem: string): string {
|
||||
let cert: crypto.X509Certificate;
|
||||
try {
|
||||
cert = new crypto.X509Certificate(certPem);
|
||||
} catch (err) {
|
||||
throw new CaServiceError(
|
||||
'Failed to parse the issued certificate PEM',
|
||||
'The certificate returned by step-ca could not be parsed. Check that step-ca is returning a valid PEM certificate.',
|
||||
err,
|
||||
'CERT_PARSE',
|
||||
);
|
||||
}
|
||||
return cert.serialNumber;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Service
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@Injectable()
|
||||
export class CaService {
|
||||
private readonly logger = new Logger(CaService.name);
|
||||
|
||||
private readonly caUrl: string;
|
||||
private readonly rootCertPath: string;
|
||||
private readonly httpsAgent: https.Agent;
|
||||
private readonly jwk: JwkKey;
|
||||
private cachedPrivateKey: crypto.KeyObject | null = null;
|
||||
private readonly jwtAlg: string;
|
||||
private readonly kid: string;
|
||||
|
||||
  /**
   * Validates all required environment configuration, parses the provisioner
   * JWK, and builds the root-pinned HTTPS agent. Every misconfiguration
   * throws CaServiceError immediately so the gateway fails at startup rather
   * than at first issuance.
   */
  constructor() {
    const caUrl = process.env['STEP_CA_URL'];
    const provisionerKeyJson = process.env['STEP_CA_PROVISIONER_KEY_JSON'];
    const rootCertPath = process.env['STEP_CA_ROOT_CERT_PATH'];

    if (!caUrl) {
      throw new CaServiceError(
        'STEP_CA_URL is not set',
        'Set STEP_CA_URL to the base URL of the step-ca instance, e.g. https://step-ca:9000',
      );
    }

    // Enforce HTTPS-only URL
    let parsedUrl: URL;
    try {
      parsedUrl = new URL(caUrl);
    } catch (err) {
      throw new CaServiceError(
        `STEP_CA_URL is not a valid URL: ${caUrl}`,
        'Set STEP_CA_URL to a valid HTTPS URL, e.g. https://step-ca:9000',
        err,
      );
    }
    if (parsedUrl.protocol !== 'https:') {
      throw new CaServiceError(
        `STEP_CA_URL must use HTTPS — got: ${parsedUrl.protocol}`,
        'Set STEP_CA_URL to an https:// URL. Unencrypted connections to the CA are not permitted.',
      );
    }

    if (!provisionerKeyJson) {
      throw new CaServiceError(
        'STEP_CA_PROVISIONER_KEY_JSON is not set',
        'Set STEP_CA_PROVISIONER_KEY_JSON to the JSON-encoded JWK for the mosaic-fed provisioner.',
      );
    }
    if (!rootCertPath) {
      throw new CaServiceError(
        'STEP_CA_ROOT_CERT_PATH is not set',
        'Set STEP_CA_ROOT_CERT_PATH to the absolute path of the step-ca root CA certificate PEM file.',
      );
    }

    // Parse JWK once — do NOT store the raw JSON string as a class field
    let jwk: JwkKey;
    try {
      jwk = JSON.parse(provisionerKeyJson) as JwkKey;
    } catch (err) {
      throw new CaServiceError(
        'STEP_CA_PROVISIONER_KEY_JSON is not valid JSON',
        'Set STEP_CA_PROVISIONER_KEY_JSON to the JSON-serialised JWK object for the mosaic-fed provisioner.',
        err,
      );
    }

    // Derive algorithm from JWK metadata
    const jwtAlg = algFromJwk(jwk);
    const kid = jwk.kid ?? 'mosaic-fed';

    // Keep the parsed JWK object for lazy import: jose's importJWK is async
    // and cannot run in a constructor, so the actual private key is imported
    // and cached on first use inside getPrivateKey(). Only the parsed object
    // is retained — the raw provisionerKeyJson string is deliberately not
    // stored on the instance.
    this.jwk = jwk;
    this.jwtAlg = jwtAlg;
    this.kid = kid;

    this.caUrl = caUrl;
    this.rootCertPath = rootCertPath;

    // Read the root cert and pin it for all HTTPS connections.
    let rootCert: string;
    try {
      rootCert = fs.readFileSync(this.rootCertPath, 'utf8');
    } catch (err) {
      throw new CaServiceError(
        `Cannot read STEP_CA_ROOT_CERT_PATH: ${rootCertPath}`,
        'Ensure the file exists and is readable by the gateway process.',
        err,
      );
    }

    this.httpsAgent = new https.Agent({
      ca: rootCert,
      rejectUnauthorized: true,
    });

    this.logger.log(`CaService initialised — CA URL: ${this.caUrl}`);
  }
|
||||
|
||||
/**
|
||||
* Lazily import the private key from JWK on first use.
|
||||
* The key is cached in cachedPrivateKey after first import.
|
||||
*/
|
||||
private async getPrivateKey(): Promise<crypto.KeyObject> {
|
||||
if (this.cachedPrivateKey !== null) return this.cachedPrivateKey;
|
||||
try {
|
||||
const key = await importJWK(this.jwk, this.jwtAlg);
|
||||
// importJWK returns KeyLike (crypto.KeyObject | Uint8Array) — in Node.js it's KeyObject
|
||||
this.cachedPrivateKey = key as unknown as crypto.KeyObject;
|
||||
return this.cachedPrivateKey;
|
||||
} catch (err) {
|
||||
throw new CaServiceError(
|
||||
'Failed to import STEP_CA_PROVISIONER_KEY_JSON as a cryptographic key',
|
||||
'Ensure STEP_CA_PROVISIONER_KEY_JSON contains a valid JWK private key (EC P-256/P-384 or RSA).',
|
||||
err,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the JWK-provisioner OTT signed with the provisioner private key.
|
||||
* Algorithm is derived from the JWK kty/crv fields.
|
||||
*/
|
||||
private async buildOtt(params: {
|
||||
csrPem: string;
|
||||
grantId: string;
|
||||
subjectUserId: string;
|
||||
ttlSeconds: number;
|
||||
csrCn: string;
|
||||
}): Promise<string> {
|
||||
const { csrPem, grantId, subjectUserId, ttlSeconds, csrCn } = params;
|
||||
|
||||
// Validate UUID shape for grant id and subject user id
|
||||
if (!UUID_RE.test(grantId)) {
|
||||
throw new CaServiceError(
|
||||
`grantId is not a valid UUID: ${grantId}`,
|
||||
'Provide a valid UUID (RFC 4122) for grantId.',
|
||||
undefined,
|
||||
'INVALID_GRANT_ID',
|
||||
);
|
||||
}
|
||||
if (!UUID_RE.test(subjectUserId)) {
|
||||
throw new CaServiceError(
|
||||
`subjectUserId is not a valid UUID: ${subjectUserId}`,
|
||||
'Provide a valid UUID (RFC 4122) for subjectUserId.',
|
||||
undefined,
|
||||
'INVALID_GRANT_ID',
|
||||
);
|
||||
}
|
||||
|
||||
const sha = csrFingerprint(csrPem);
|
||||
const now = Math.floor(Date.now() / 1000);
|
||||
const privateKey = await this.getPrivateKey();
|
||||
|
||||
const ott = await new SignJWT({
|
||||
iss: this.kid,
|
||||
sub: csrCn, // M1: set sub to identity from CSR CN
|
||||
aud: [`${this.caUrl}/1.0/sign`],
|
||||
iat: now,
|
||||
nbf: now - 30, // 30 s clock-skew tolerance
|
||||
exp: now + Math.min(ttlSeconds, 3600), // OTT validity ≤ 1 h
|
||||
jti: crypto.randomUUID(), // M2: unique token ID
|
||||
// step.sha is the canonical field name used in the template — M3: keep only step.sha
|
||||
step: { sha },
|
||||
// Mosaic custom claims consumed by federation.tpl
|
||||
mosaic_grant_id: grantId,
|
||||
mosaic_subject_user_id: subjectUserId,
|
||||
})
|
||||
.setProtectedHeader({ alg: this.jwtAlg, typ: 'JWT', kid: this.kid })
|
||||
.sign(privateKey);
|
||||
|
||||
return ott;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate a PEM-encoded CSR using @peculiar/x509.
|
||||
* Verifies the self-signature, key type/size, and signature algorithm.
|
||||
* Optionally verifies that the CSR's SANs match the expected set.
|
||||
*
|
||||
* Throws CaServiceError with code 'INVALID_CSR' on failure.
|
||||
*/
|
||||
private async validateCsr(pem: string, expectedSans?: string[]): Promise<string> {
|
||||
let csr: Pkcs10CertificateRequest;
|
||||
try {
|
||||
csr = new Pkcs10CertificateRequest(pem);
|
||||
} catch (err) {
|
||||
throw new CaServiceError(
|
||||
'Failed to parse CSR PEM as a valid PKCS#10 certificate request',
|
||||
'Provide a valid PEM-encoded PKCS#10 CSR.',
|
||||
err,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
|
||||
// Verify self-signature
|
||||
let valid: boolean;
|
||||
try {
|
||||
valid = await csr.verify();
|
||||
} catch (err) {
|
||||
throw new CaServiceError(
|
||||
'CSR signature verification threw an error',
|
||||
'The CSR self-signature could not be verified. Ensure the CSR is properly formed.',
|
||||
err,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
if (!valid) {
|
||||
throw new CaServiceError(
|
||||
'CSR self-signature is invalid',
|
||||
'The CSR must be self-signed with the corresponding private key.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
|
||||
// Validate signature algorithm — reject MD5 and SHA-1
|
||||
// signatureAlgorithm is HashedAlgorithm which extends Algorithm.
|
||||
// Cast through unknown to access .name and .hash.name without DOM lib globals.
|
||||
const sigAlgAny = csr.signatureAlgorithm as unknown as {
|
||||
name?: string;
|
||||
hash?: { name?: string };
|
||||
};
|
||||
const sigAlgName = (sigAlgAny.name ?? '').toLowerCase();
|
||||
const hashName = (sigAlgAny.hash?.name ?? '').toLowerCase();
|
||||
if (
|
||||
sigAlgName.includes('md5') ||
|
||||
sigAlgName.includes('sha1') ||
|
||||
hashName === 'sha-1' ||
|
||||
hashName === 'sha1'
|
||||
) {
|
||||
throw new CaServiceError(
|
||||
`CSR uses a forbidden signature algorithm: ${sigAlgAny.name ?? 'unknown'}`,
|
||||
'Use SHA-256 or stronger. MD5 and SHA-1 are not permitted.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
|
||||
// Validate public key algorithm and strength via the algorithm descriptor on the key.
|
||||
// csr.publicKey.algorithm is type Algorithm (WebCrypto) — use name-based checks.
|
||||
// We cast to an extended interface to access curve/modulus info without DOM globals.
|
||||
const pubKeyAlgo = csr.publicKey.algorithm as {
|
||||
name: string;
|
||||
namedCurve?: string;
|
||||
modulusLength?: number;
|
||||
};
|
||||
const keyAlgoName = pubKeyAlgo.name;
|
||||
|
||||
if (keyAlgoName === 'RSASSA-PKCS1-v1_5' || keyAlgoName === 'RSA-PSS') {
|
||||
const modulusLength = pubKeyAlgo.modulusLength ?? 0;
|
||||
if (modulusLength < 2048) {
|
||||
throw new CaServiceError(
|
||||
`CSR RSA key is too short: ${modulusLength} bits (minimum 2048)`,
|
||||
'Use an RSA key of at least 2048 bits.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
} else if (keyAlgoName === 'ECDSA') {
|
||||
const namedCurve = pubKeyAlgo.namedCurve ?? '';
|
||||
const allowedCurves = new Set(['P-256', 'P-384']);
|
||||
if (!allowedCurves.has(namedCurve)) {
|
||||
throw new CaServiceError(
|
||||
`CSR EC key uses disallowed curve: ${namedCurve}`,
|
||||
'Use EC P-256 or P-384. Other curves are not permitted.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
} else if (keyAlgoName === 'Ed25519') {
|
||||
// Ed25519 is explicitly allowed
|
||||
} else {
|
||||
throw new CaServiceError(
|
||||
`CSR uses unsupported key algorithm: ${keyAlgoName}`,
|
||||
'Use EC (P-256/P-384), Ed25519, or RSA (≥2048 bit) keys.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
|
||||
// Extract SANs if expectedSans provided
|
||||
if (expectedSans && expectedSans.length > 0) {
|
||||
// Get SANs from CSR extensions
|
||||
const sanExtension = csr.extensions?.find(
|
||||
(ext) => ext.type === '2.5.29.17', // Subject Alternative Name OID
|
||||
);
|
||||
const csrSans: string[] = [];
|
||||
if (sanExtension) {
|
||||
// Parse the raw SAN extension — store as stringified for comparison
|
||||
// @peculiar/x509 exposes SANs through the parsed extension
|
||||
const sanExt = sanExtension as { names?: Array<{ type: string; value: string }> };
|
||||
if (sanExt.names) {
|
||||
for (const name of sanExt.names) {
|
||||
csrSans.push(name.value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const csrSanSet = new Set(csrSans);
|
||||
const expectedSanSet = new Set(expectedSans);
|
||||
const missing = expectedSans.filter((s) => !csrSanSet.has(s));
|
||||
const extra = csrSans.filter((s) => !expectedSanSet.has(s));
|
||||
|
||||
if (missing.length > 0 || extra.length > 0) {
|
||||
throw new CaServiceError(
|
||||
`CSR SANs do not match expected set. Missing: [${missing.join(', ')}], Extra: [${extra.join(', ')}]`,
|
||||
'The CSR must include exactly the SANs specified in the issuance request.',
|
||||
undefined,
|
||||
'INVALID_CSR',
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Return the CN from the CSR subject for use as JWT sub
|
||||
const cn = csr.subjectName.getField('CN')?.[0] ?? '';
|
||||
return cn;
|
||||
}
|
||||
|
||||
  /**
   * Submit a CSR to step-ca and return the issued certificate.
   *
   * Flow: clamp TTL → validate CSR → build a one-time token (OTT) → POST the
   * CSR to step-ca's /1.0/sign endpoint → assemble the chain PEM → verify the
   * issued certificate carries both Mosaic OID extensions with the expected
   * values before returning.
   *
   * @param req - CSR PEM, grant/subject identifiers, and optional TTL seconds.
   * @returns leaf cert PEM, chain PEM, and the certificate serial number.
   * @throws CaServiceError on any failure (network, auth, malformed input,
   *   missing/mismatched OIDs). Never silently swallows errors — fail-loud is
   *   a hard contract per M2-02 review.
   */
  async issueCert(req: IssueCertRequestDto): Promise<IssuedCertDto> {
    // Clamp TTL to 15-minute maximum (H2); defaults to 5 minutes when unset.
    const ttl = Math.min(req.ttlSeconds ?? 300, 900);

    this.logger.debug(
      `issueCert — grantId=${req.grantId} subjectUserId=${req.subjectUserId} ttl=${ttl}s`,
    );

    // Validate CSR — real cryptographic validation (H3). Returns the subject CN,
    // which is embedded into the OTT below.
    const csrCn = await this.validateCsr(req.csrPem);

    // Build the step-ca one-time token binding this CSR to the grant/subject.
    const ott = await this.buildOtt({
      csrPem: req.csrPem,
      grantId: req.grantId,
      subjectUserId: req.subjectUserId,
      ttlSeconds: ttl,
      csrCn,
    });

    const signUrl = `${this.caUrl}/1.0/sign`;
    const requestBody = {
      csr: req.csrPem,
      ott,
      validity: {
        duration: `${ttl}s`,
      },
    };

    this.logger.debug(`Posting CSR to ${signUrl}`);
    const response = await httpsPost(signUrl, requestBody, this.httpsAgent);

    if (!response.crt) {
      throw new CaServiceError(
        'step-ca sign response missing the "crt" field',
        'This is unexpected — the step-ca instance may be misconfigured or running an incompatible version.',
      );
    }

    // Build certChainPem: prefer certChain array, fall back to ca field, fall back to crt alone.
    let certChainPem: string;
    if (response.certChain && response.certChain.length > 0) {
      certChainPem = response.certChain.join('\n');
    } else if (response.ca) {
      certChainPem = response.crt + '\n' + response.ca;
    } else {
      certChainPem = response.crt;
    }

    const serialNumber = extractSerial(response.crt);

    // CRIT-1: Verify the issued certificate contains both Mosaic OID extensions
    // with the correct values. Step-CA's federation.tpl encodes each as an ASN.1
    // UTF8String TLV: tag 0x0C + 1-byte length + UUID bytes. We skip 2 bytes
    // (tag + length) to extract the raw UUID string.
    // NOTE(review): the 2-byte skip assumes a single-byte length form (value
    // shorter than 128 bytes) — true for UUIDs, but would break for longer values.
    // NOTE(review): X509Certificate here appears to be @peculiar/x509's class
    // (it exposes getExtension); confirm against this file's import block, which
    // is outside this chunk.
    const issuedCert = new X509Certificate(response.crt);
    const decoder = new TextDecoder();

    const grantIdExt = issuedCert.getExtension('1.3.6.1.4.1.99999.1');
    if (!grantIdExt) {
      throw new CaServiceError(
        'Issued certificate is missing required Mosaic OID: mosaic_grant_id',
        'The Step-CA federation.tpl template did not embed OID 1.3.6.1.4.1.99999.1. Check the provisioner template configuration.',
        undefined,
        'OID_MISSING',
      );
    }
    const grantIdInCert = decoder.decode(grantIdExt.value.slice(2));
    if (grantIdInCert !== req.grantId) {
      throw new CaServiceError(
        `Issued certificate mosaic_grant_id mismatch: expected ${req.grantId}, got ${grantIdInCert}`,
        'The Step-CA issued a certificate with a different grant ID than requested. This may indicate a provisioner misconfiguration or a MITM.',
        undefined,
        'OID_MISMATCH',
      );
    }

    const subjectUserIdExt = issuedCert.getExtension('1.3.6.1.4.1.99999.2');
    if (!subjectUserIdExt) {
      throw new CaServiceError(
        'Issued certificate is missing required Mosaic OID: mosaic_subject_user_id',
        'The Step-CA federation.tpl template did not embed OID 1.3.6.1.4.1.99999.2. Check the provisioner template configuration.',
        undefined,
        'OID_MISSING',
      );
    }
    const subjectUserIdInCert = decoder.decode(subjectUserIdExt.value.slice(2));
    if (subjectUserIdInCert !== req.subjectUserId) {
      throw new CaServiceError(
        `Issued certificate mosaic_subject_user_id mismatch: expected ${req.subjectUserId}, got ${subjectUserIdInCert}`,
        'The Step-CA issued a certificate with a different subject user ID than requested. This may indicate a provisioner misconfiguration or a MITM.',
        undefined,
        'OID_MISMATCH',
      );
    }

    this.logger.log(`Certificate issued — serial=${serialNumber} grantId=${req.grantId}`);

    const result = new IssuedCertDto();
    result.certPem = response.crt;
    result.certChainPem = certChainPem;
    result.serialNumber = serialNumber;
    return result;
  }
|
||||
}
|
||||
54
apps/gateway/src/federation/enrollment.controller.ts
Normal file
54
apps/gateway/src/federation/enrollment.controller.ts
Normal file
@@ -0,0 +1,54 @@
|
||||
/**
|
||||
* EnrollmentController — federation enrollment HTTP layer (FED-M2-07).
|
||||
*
|
||||
* Routes:
|
||||
* POST /api/federation/enrollment/tokens — admin creates a single-use token
|
||||
* POST /api/federation/enrollment/:token — unauthenticated; token IS the auth
|
||||
*/
|
||||
|
||||
import {
|
||||
Body,
|
||||
Controller,
|
||||
HttpCode,
|
||||
HttpStatus,
|
||||
Inject,
|
||||
Param,
|
||||
Post,
|
||||
UseGuards,
|
||||
} from '@nestjs/common';
|
||||
import { AdminGuard } from '../admin/admin.guard.js';
|
||||
import { EnrollmentService } from './enrollment.service.js';
|
||||
import { CreateEnrollmentTokenDto, RedeemEnrollmentTokenDto } from './enrollment.dto.js';
|
||||
|
||||
@Controller('api/federation/enrollment')
export class EnrollmentController {
  // EnrollmentService owns token validation, atomic claiming, and cert issuance.
  constructor(@Inject(EnrollmentService) private readonly enrollmentService: EnrollmentService) {}

  /**
   * Admin-only: generate a single-use enrollment token for a pending grant.
   * The token should be distributed out-of-band to the remote peer operator.
   *
   * POST /api/federation/enrollment/tokens
   *
   * Declared before the ':token' wildcard route below so the literal 'tokens'
   * path is matched here first (relies on NestJS declaration-order routing).
   *
   * @param dto grant/peer binding plus optional TTL (DTO-validated 60–900 s).
   * @returns 201 with the token string and its expiry timestamp.
   */
  @Post('tokens')
  @UseGuards(AdminGuard)
  @HttpCode(HttpStatus.CREATED)
  async createToken(@Body() dto: CreateEnrollmentTokenDto) {
    return this.enrollmentService.createToken(dto);
  }

  /**
   * Unauthenticated: remote peer redeems a token by submitting its CSR.
   * The token itself is the credential — no session or bearer token required.
   *
   * POST /api/federation/enrollment/:token
   *
   * Returns the signed leaf cert and full chain PEM on success.
   * Returns 410 Gone if the token was already used or has expired.
   *
   * @param token single-use enrollment token from the URL path.
   * @param dto   body carrying the peer's PEM-encoded PKCS#10 CSR.
   */
  @Post(':token')
  @HttpCode(HttpStatus.OK)
  async redeem(@Param('token') token: string, @Body() dto: RedeemEnrollmentTokenDto) {
    return this.enrollmentService.redeem(token, dto.csrPem);
  }
}
|
||||
35
apps/gateway/src/federation/enrollment.dto.ts
Normal file
35
apps/gateway/src/federation/enrollment.dto.ts
Normal file
@@ -0,0 +1,35 @@
|
||||
/**
|
||||
* DTOs for the federation enrollment flow (FED-M2-07).
|
||||
*
|
||||
* CreateEnrollmentTokenDto — admin generates a single-use enrollment token
|
||||
* RedeemEnrollmentTokenDto — remote peer submits CSR to redeem the token
|
||||
*/
|
||||
|
||||
import { IsInt, IsNotEmpty, IsOptional, IsString, IsUUID, Max, Min } from 'class-validator';
|
||||
|
||||
export class CreateEnrollmentTokenDto {
  /** UUID of the federation grant this token will activate on redemption. */
  @IsUUID()
  grantId!: string;

  /** UUID of the peer record that will receive the issued cert on redemption. */
  @IsUUID()
  peerId!: string;

  /**
   * Token lifetime in seconds. Default 900 (15 min). Min 60. Max 900.
   * After this time the token is rejected even if unused.
   * Note: the default applies only when the field is omitted entirely;
   * out-of-range values are rejected by the Min/Max validators.
   */
  @IsOptional()
  @IsInt()
  @Min(60)
  @Max(900)
  ttlSeconds: number = 900;
}

export class RedeemEnrollmentTokenDto {
  /** PEM-encoded PKCS#10 Certificate Signing Request from the remote peer. */
  @IsString()
  @IsNotEmpty()
  csrPem!: string;
}
|
||||
281
apps/gateway/src/federation/enrollment.service.ts
Normal file
281
apps/gateway/src/federation/enrollment.service.ts
Normal file
@@ -0,0 +1,281 @@
|
||||
/**
|
||||
* EnrollmentService — single-use enrollment token lifecycle (FED-M2-07).
|
||||
*
|
||||
* Responsibilities:
|
||||
* 1. Generate time-limited single-use enrollment tokens (admin action).
|
||||
* 2. Redeem a token: validate → atomically claim token → issue cert via
|
||||
* CaService → transactionally activate grant + update peer + write audit.
|
||||
*
|
||||
* Replay protection: the token is claimed (UPDATE WHERE used_at IS NULL) BEFORE
|
||||
* cert issuance. This prevents double cert minting on concurrent requests.
|
||||
* If cert issuance fails after claim, the token is consumed and the grant
|
||||
* stays pending — admin must create a new grant.
|
||||
*/
|
||||
|
||||
import {
|
||||
BadRequestException,
|
||||
ConflictException,
|
||||
GoneException,
|
||||
Inject,
|
||||
Injectable,
|
||||
Logger,
|
||||
NotFoundException,
|
||||
} from '@nestjs/common';
|
||||
import * as crypto from 'node:crypto';
|
||||
// X509Certificate is available as a named export in Node.js ≥ 15.6
|
||||
const { X509Certificate } = crypto;
|
||||
import {
|
||||
type Db,
|
||||
and,
|
||||
eq,
|
||||
isNull,
|
||||
sql,
|
||||
federationEnrollmentTokens,
|
||||
federationGrants,
|
||||
federationPeers,
|
||||
federationAuditLog,
|
||||
} from '@mosaicstack/db';
|
||||
import { DB } from '../database/database.module.js';
|
||||
import { CaService } from './ca.service.js';
|
||||
import { GrantsService } from './grants.service.js';
|
||||
import { FederationScopeError } from './scope-schema.js';
|
||||
import type { CreateEnrollmentTokenDto } from './enrollment.dto.js';
|
||||
|
||||
/** Result of creating an enrollment token (admin flow). */
export interface EnrollmentTokenResult {
  /** 64-char hex token (256 bits of entropy); distributed out-of-band. */
  token: string;
  /** ISO-8601 timestamp after which the token is rejected even if unused. */
  expiresAt: string;
}

/** Certificate material returned to the redeeming peer. */
export interface RedeemResult {
  /** PEM-encoded signed leaf certificate. */
  certPem: string;
  /** PEM-encoded chain as assembled by CaService (leaf plus CA material). */
  certChainPem: string;
}
|
||||
|
||||
@Injectable()
|
||||
export class EnrollmentService {
|
||||
private readonly logger = new Logger(EnrollmentService.name);
|
||||
|
||||
constructor(
|
||||
@Inject(DB) private readonly db: Db,
|
||||
private readonly caService: CaService,
|
||||
private readonly grantsService: GrantsService,
|
||||
) {}
|
||||
|
||||
/**
|
||||
* Generate a single-use enrollment token for an admin to distribute
|
||||
* out-of-band to the remote peer operator.
|
||||
*/
|
||||
async createToken(dto: CreateEnrollmentTokenDto): Promise<EnrollmentTokenResult> {
|
||||
const ttl = Math.min(dto.ttlSeconds, 900);
|
||||
|
||||
// MED-3: Verify the grantId ↔ peerId binding — prevents attacker from
|
||||
// cross-wiring grants to attacker-controlled peers.
|
||||
const [grant] = await this.db
|
||||
.select({ peerId: federationGrants.peerId })
|
||||
.from(federationGrants)
|
||||
.where(eq(federationGrants.id, dto.grantId))
|
||||
.limit(1);
|
||||
if (!grant) {
|
||||
throw new NotFoundException(`Grant ${dto.grantId} not found`);
|
||||
}
|
||||
if (grant.peerId !== dto.peerId) {
|
||||
throw new BadRequestException(`peerId does not match the grant's registered peer`);
|
||||
}
|
||||
|
||||
const token = crypto.randomBytes(32).toString('hex');
|
||||
const expiresAt = new Date(Date.now() + ttl * 1000);
|
||||
|
||||
await this.db.insert(federationEnrollmentTokens).values({
|
||||
token,
|
||||
grantId: dto.grantId,
|
||||
peerId: dto.peerId,
|
||||
expiresAt,
|
||||
});
|
||||
|
||||
this.logger.log(
|
||||
`Enrollment token created — grantId=${dto.grantId} peerId=${dto.peerId} expiresAt=${expiresAt.toISOString()}`,
|
||||
);
|
||||
|
||||
return { token, expiresAt: expiresAt.toISOString() };
|
||||
}
|
||||
|
||||
/**
|
||||
* Redeem an enrollment token.
|
||||
*
|
||||
* Full flow:
|
||||
* 1. Fetch token row — NotFoundException if not found
|
||||
* 2. usedAt set → GoneException (already used)
|
||||
* 3. expiresAt < now → GoneException (expired)
|
||||
* 4. Load grant — verify status is 'pending'
|
||||
* 5. Atomically claim token (UPDATE WHERE used_at IS NULL RETURNING token)
|
||||
* — if no rows returned, concurrent request won → GoneException
|
||||
* 6. Issue cert via CaService (network call, outside transaction)
|
||||
* — if this fails, token is consumed; grant stays pending; admin must recreate
|
||||
* 7. Transaction: activate grant + update peer record + write audit log
|
||||
* 8. Return { certPem, certChainPem }
|
||||
*/
|
||||
async redeem(token: string, csrPem: string): Promise<RedeemResult> {
|
||||
// HIGH-5: Track outcome so we can write a failure audit row on any error.
|
||||
let outcome: 'allowed' | 'denied' = 'denied';
|
||||
// row may be undefined if the token is not found — used defensively in catch.
|
||||
let row: typeof federationEnrollmentTokens.$inferSelect | undefined;
|
||||
|
||||
try {
|
||||
// 1. Fetch token row
|
||||
const [fetchedRow] = await this.db
|
||||
.select()
|
||||
.from(federationEnrollmentTokens)
|
||||
.where(eq(federationEnrollmentTokens.token, token))
|
||||
.limit(1);
|
||||
|
||||
if (!fetchedRow) {
|
||||
throw new NotFoundException('Enrollment token not found');
|
||||
}
|
||||
row = fetchedRow;
|
||||
|
||||
// 2. Already used?
|
||||
if (row.usedAt !== null) {
|
||||
throw new GoneException('Enrollment token has already been used');
|
||||
}
|
||||
|
||||
// 3. Expired?
|
||||
if (row.expiresAt < new Date()) {
|
||||
throw new GoneException('Enrollment token has expired');
|
||||
}
|
||||
|
||||
// 4. Load grant and verify it is still pending
|
||||
let grant;
|
||||
try {
|
||||
grant = await this.grantsService.getGrant(row.grantId);
|
||||
} catch (err) {
|
||||
if (err instanceof FederationScopeError) {
|
||||
throw new BadRequestException(err.message);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
if (grant.status !== 'pending') {
|
||||
throw new GoneException(
|
||||
`Grant ${row.grantId} is no longer pending (status: ${grant.status})`,
|
||||
);
|
||||
}
|
||||
|
||||
// 5. Atomically claim the token BEFORE cert issuance to prevent double-minting.
|
||||
// WHERE used_at IS NULL ensures only one concurrent request wins.
|
||||
// Using .returning() works on both node-postgres and PGlite without rowCount inspection.
|
||||
const claimed = await this.db
|
||||
.update(federationEnrollmentTokens)
|
||||
.set({ usedAt: sql`NOW()` })
|
||||
.where(
|
||||
and(
|
||||
eq(federationEnrollmentTokens.token, token),
|
||||
isNull(federationEnrollmentTokens.usedAt),
|
||||
),
|
||||
)
|
||||
.returning({ token: federationEnrollmentTokens.token });
|
||||
|
||||
if (claimed.length === 0) {
|
||||
throw new GoneException('Enrollment token has already been used (concurrent request)');
|
||||
}
|
||||
|
||||
// 6. Issue certificate via CaService (network call — outside any transaction).
|
||||
// If this throws, the token is already consumed. The grant stays pending.
|
||||
// Admin must revoke the grant and create a new one.
|
||||
let issued;
|
||||
try {
|
||||
issued = await this.caService.issueCert({
|
||||
csrPem,
|
||||
grantId: row.grantId,
|
||||
subjectUserId: grant.subjectUserId,
|
||||
ttlSeconds: 300,
|
||||
});
|
||||
} catch (err) {
|
||||
// HIGH-4: Log only the first 8 hex chars of the token for correlation — never log the full token.
|
||||
this.logger.error(
|
||||
`issueCert failed after token ${token.slice(0, 8)}... was claimed — grant ${row.grantId} is stranded pending`,
|
||||
err instanceof Error ? err.stack : String(err),
|
||||
);
|
||||
if (err instanceof FederationScopeError) {
|
||||
throw new BadRequestException((err as Error).message);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
// 7. Atomically activate grant, update peer record, and write audit log.
|
||||
const certNotAfter = this.extractCertNotAfter(issued.certPem);
|
||||
await this.db.transaction(async (tx) => {
|
||||
// CRIT-2: Guard activation with WHERE status='pending' to prevent double-activation.
|
||||
const [activated] = await tx
|
||||
.update(federationGrants)
|
||||
.set({ status: 'active' })
|
||||
.where(and(eq(federationGrants.id, row!.grantId), eq(federationGrants.status, 'pending')))
|
||||
.returning({ id: federationGrants.id });
|
||||
if (!activated) {
|
||||
throw new ConflictException(
|
||||
`Grant ${row!.grantId} is no longer pending — cannot activate`,
|
||||
);
|
||||
}
|
||||
|
||||
// CRIT-2: Guard peer update with WHERE state='pending'.
|
||||
await tx
|
||||
.update(federationPeers)
|
||||
.set({
|
||||
certPem: issued.certPem,
|
||||
certSerial: issued.serialNumber,
|
||||
certNotAfter,
|
||||
state: 'active',
|
||||
})
|
||||
.where(and(eq(federationPeers.id, row!.peerId), eq(federationPeers.state, 'pending')));
|
||||
|
||||
await tx.insert(federationAuditLog).values({
|
||||
requestId: crypto.randomUUID(),
|
||||
peerId: row!.peerId,
|
||||
grantId: row!.grantId,
|
||||
verb: 'enrollment',
|
||||
resource: 'federation_grant',
|
||||
statusCode: 200,
|
||||
outcome: 'allowed',
|
||||
});
|
||||
});
|
||||
|
||||
this.logger.log(
|
||||
`Enrollment complete — peerId=${row.peerId} grantId=${row.grantId} serial=${issued.serialNumber}`,
|
||||
);
|
||||
|
||||
outcome = 'allowed';
|
||||
|
||||
// 8. Return cert material
|
||||
return {
|
||||
certPem: issued.certPem,
|
||||
certChainPem: issued.certChainPem,
|
||||
};
|
||||
} catch (err) {
|
||||
// HIGH-5: Best-effort audit write on failure — do not let this throw.
|
||||
if (outcome === 'denied') {
|
||||
await this.db
|
||||
.insert(federationAuditLog)
|
||||
.values({
|
||||
requestId: crypto.randomUUID(),
|
||||
peerId: row?.peerId ?? null,
|
||||
grantId: row?.grantId ?? null,
|
||||
verb: 'enrollment',
|
||||
resource: 'federation_grant',
|
||||
statusCode:
|
||||
err instanceof GoneException ? 410 : err instanceof NotFoundException ? 404 : 500,
|
||||
outcome: 'denied',
|
||||
})
|
||||
.catch(() => {});
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the notAfter date from a PEM certificate.
|
||||
* HIGH-2: No silent fallback — a cert that cannot be parsed should fail loud.
|
||||
*/
|
||||
private extractCertNotAfter(certPem: string): Date {
|
||||
const cert = new X509Certificate(certPem);
|
||||
return new Date(cert.validTo);
|
||||
}
|
||||
}
|
||||
39
apps/gateway/src/federation/federation-admin.dto.ts
Normal file
39
apps/gateway/src/federation/federation-admin.dto.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
/**
|
||||
* DTOs for the federation admin controller (FED-M2-08).
|
||||
*/
|
||||
|
||||
import { IsInt, IsNotEmpty, IsOptional, IsString, IsUrl, Max, Min } from 'class-validator';
|
||||
|
||||
export class CreatePeerKeypairDto {
  /** X.509 Common Name placed in the generated CSR subject (CN=<commonName>). */
  @IsString()
  @IsNotEmpty()
  commonName!: string;

  /** Human-readable label for the peer, shown in admin listings. */
  @IsString()
  @IsNotEmpty()
  displayName!: string;

  /** Optional URL of the remote peer's gateway endpoint. */
  @IsOptional()
  @IsUrl()
  endpointUrl?: string;
}

export class StorePeerCertDto {
  /** PEM-encoded signed certificate to store on the peer record. */
  @IsString()
  @IsNotEmpty()
  certPem!: string;
}

export class GenerateEnrollmentTokenDto {
  /** Token lifetime in seconds — min 60, max 900; defaults to 900 (15 min). */
  @IsOptional()
  @IsInt()
  @Min(60)
  @Max(900)
  ttlSeconds: number = 900;
}

export class RevokeGrantBodyDto {
  /** Optional free-text reason recorded alongside the revocation. */
  @IsOptional()
  @IsString()
  reason?: string;
}
|
||||
266
apps/gateway/src/federation/federation.controller.ts
Normal file
266
apps/gateway/src/federation/federation.controller.ts
Normal file
@@ -0,0 +1,266 @@
|
||||
/**
|
||||
* FederationController — admin REST API for federation management (FED-M2-08).
|
||||
*
|
||||
* Routes (all under /api/admin/federation, all require AdminGuard):
|
||||
*
|
||||
* Grant management:
|
||||
* POST /api/admin/federation/grants
|
||||
* GET /api/admin/federation/grants
|
||||
* GET /api/admin/federation/grants/:id
|
||||
* PATCH /api/admin/federation/grants/:id/revoke
|
||||
* POST /api/admin/federation/grants/:id/tokens
|
||||
*
|
||||
* Peer management:
|
||||
* GET /api/admin/federation/peers
|
||||
* POST /api/admin/federation/peers/keypair
|
||||
* PATCH /api/admin/federation/peers/:id/cert
|
||||
*
|
||||
* NOTE: The enrollment REDEMPTION endpoint (POST /api/federation/enrollment/:token)
|
||||
* is handled by EnrollmentController — not duplicated here.
|
||||
*/
|
||||
|
||||
import {
|
||||
Body,
|
||||
Controller,
|
||||
Get,
|
||||
HttpCode,
|
||||
HttpStatus,
|
||||
Inject,
|
||||
NotFoundException,
|
||||
Param,
|
||||
Patch,
|
||||
Post,
|
||||
Query,
|
||||
UseGuards,
|
||||
} from '@nestjs/common';
|
||||
import { webcrypto } from 'node:crypto';
|
||||
import { X509Certificate } from 'node:crypto';
|
||||
import { Pkcs10CertificateRequestGenerator } from '@peculiar/x509';
|
||||
import { type Db, eq, federationPeers } from '@mosaicstack/db';
|
||||
import { DB } from '../database/database.module.js';
|
||||
import { AdminGuard } from '../admin/admin.guard.js';
|
||||
import { GrantsService } from './grants.service.js';
|
||||
import { EnrollmentService } from './enrollment.service.js';
|
||||
import { sealClientKey } from './peer-key.util.js';
|
||||
import { CreateGrantDto, ListGrantsDto } from './grants.dto.js';
|
||||
import {
|
||||
CreatePeerKeypairDto,
|
||||
GenerateEnrollmentTokenDto,
|
||||
RevokeGrantBodyDto,
|
||||
StorePeerCertDto,
|
||||
} from './federation-admin.dto.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Convert an ArrayBuffer to a Base64 string (for PEM encoding).
|
||||
*/
|
||||
function arrayBufferToBase64(buf: ArrayBuffer): string {
|
||||
const bytes = new Uint8Array(buf);
|
||||
let binary = '';
|
||||
for (const b of bytes) {
|
||||
binary += String.fromCharCode(b);
|
||||
}
|
||||
return Buffer.from(binary, 'binary').toString('base64');
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrap a Base64 string in PEM armour.
|
||||
*/
|
||||
function toPem(label: string, b64: string): string {
|
||||
const lines = b64.match(/.{1,64}/g) ?? [];
|
||||
return `-----BEGIN ${label}-----\n${lines.join('\n')}\n-----END ${label}-----\n`;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Controller
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
@Controller('api/admin/federation')
@UseGuards(AdminGuard)
export class FederationController {
  constructor(
    @Inject(DB) private readonly db: Db,
    @Inject(GrantsService) private readonly grantsService: GrantsService,
    @Inject(EnrollmentService) private readonly enrollmentService: EnrollmentService,
  ) {}

  // ─── Grant management ────────────────────────────────────────────────────

  /**
   * POST /api/admin/federation/grants
   * Create a new grant in pending state. Scope validation happens in
   * GrantsService.createGrant.
   */
  @Post('grants')
  @HttpCode(HttpStatus.CREATED)
  async createGrant(@Body() body: CreateGrantDto) {
    return this.grantsService.createGrant(body);
  }

  /**
   * GET /api/admin/federation/grants
   * List grants with optional peerId/subjectUserId/status filters.
   */
  @Get('grants')
  async listGrants(@Query() query: ListGrantsDto) {
    return this.grantsService.listGrants(query);
  }

  /**
   * GET /api/admin/federation/grants/:id
   * Get a single grant by ID (404 from the service if absent).
   */
  @Get('grants/:id')
  async getGrant(@Param('id') id: string) {
    return this.grantsService.getGrant(id);
  }

  /**
   * PATCH /api/admin/federation/grants/:id/revoke
   * Revoke an active grant, recording the optional reason.
   */
  @Patch('grants/:id/revoke')
  async revokeGrant(@Param('id') id: string, @Body() body: RevokeGrantBodyDto) {
    return this.grantsService.revokeGrant(id, body.reason);
  }

  /**
   * POST /api/admin/federation/grants/:id/tokens
   * Generate a single-use enrollment token for a pending grant.
   * Returns the token plus an enrollmentUrl the operator shares out-of-band.
   */
  @Post('grants/:id/tokens')
  @HttpCode(HttpStatus.CREATED)
  async generateToken(@Param('id') id: string, @Body() body: GenerateEnrollmentTokenDto) {
    // Loading the grant both 404s on a bad ID and supplies the bound peerId.
    const grant = await this.grantsService.getGrant(id);

    const result = await this.enrollmentService.createToken({
      grantId: id,
      peerId: grant.peerId,
      ttlSeconds: body.ttlSeconds ?? 900,
    });

    // Base URL for the redemption link; falls back to the local dev gateway.
    const baseUrl = process.env['BETTER_AUTH_URL'] ?? 'http://localhost:14242';
    const enrollmentUrl = `${baseUrl}/api/federation/enrollment/${result.token}`;

    return {
      token: result.token,
      expiresAt: result.expiresAt,
      enrollmentUrl,
    };
  }

  // ─── Peer management ─────────────────────────────────────────────────────

  /**
   * GET /api/admin/federation/peers
   * List all federation peer rows, ordered by common name.
   * NOTE(review): this returns full rows — presumably including the sealed
   * clientKeyPem blob; confirm whether that should be redacted for admins.
   */
  @Get('peers')
  async listPeers() {
    return this.db.select().from(federationPeers).orderBy(federationPeers.commonName);
  }

  /**
   * POST /api/admin/federation/peers/keypair
   * Generate a new peer entry with EC P-256 key pair and a PKCS#10 CSR.
   *
   * Flow:
   *  1. Generate EC P-256 key pair via webcrypto
   *  2. Generate a CSR via @peculiar/x509
   *  3. Export private key as PKCS#8 PEM
   *  4. sealClientKey(privatePem) → sealed blob (only the sealed form is stored)
   *  5. Insert pending peer row with placeholder cert fields
   *  6. Return { peerId, csrPem } — the private key never leaves the server
   */
  @Post('peers/keypair')
  @HttpCode(HttpStatus.CREATED)
  async createPeerKeypair(@Body() body: CreatePeerKeypairDto) {
    // 1. Generate EC P-256 key pair via Web Crypto
    const keyPair = await webcrypto.subtle.generateKey(
      { name: 'ECDSA', namedCurve: 'P-256' },
      true, // extractable — required to export the private key for sealing below
      ['sign', 'verify'],
    );

    // 2. Generate PKCS#10 CSR signed with the new key
    const csr = await Pkcs10CertificateRequestGenerator.create({
      name: `CN=${body.commonName}`,
      keys: keyPair,
      signingAlgorithm: { name: 'ECDSA', hash: 'SHA-256' },
    });

    const csrPem = csr.toString('pem');

    // 3. Export private key as PKCS#8 PEM
    const pkcs8Der = await webcrypto.subtle.exportKey('pkcs8', keyPair.privateKey);
    const privatePem = toPem('PRIVATE KEY', arrayBufferToBase64(pkcs8Der));

    // 4. Seal the private key — plaintext PEM is never persisted
    const sealed = sealClientKey(privatePem);

    // 5. Insert pending peer row; cert fields hold placeholders until a signed
    //    cert is stored (certNotAfter = epoch marks "no cert yet").
    const [peer] = await this.db
      .insert(federationPeers)
      .values({
        commonName: body.commonName,
        displayName: body.displayName,
        certPem: '',
        certSerial: 'pending',
        certNotAfter: new Date(0),
        clientKeyPem: sealed,
        state: 'pending',
        endpointUrl: body.endpointUrl,
      })
      .returning();

    // 6. Only the peer ID and public CSR go back to the caller.
    return {
      peerId: peer!.id,
      csrPem,
    };
  }

  /**
   * PATCH /api/admin/federation/peers/:id/cert
   * Store a signed certificate after enrollment completes.
   *
   * Flow:
   *  1. Parse the cert to extract serial and notAfter
   *  2. Update the peer row with cert data + state='active'
   *  3. Return the updated peer row
   *
   * NOTE(review): a malformed certPem makes the X509Certificate constructor
   * throw, surfacing as a 500 — consider mapping that to a 400 BadRequest.
   * Also unconditionally flips state to 'active' regardless of prior state;
   * confirm that is intended for re-issuance.
   */
  @Patch('peers/:id/cert')
  async storePeerCert(@Param('id') id: string, @Body() body: StorePeerCertDto) {
    // Ensure peer exists before parsing anything
    const [existing] = await this.db
      .select({ id: federationPeers.id })
      .from(federationPeers)
      .where(eq(federationPeers.id, id))
      .limit(1);

    if (!existing) {
      throw new NotFoundException(`Peer ${id} not found`);
    }

    // 1. Parse cert (node:crypto X509Certificate — throws on invalid PEM)
    const x509 = new X509Certificate(body.certPem);
    const certSerial = x509.serialNumber;
    const certNotAfter = new Date(x509.validTo);

    // 2. Update peer with cert material and activate it
    const [updated] = await this.db
      .update(federationPeers)
      .set({
        certPem: body.certPem,
        certSerial,
        certNotAfter,
        state: 'active',
      })
      .where(eq(federationPeers.id, id))
      .returning();

    return updated;
  }
}
|
||||
14
apps/gateway/src/federation/federation.module.ts
Normal file
14
apps/gateway/src/federation/federation.module.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import { Module } from '@nestjs/common';
|
||||
import { AdminGuard } from '../admin/admin.guard.js';
|
||||
import { CaService } from './ca.service.js';
|
||||
import { EnrollmentController } from './enrollment.controller.js';
|
||||
import { EnrollmentService } from './enrollment.service.js';
|
||||
import { FederationController } from './federation.controller.js';
|
||||
import { GrantsService } from './grants.service.js';
|
||||
|
||||
@Module({
  // EnrollmentController serves the public token-redemption route;
  // FederationController serves the admin management API.
  controllers: [EnrollmentController, FederationController],
  // AdminGuard is provided here so the controllers can apply it via @UseGuards.
  providers: [AdminGuard, CaService, EnrollmentService, GrantsService],
  // Exported so other modules can reuse the federation services directly.
  exports: [CaService, EnrollmentService, GrantsService],
})
export class FederationModule {}
|
||||
36
apps/gateway/src/federation/grants.dto.ts
Normal file
36
apps/gateway/src/federation/grants.dto.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { IsDateString, IsIn, IsObject, IsOptional, IsString, IsUUID } from 'class-validator';
|
||||
|
||||
export class CreateGrantDto {
  /** UUID of the federation peer the grant is issued to. */
  @IsUUID()
  peerId!: string;

  /** UUID of the local user whose data the grant covers. */
  @IsUUID()
  subjectUserId!: string;

  /** Scope document; validated against the federation scope schema in GrantsService. */
  @IsObject()
  scope!: Record<string, unknown>;

  /** Optional ISO-8601 expiry; when omitted the grant has no automatic expiry. */
  @IsOptional()
  @IsDateString()
  expiresAt?: string;
}

export class ListGrantsDto {
  /** Filter: only grants belonging to this peer. */
  @IsOptional()
  @IsUUID()
  peerId?: string;

  /** Filter: only grants covering this subject user. */
  @IsOptional()
  @IsUUID()
  subjectUserId?: string;

  /** Filter: only grants in this lifecycle state. */
  @IsOptional()
  @IsIn(['pending', 'active', 'revoked', 'expired'])
  status?: 'pending' | 'active' | 'revoked' | 'expired';
}

export class RevokeGrantDto {
  /** Optional free-text reason recorded with the revocation. */
  @IsOptional()
  @IsString()
  reason?: string;
}
|
||||
161
apps/gateway/src/federation/grants.service.ts
Normal file
161
apps/gateway/src/federation/grants.service.ts
Normal file
@@ -0,0 +1,161 @@
|
||||
/**
|
||||
* Federation grants service — CRUD + status transitions (FED-M2-06).
|
||||
*
|
||||
* Business logic only. CSR/cert work is handled by M2-07.
|
||||
*
|
||||
* Status lifecycle:
|
||||
* pending → active (activateGrant, called by M2-07 enrollment controller after cert signed)
|
||||
* active → revoked (revokeGrant)
|
||||
* active → expired (expireGrant, called by M6 scheduler)
|
||||
*/
|
||||
|
||||
import { ConflictException, Inject, Injectable, NotFoundException } from '@nestjs/common';
|
||||
import { type Db, and, eq, federationGrants } from '@mosaicstack/db';
|
||||
import { DB } from '../database/database.module.js';
|
||||
import { parseFederationScope } from './scope-schema.js';
|
||||
import type { CreateGrantDto, ListGrantsDto } from './grants.dto.js';
|
||||
|
||||
export type Grant = typeof federationGrants.$inferSelect;
|
||||
|
||||
@Injectable()
|
||||
export class GrantsService {
|
||||
constructor(@Inject(DB) private readonly db: Db) {}
|
||||
|
||||
/**
|
||||
* Create a new grant in `pending` state.
|
||||
* Validates the scope against the federation scope JSON schema before inserting.
|
||||
*/
|
||||
async createGrant(dto: CreateGrantDto): Promise<Grant> {
|
||||
// Throws FederationScopeError (a plain Error subclass) on invalid scope.
|
||||
parseFederationScope(dto.scope);
|
||||
|
||||
const [grant] = await this.db
|
||||
.insert(federationGrants)
|
||||
.values({
|
||||
peerId: dto.peerId,
|
||||
subjectUserId: dto.subjectUserId,
|
||||
scope: dto.scope,
|
||||
status: 'pending',
|
||||
expiresAt: dto.expiresAt != null ? new Date(dto.expiresAt) : null,
|
||||
})
|
||||
.returning();
|
||||
|
||||
return grant!;
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch a single grant by ID. Throws NotFoundException if not found.
|
||||
*/
|
||||
async getGrant(id: string): Promise<Grant> {
|
||||
const [grant] = await this.db
|
||||
.select()
|
||||
.from(federationGrants)
|
||||
.where(eq(federationGrants.id, id))
|
||||
.limit(1);
|
||||
|
||||
if (!grant) {
|
||||
throw new NotFoundException(`Grant ${id} not found`);
|
||||
}
|
||||
|
||||
return grant;
|
||||
}
|
||||
|
||||
/**
|
||||
* List grants with optional filters for peerId, subjectUserId, and status.
|
||||
*/
|
||||
async listGrants(filters: ListGrantsDto): Promise<Grant[]> {
|
||||
const conditions = [];
|
||||
|
||||
if (filters.peerId != null) {
|
||||
conditions.push(eq(federationGrants.peerId, filters.peerId));
|
||||
}
|
||||
if (filters.subjectUserId != null) {
|
||||
conditions.push(eq(federationGrants.subjectUserId, filters.subjectUserId));
|
||||
}
|
||||
if (filters.status != null) {
|
||||
conditions.push(eq(federationGrants.status, filters.status));
|
||||
}
|
||||
|
||||
if (conditions.length === 0) {
|
||||
return this.db.select().from(federationGrants);
|
||||
}
|
||||
|
||||
return this.db
|
||||
.select()
|
||||
.from(federationGrants)
|
||||
.where(and(...conditions));
|
||||
}
|
||||
|
||||
/**
|
||||
* Transition a grant from `pending` → `active`.
|
||||
* Called by M2-07 enrollment controller after cert is signed.
|
||||
* Throws ConflictException if the grant is not in `pending` state.
|
||||
*/
|
||||
async activateGrant(id: string): Promise<Grant> {
|
||||
const grant = await this.getGrant(id);
|
||||
|
||||
if (grant.status !== 'pending') {
|
||||
throw new ConflictException(
|
||||
`Grant ${id} cannot be activated: expected status 'pending', got '${grant.status}'`,
|
||||
);
|
||||
}
|
||||
|
||||
const [updated] = await this.db
|
||||
.update(federationGrants)
|
||||
.set({ status: 'active' })
|
||||
.where(eq(federationGrants.id, id))
|
||||
.returning();
|
||||
|
||||
return updated!;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transition a grant from `active` → `revoked`.
|
||||
* Sets revokedAt and optionally revokedReason.
|
||||
* Throws ConflictException if the grant is not in `active` state.
|
||||
*/
|
||||
async revokeGrant(id: string, reason?: string): Promise<Grant> {
|
||||
const grant = await this.getGrant(id);
|
||||
|
||||
if (grant.status !== 'active') {
|
||||
throw new ConflictException(
|
||||
`Grant ${id} cannot be revoked: expected status 'active', got '${grant.status}'`,
|
||||
);
|
||||
}
|
||||
|
||||
const [updated] = await this.db
|
||||
.update(federationGrants)
|
||||
.set({
|
||||
status: 'revoked',
|
||||
revokedAt: new Date(),
|
||||
revokedReason: reason ?? null,
|
||||
})
|
||||
.where(eq(federationGrants.id, id))
|
||||
.returning();
|
||||
|
||||
return updated!;
|
||||
}
|
||||
|
||||
/**
|
||||
* Transition a grant from `active` → `expired`.
|
||||
* Intended for use by the M6 scheduler.
|
||||
* Throws ConflictException if the grant is not in `active` state.
|
||||
*/
|
||||
async expireGrant(id: string): Promise<Grant> {
|
||||
const grant = await this.getGrant(id);
|
||||
|
||||
if (grant.status !== 'active') {
|
||||
throw new ConflictException(
|
||||
`Grant ${id} cannot be expired: expected status 'active', got '${grant.status}'`,
|
||||
);
|
||||
}
|
||||
|
||||
const [updated] = await this.db
|
||||
.update(federationGrants)
|
||||
.set({ status: 'expired' })
|
||||
.where(eq(federationGrants.id, id))
|
||||
.returning();
|
||||
|
||||
return updated!;
|
||||
}
|
||||
}
|
||||
9
apps/gateway/src/federation/peer-key.util.ts
Normal file
9
apps/gateway/src/federation/peer-key.util.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
import { seal, unseal } from '@mosaicstack/auth';
|
||||
|
||||
/**
 * Seal a peer client private key (PEM string) for storage.
 * Thin wrapper over `seal` from @mosaicstack/auth — presumably symmetric
 * encryption at rest; confirm semantics in that package.
 */
export function sealClientKey(privateKeyPem: string): string {
  return seal(privateKeyPem);
}

/**
 * Reverse of sealClientKey: recover the plaintext PEM from a sealed value
 * via `unseal` from @mosaicstack/auth.
 */
export function unsealClientKey(sealedKey: string): string {
  return unseal(sealedKey);
}
|
||||
187
apps/gateway/src/federation/scope-schema.spec.ts
Normal file
187
apps/gateway/src/federation/scope-schema.spec.ts
Normal file
@@ -0,0 +1,187 @@
|
||||
/**
|
||||
* Unit tests for FederationScopeSchema and parseFederationScope.
|
||||
*
|
||||
* Coverage:
|
||||
* - Valid: minimal scope
|
||||
* - Valid: full PRD §8.1 example
|
||||
* - Valid: resources + excluded_resources (no overlap)
|
||||
* - Invalid: empty resources
|
||||
* - Invalid: unknown resource value
|
||||
* - Invalid: resources / excluded_resources intersection
|
||||
* - Invalid: filter key not in resources
|
||||
* - Invalid: max_rows_per_query = 0
|
||||
* - Invalid: max_rows_per_query = 10001
|
||||
* - Invalid: not an object / null
|
||||
* - Defaults: include_personal defaults to true; excluded_resources defaults to []
|
||||
* - Sentinel: console.warn fires for sensitive resources
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, afterEach } from 'vitest';
|
||||
import {
|
||||
parseFederationScope,
|
||||
FederationScopeError,
|
||||
FederationScopeSchema,
|
||||
} from './scope-schema.js';
|
||||
|
||||
// Restore all vi spies/mocks between tests so the console.warn spy
// assertions in the sentinel suite stay isolated.
afterEach(() => {
  vi.restoreAllMocks();
});

// Happy-path parsing: the parser returns the validated scope object.
describe('parseFederationScope — valid inputs', () => {
  it('accepts a minimal scope (resources + max_rows_per_query only)', () => {
    const scope = parseFederationScope({
      resources: ['tasks'],
      max_rows_per_query: 100,
    });
    expect(scope.resources).toEqual(['tasks']);
    expect(scope.max_rows_per_query).toBe(100);
    expect(scope.excluded_resources).toEqual([]);
    expect(scope.filters).toBeUndefined();
  });

  it('accepts the full PRD §8.1 example', () => {
    const scope = parseFederationScope({
      resources: ['tasks', 'notes', 'memory'],
      filters: {
        tasks: { include_teams: ['team_uuid_1', 'team_uuid_2'], include_personal: true },
        notes: { include_personal: true, include_teams: [] },
        memory: { include_personal: true },
      },
      excluded_resources: ['credentials', 'api_keys'],
      max_rows_per_query: 500,
    });
    expect(scope.resources).toEqual(['tasks', 'notes', 'memory']);
    expect(scope.excluded_resources).toEqual(['credentials', 'api_keys']);
    expect(scope.filters?.tasks?.include_teams).toEqual(['team_uuid_1', 'team_uuid_2']);
    expect(scope.max_rows_per_query).toBe(500);
  });

  it('accepts a scope with excluded_resources and no filter overlap', () => {
    const scope = parseFederationScope({
      resources: ['tasks', 'notes'],
      excluded_resources: ['memory'],
      max_rows_per_query: 250,
    });
    expect(scope.resources).toEqual(['tasks', 'notes']);
    expect(scope.excluded_resources).toEqual(['memory']);
  });
});

// Zod-applied defaults: excluded_resources → [], include_personal → true.
describe('parseFederationScope — defaults', () => {
  it('defaults excluded_resources to []', () => {
    const scope = parseFederationScope({ resources: ['tasks'], max_rows_per_query: 1 });
    expect(scope.excluded_resources).toEqual([]);
  });

  it('defaults include_personal to true when filter is provided without it', () => {
    const scope = parseFederationScope({
      resources: ['tasks'],
      filters: { tasks: { include_teams: ['t1'] } },
      max_rows_per_query: 10,
    });
    expect(scope.filters?.tasks?.include_personal).toBe(true);
  });
});

// Each invalid shape must raise FederationScopeError (never a raw ZodError).
describe('parseFederationScope — invalid inputs', () => {
  it('throws FederationScopeError for empty resources array', () => {
    expect(() => parseFederationScope({ resources: [], max_rows_per_query: 100 })).toThrow(
      FederationScopeError,
    );
  });

  it('throws for unknown resource value in resources', () => {
    expect(() =>
      parseFederationScope({ resources: ['unknown_resource'], max_rows_per_query: 100 }),
    ).toThrow(FederationScopeError);
  });

  it('throws when resources and excluded_resources intersect', () => {
    expect(() =>
      parseFederationScope({
        resources: ['tasks', 'memory'],
        excluded_resources: ['memory'],
        max_rows_per_query: 100,
      }),
    ).toThrow(FederationScopeError);
  });

  it('throws when filters references a resource not in resources', () => {
    expect(() =>
      parseFederationScope({
        resources: ['tasks'],
        filters: { notes: { include_personal: true } },
        max_rows_per_query: 100,
      }),
    ).toThrow(FederationScopeError);
  });

  // Boundary rejections: the schema allows [1, 10000] inclusive.
  it('throws for max_rows_per_query = 0', () => {
    expect(() => parseFederationScope({ resources: ['tasks'], max_rows_per_query: 0 })).toThrow(
      FederationScopeError,
    );
  });

  it('throws for max_rows_per_query = 10001', () => {
    expect(() => parseFederationScope({ resources: ['tasks'], max_rows_per_query: 10001 })).toThrow(
      FederationScopeError,
    );
  });

  it('throws for null input', () => {
    expect(() => parseFederationScope(null)).toThrow(FederationScopeError);
  });

  it('throws for non-object input (string)', () => {
    expect(() => parseFederationScope('not-an-object')).toThrow(FederationScopeError);
  });
});

// The parser must emit a console.warn sentinel for sensitive resources
// (credentials, api_keys) and stay silent otherwise.
describe('parseFederationScope — sentinel warning', () => {
  it('emits console.warn when resources includes "credentials"', () => {
    const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
    parseFederationScope({
      resources: ['tasks', 'credentials'],
      max_rows_per_query: 100,
    });
    expect(warnSpy).toHaveBeenCalledWith(
      expect.stringContaining(
        '[FederationScope] WARNING: scope grants sensitive resource "credentials"',
      ),
    );
  });

  it('emits console.warn when resources includes "api_keys"', () => {
    const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
    parseFederationScope({
      resources: ['tasks', 'api_keys'],
      max_rows_per_query: 100,
    });
    expect(warnSpy).toHaveBeenCalledWith(
      expect.stringContaining(
        '[FederationScope] WARNING: scope grants sensitive resource "api_keys"',
      ),
    );
  });

  it('does NOT emit console.warn for non-sensitive resources', () => {
    const warnSpy = vi.spyOn(console, 'warn').mockImplementation(() => {});
    parseFederationScope({ resources: ['tasks', 'notes', 'memory'], max_rows_per_query: 100 });
    expect(warnSpy).not.toHaveBeenCalled();
  });
});

// Direct schema checks of the inclusive [1, 10000] max_rows_per_query bounds.
describe('FederationScopeSchema — boundary values', () => {
  it('accepts max_rows_per_query = 1 (lower bound)', () => {
    const result = FederationScopeSchema.safeParse({ resources: ['tasks'], max_rows_per_query: 1 });
    expect(result.success).toBe(true);
  });

  it('accepts max_rows_per_query = 10000 (upper bound)', () => {
    const result = FederationScopeSchema.safeParse({
      resources: ['tasks'],
      max_rows_per_query: 10000,
    });
    expect(result.success).toBe(true);
  });
});
|
||||
147
apps/gateway/src/federation/scope-schema.ts
Normal file
147
apps/gateway/src/federation/scope-schema.ts
Normal file
@@ -0,0 +1,147 @@
|
||||
/**
|
||||
* Federation grant scope schema and validator.
|
||||
*
|
||||
* Source of truth: docs/federation/PRD.md §8.1
|
||||
*
|
||||
* This module is intentionally pure — no DB, no NestJS, no CA wiring.
|
||||
* It is reusable from grant CRUD (M2-06) and scope enforcement (M3+).
|
||||
*/
|
||||
|
||||
import { z } from 'zod';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Allowlist of federation resources (canonical — M3+ will extend this list)
|
||||
// ---------------------------------------------------------------------------
|
||||
/**
 * Canonical allowlist of resource identifiers a federation scope may
 * reference (M3+ will extend this list). Kept `as const` so the values feed
 * both `z.enum(...)` and the FederationResource union below.
 */
export const FEDERATION_RESOURCE_VALUES = [
  'tasks',
  'notes',
  'memory',
  'credentials',
  'api_keys',
] as const;

/** Union of the allowlisted federation resource identifiers. */
export type FederationResource = (typeof FEDERATION_RESOURCE_VALUES)[number];

/**
 * Sensitive resources require explicit admin approval (PRD §8.4).
 * The parser warns when these appear in `resources`; M2-06 grant CRUD
 * will add a hard gate on top of this warning.
 */
const SENSITIVE_RESOURCES: ReadonlySet<FederationResource> = new Set(['credentials', 'api_keys']);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Sub-schemas
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Granted resources: at least one entry, every value from the allowlist,
// and no duplicates (checked via Set-size comparison).
const ResourceArraySchema = z
  .array(z.enum(FEDERATION_RESOURCE_VALUES))
  .nonempty({ message: 'resources must contain at least one value' })
  .refine((arr) => new Set(arr).size === arr.length, {
    message: 'resources must not contain duplicate values',
  });

// Per-resource filter: optional team-ID allowlist; personal items are
// included by default when the flag is omitted.
const ResourceFilterSchema = z.object({
  include_teams: z.array(z.string()).optional(),
  include_personal: z.boolean().default(true),
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Top-level schema
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Top-level schema for a federation grant scope (PRD §8.1).
 *
 * Field-level rules live on each property; cross-field rules
 * (granted/excluded intersection, filter-key subset) live in the
 * `superRefine` below.
 */
export const FederationScopeSchema = z
  .object({
    // Resources granted to the peer — non-empty, deduplicated, allowlisted.
    resources: ResourceArraySchema,

    // Resources explicitly denied. Defaults to []; duplicates are rejected.
    excluded_resources: z
      .array(z.enum(FEDERATION_RESOURCE_VALUES))
      .default([])
      .refine((arr) => new Set(arr).size === arr.length, {
        message: 'excluded_resources must not contain duplicate values',
      }),

    // Optional per-resource filters; every key must also appear in
    // `resources` (enforced in superRefine).
    filters: z.record(z.string(), ResourceFilterSchema).optional(),

    // Hard row cap per federated query: integer in [1, 10000] inclusive.
    max_rows_per_query: z
      .number()
      .int({ message: 'max_rows_per_query must be an integer' })
      .min(1, { message: 'max_rows_per_query must be at least 1' })
      .max(10000, { message: 'max_rows_per_query must be at most 10000' }),
  })
  .superRefine((data, ctx) => {
    const resourceSet = new Set(data.resources);

    // Intersection guard: a resource cannot be both granted and excluded
    for (const r of data.excluded_resources) {
      if (resourceSet.has(r)) {
        ctx.addIssue({
          code: z.ZodIssueCode.custom,
          message: `Resource "${r}" appears in both resources and excluded_resources`,
          path: ['excluded_resources'],
        });
      }
    }

    // Filter keys must be a subset of resources
    if (data.filters) {
      for (const key of Object.keys(data.filters)) {
        if (!resourceSet.has(key as FederationResource)) {
          ctx.addIssue({
            code: z.ZodIssueCode.custom,
            message: `filters key "${key}" references a resource not present in resources`,
            path: ['filters', key],
          });
        }
      }
    }
  });

/** Validated federation scope, inferred from the schema (post-defaults). */
export type FederationScope = z.infer<typeof FederationScopeSchema>;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Error class
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export class FederationScopeError extends Error {
|
||||
constructor(message: string) {
|
||||
super(message);
|
||||
this.name = 'FederationScopeError';
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Typed parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
|
||||
* Parse and validate an unknown value as a FederationScope.
|
||||
*
|
||||
* Throws `FederationScopeError` with aggregated Zod issues on failure.
|
||||
*
|
||||
* Emits `console.warn` when sensitive resources (`credentials`, `api_keys`)
|
||||
* are present in `resources` — per PRD §8.4, these require explicit admin
|
||||
* approval. M2-06 grant CRUD will add a hard gate on top of this warning.
|
||||
*/
|
||||
export function parseFederationScope(input: unknown): FederationScope {
|
||||
const result = FederationScopeSchema.safeParse(input);
|
||||
|
||||
if (!result.success) {
|
||||
const issues = result.error.issues
|
||||
.map((e) => ` - [${e.path.join('.') || 'root'}] ${e.message}`)
|
||||
.join('\n');
|
||||
throw new FederationScopeError(`Invalid federation scope:\n${issues}`);
|
||||
}
|
||||
|
||||
const scope = result.data;
|
||||
|
||||
// Sentinel warning for sensitive resources (PRD §8.4)
|
||||
for (const resource of scope.resources) {
|
||||
if (SENSITIVE_RESOURCES.has(resource)) {
|
||||
console.warn(
|
||||
`[FederationScope] WARNING: scope grants sensitive resource "${resource}". Per PRD §8.4 this requires explicit admin approval and is logged.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
return scope;
|
||||
}
|
||||
@@ -20,10 +20,12 @@ import { Logger, ValidationPipe } from '@nestjs/common';
|
||||
import { FastifyAdapter, type NestFastifyApplication } from '@nestjs/platform-fastify';
|
||||
import helmet from '@fastify/helmet';
|
||||
import { listSsoStartupWarnings } from '@mosaicstack/auth';
|
||||
import { loadConfig } from '@mosaicstack/config';
|
||||
import { AppModule } from './app.module.js';
|
||||
import { mountAuthHandler } from './auth/auth.controller.js';
|
||||
import { mountMcpHandler } from './mcp/mcp.controller.js';
|
||||
import { McpService } from './mcp/mcp.service.js';
|
||||
import { detectAndAssertTier, TierDetectionError } from '@mosaicstack/storage';
|
||||
|
||||
async function bootstrap(): Promise<void> {
|
||||
const logger = new Logger('Bootstrap');
|
||||
@@ -32,6 +34,20 @@ async function bootstrap(): Promise<void> {
|
||||
throw new Error('BETTER_AUTH_SECRET is required');
|
||||
}
|
||||
|
||||
// Pre-flight: assert all external services required by the configured tier
|
||||
// are reachable. Runs before NestFactory.create() so failures are visible
|
||||
// immediately with actionable remediation hints.
|
||||
const mosaicConfig = loadConfig();
|
||||
try {
|
||||
await detectAndAssertTier(mosaicConfig);
|
||||
} catch (err) {
|
||||
if (err instanceof TierDetectionError) {
|
||||
logger.error(`Tier detection failed: ${err.message}`);
|
||||
logger.error(`Remediation: ${err.remediation}`);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
for (const warning of listSsoStartupWarnings()) {
|
||||
logger.warn(warning);
|
||||
}
|
||||
|
||||
70
deploy/portainer/README.md
Normal file
70
deploy/portainer/README.md
Normal file
@@ -0,0 +1,70 @@
|
||||
# deploy/portainer/
|
||||
|
||||
Portainer stack templates for Mosaic Stack deployments.
|
||||
|
||||
## Files
|
||||
|
||||
| File | Purpose |
|
||||
| -------------------------- | -------------------------------------------------------------------------------------------------------------- |
|
||||
| `federated-test.stack.yml` | Docker Swarm stack for federation end-to-end test instances (`mos-test-1.woltje.com`, `mos-test-2.woltje.com`) |
|
||||
|
||||
---
|
||||
|
||||
## federated-test.stack.yml
|
||||
|
||||
A self-contained Swarm stack that boots a federated-tier Mosaic gateway with co-located Postgres 17 (pgvector) and Valkey 8. This is a **test template** — production deployments will use a separate template with stricter resource limits and Docker secrets.
|
||||
|
||||
### Deploy via Portainer UI
|
||||
|
||||
1. Log into Portainer.
|
||||
2. Navigate to **Stacks → Add stack**.
|
||||
3. Set a stack name matching `STACK_NAME` below (e.g. `mos-test-1`).
|
||||
4. Choose **Web editor** and paste the contents of `federated-test.stack.yml`.
|
||||
5. Scroll to **Environment variables** and add each variable listed below.
|
||||
6. Click **Deploy the stack**.
|
||||
|
||||
### Required environment variables
|
||||
|
||||
| Variable | Example | Notes |
|
||||
| -------------------- | --------------------------------------- | -------------------------------------------------------- |
|
||||
| `STACK_NAME` | `mos-test-1` | Unique per stack — used in Traefik router/service names. |
|
||||
| `HOST_FQDN` | `mos-test-1.woltje.com` | Fully-qualified hostname served by this stack. |
|
||||
| `POSTGRES_PASSWORD` | _(generate randomly)_ | Database password. Do **not** reuse between stacks. |
|
||||
| `BETTER_AUTH_SECRET` | _(generate: `openssl rand -base64 32`)_ | BetterAuth session signing key. |
|
||||
| `BETTER_AUTH_URL` | `https://mos-test-1.woltje.com` | Public base URL of the gateway. |
|
||||
|
||||
Optional variables (uncomment in the YAML or set in Portainer):
|
||||
|
||||
| Variable | Notes |
|
||||
| ----------------------------- | ---------------------------------------------------------- |
|
||||
| `ANTHROPIC_API_KEY` | Enable Claude models. |
|
||||
| `OPENAI_API_KEY` | Enable OpenAI models. |
|
||||
| `OTEL_EXPORTER_OTLP_ENDPOINT` | Forward traces to a collector (e.g. `http://jaeger:4318`). |
|
||||
|
||||
### Required external resources
|
||||
|
||||
Before deploying, ensure the following exist on the Swarm:
|
||||
|
||||
1. **`traefik-public` overlay network** — shared network Traefik uses to route traffic to stacks.
|
||||
```bash
|
||||
docker network create --driver overlay --attachable traefik-public
|
||||
```
|
||||
2. **`letsencrypt` cert resolver** — configured in the Traefik Swarm stack. The stack template references `tls.certresolver=letsencrypt`; the name must match your Traefik config.
|
||||
3. **DNS A record** — `${HOST_FQDN}` must resolve to the Swarm ingress IP (or a Cloudflare-proxied address pointing there).
|
||||
|
||||
### Deployed instances
|
||||
|
||||
| Stack name | HOST_FQDN | Purpose |
|
||||
| ------------ | ----------------------- | ---------------------------------- |
|
||||
| `mos-test-1` | `mos-test-1.woltje.com` | DEPLOY-03 — first federation peer |
|
||||
| `mos-test-2` | `mos-test-2.woltje.com` | DEPLOY-04 — second federation peer |
|
||||
|
||||
### Image
|
||||
|
||||
The gateway image is pinned by digest — currently the `sha-9f1a081` build (the original `fed-v0.1.0-m1` pin from DEPLOY-01 had a broken pnpm copy and was replaced after the #488 Dockerfile fix; see the header comment in the stack YAML). Update the digest in the YAML when promoting a new build — never use `:latest` or a mutable tag in Swarm.
|
||||
|
||||
### Notes
|
||||
|
||||
- This template boots a **vanilla M1-baseline gateway** in federated tier. Federation grants (Step-CA, mTLS) are M2+ scope and not included here.
|
||||
- Each stack gets its own Postgres volume (`postgres-data`) and Valkey volume (`valkey-data`) scoped to the stack name by Swarm.
|
||||
- `depends_on` is honoured by Compose but ignored by Swarm — healthchecks on Postgres and Valkey ensure the gateway retries until they are ready.
|
||||
160
deploy/portainer/federated-test.stack.yml
Normal file
160
deploy/portainer/federated-test.stack.yml
Normal file
@@ -0,0 +1,160 @@
|
||||
# deploy/portainer/federated-test.stack.yml
|
||||
#
|
||||
# Portainer / Docker Swarm stack template — federated-tier test instance
|
||||
#
|
||||
# PURPOSE
|
||||
# Deploys a single federated-tier Mosaic gateway with co-located Postgres
|
||||
# (pgvector) and Valkey for end-to-end federation testing. Intended for
|
||||
# mos-test-1.woltje.com and mos-test-2.woltje.com (DEPLOY-03/04).
|
||||
#
|
||||
# REQUIRED ENV VARS (set per-stack in Portainer → Stacks → Environment variables)
|
||||
# STACK_NAME Unique name for Traefik router/service labels.
|
||||
# Examples: mos-test-1, mos-test-2
|
||||
# HOST_FQDN Fully-qualified domain name served by this stack.
|
||||
# Examples: mos-test-1.woltje.com, mos-test-2.woltje.com
|
||||
# POSTGRES_PASSWORD Database password — set per stack; do NOT commit a default.
|
||||
# BETTER_AUTH_SECRET Random 32-char string for BetterAuth session signing.
|
||||
# Generate: openssl rand -base64 32
|
||||
# BETTER_AUTH_URL Public gateway base URL, e.g. https://mos-test-1.woltje.com
|
||||
#
|
||||
# OPTIONAL ENV VARS (uncomment and set in Portainer to enable features)
|
||||
# ANTHROPIC_API_KEY sk-ant-...
|
||||
# OPENAI_API_KEY sk-...
|
||||
# OTEL_EXPORTER_OTLP_ENDPOINT http://<collector>:4318
|
||||
# OTEL_SERVICE_NAME (default: mosaic-gateway)
|
||||
#
|
||||
# REQUIRED EXTERNAL RESOURCES
|
||||
# traefik-public Docker overlay network — must exist before deploying.
|
||||
# Create: docker network create --driver overlay --attachable traefik-public
|
||||
# letsencrypt Traefik cert resolver configured on the Swarm manager.
|
||||
# DNS A record ${HOST_FQDN} → Swarm ingress IP (or Cloudflare proxy).
|
||||
#
|
||||
# IMAGE
|
||||
# Pinned to sha-9f1a081 (main HEAD post-#488 Dockerfile fix). The previous
|
||||
# pin (fed-v0.1.0-m1, sha256:9b72e2...) had a broken pnpm copy and could
|
||||
# not resolve @mosaicstack/storage at runtime. The new digest was smoke-
|
||||
# tested locally — gateway boots, imports resolve, tier-detector runs.
|
||||
# Update digest here when promoting a new build.
|
||||
#
|
||||
# HEALTHCHECK NOTE (2026-04-21)
|
||||
# Switched from busybox wget to node http.get on 127.0.0.1 (not localhost) to
|
||||
# avoid IPv6 resolution issues on Alpine. Retries increased to 5 and
|
||||
# start_period to 60s to cover the NestJS/GC cold-start window (~40-50s).
|
||||
# restart_policy set to `any` so SIGTERM/clean-exit also triggers restart.
|
||||
#
|
||||
# NOTE: This is a TEST template — production deployments use a separate
|
||||
# parameterised template with stricter resource limits and secrets.
|
||||
|
||||
version: '3.9'
|
||||
|
||||
services:
|
||||
gateway:
|
||||
image: git.mosaicstack.dev/mosaicstack/stack/gateway@sha256:1069117740e00ccfeba357cae38c43f3729fe5ae702740ce474f6512414d7c02
|
||||
# Tag for human reference: sha-9f1a081 (post-#488 Dockerfile fix; smoke-tested locally)
|
||||
environment:
|
||||
# ── Tier ───────────────────────────────────────────────────────────────
|
||||
MOSAIC_TIER: federated
|
||||
|
||||
# ── Database ───────────────────────────────────────────────────────────
|
||||
DATABASE_URL: postgres://gateway:${POSTGRES_PASSWORD}@postgres:5432/mosaic
|
||||
|
||||
# ── Queue ──────────────────────────────────────────────────────────────
|
||||
VALKEY_URL: redis://valkey:6379
|
||||
|
||||
# ── Gateway ────────────────────────────────────────────────────────────
|
||||
GATEWAY_PORT: '3000'
|
||||
GATEWAY_CORS_ORIGIN: https://${HOST_FQDN}
|
||||
|
||||
# ── Auth ───────────────────────────────────────────────────────────────
|
||||
BETTER_AUTH_SECRET: ${BETTER_AUTH_SECRET}
|
||||
BETTER_AUTH_URL: https://${HOST_FQDN}
|
||||
|
||||
# ── Observability ──────────────────────────────────────────────────────
|
||||
OTEL_SERVICE_NAME: ${STACK_NAME:-mosaic-gateway}
|
||||
# OTEL_EXPORTER_OTLP_ENDPOINT: http://<collector>:4318
|
||||
|
||||
# ── AI Providers (uncomment to enable) ─────────────────────────────────
|
||||
# ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY}
|
||||
# OPENAI_API_KEY: ${OPENAI_API_KEY}
|
||||
networks:
|
||||
- federated-test
|
||||
- traefik-public
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: any
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
labels:
|
||||
- 'traefik.enable=true'
|
||||
- 'traefik.docker.network=traefik-public'
|
||||
- 'traefik.http.routers.${STACK_NAME}.rule=Host(`${HOST_FQDN}`)'
|
||||
- 'traefik.http.routers.${STACK_NAME}.entrypoints=websecure'
|
||||
- 'traefik.http.routers.${STACK_NAME}.tls=true'
|
||||
- 'traefik.http.routers.${STACK_NAME}.tls.certresolver=letsencrypt'
|
||||
- 'traefik.http.services.${STACK_NAME}.loadbalancer.server.port=3000'
|
||||
healthcheck:
|
||||
test:
|
||||
- 'CMD'
|
||||
- 'node'
|
||||
- '-e'
|
||||
- "require('http').get('http://127.0.0.1:3000/health',r=>process.exit(r.statusCode===200?0:1)).on('error',()=>process.exit(1))"
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 60s
|
||||
depends_on:
|
||||
- postgres
|
||||
- valkey
|
||||
|
||||
postgres:
|
||||
image: pgvector/pgvector:pg17
|
||||
environment:
|
||||
POSTGRES_USER: gateway
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
POSTGRES_DB: mosaic
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
networks:
|
||||
- federated-test
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
healthcheck:
|
||||
test: ['CMD-SHELL', 'pg_isready -U gateway']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 10s
|
||||
|
||||
valkey:
|
||||
image: valkey/valkey:8-alpine
|
||||
volumes:
|
||||
- valkey-data:/data
|
||||
networks:
|
||||
- federated-test
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
healthcheck:
|
||||
test: ['CMD', 'valkey-cli', 'ping']
|
||||
interval: 10s
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
start_period: 5s
|
||||
|
||||
volumes:
|
||||
postgres-data:
|
||||
valkey-data:
|
||||
|
||||
networks:
|
||||
federated-test:
|
||||
driver: overlay
|
||||
traefik-public:
|
||||
external: true
|
||||
@@ -27,6 +27,7 @@ services:
|
||||
postgres-federated:
|
||||
image: pgvector/pgvector:pg17
|
||||
profiles: [federated]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- '${PG_FEDERATED_HOST_PORT:-5433}:5432'
|
||||
environment:
|
||||
@@ -45,6 +46,7 @@ services:
|
||||
valkey-federated:
|
||||
image: valkey/valkey:8-alpine
|
||||
profiles: [federated]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- '${VALKEY_FEDERATED_HOST_PORT:-6380}:6379'
|
||||
volumes:
|
||||
@@ -55,6 +57,64 @@ services:
|
||||
timeout: 3s
|
||||
retries: 5
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Step-CA — Mosaic Federation internal certificate authority
|
||||
#
|
||||
# Image: pinned to 0.27.4 (latest stable as of late 2025).
|
||||
# `latest` is forbidden per Mosaic image policy (immutable tag required for
|
||||
# reproducible deployments and digest-first promotion in CI).
|
||||
#
|
||||
# Profile: `federated` — this service must not start in non-federated dev.
|
||||
#
|
||||
# Password:
|
||||
# Dev: bind-mount ./infra/step-ca/dev-password (gitignored; copy from
|
||||
# ./infra/step-ca/dev-password.example and customise locally).
|
||||
# Prod: replace the bind-mount with a Docker secret:
|
||||
# secrets:
|
||||
# ca_password:
|
||||
# external: true
|
||||
# and reference it as `/run/secrets/ca_password` (same path the
|
||||
# init script already uses).
|
||||
#
|
||||
# Provisioner: "mosaic-fed" (consumed by apps/gateway/src/federation/ca.service.ts)
|
||||
# ---------------------------------------------------------------------------
|
||||
step-ca:
|
||||
image: smallstep/step-ca:0.27.4
|
||||
profiles: [federated]
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- '${STEP_CA_HOST_PORT:-9000}:9000'
|
||||
volumes:
|
||||
- step_ca_data:/home/step
|
||||
# init script — executed as the container entrypoint
|
||||
- ./infra/step-ca/init.sh:/usr/local/bin/mosaic-step-ca-init.sh:ro
|
||||
# X.509 template skeleton (wired in M2-04)
|
||||
- ./infra/step-ca/templates:/etc/step-ca-templates:ro
|
||||
# Dev password file — GITIGNORED; copy from dev-password.example
|
||||
# In production, replace this with a Docker secret (see comment above).
|
||||
- ./infra/step-ca/dev-password:/run/secrets/ca_password:ro
|
||||
entrypoint: ['/bin/sh', '/usr/local/bin/mosaic-step-ca-init.sh']
|
||||
healthcheck:
|
||||
# The healthcheck requires the root cert to exist, which is only true
|
||||
# after init.sh has completed on first boot. start_period gives init
|
||||
# time to finish before Docker starts counting retries.
|
||||
test:
|
||||
[
|
||||
'CMD',
|
||||
'step',
|
||||
'ca',
|
||||
'health',
|
||||
'--ca-url',
|
||||
'https://localhost:9000',
|
||||
'--root',
|
||||
'/home/step/certs/root_ca.crt',
|
||||
]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
|
||||
volumes:
|
||||
pg_federated_data:
|
||||
valkey_federated_data:
|
||||
step_ca_data:
|
||||
|
||||
@@ -5,18 +5,27 @@ RUN corepack enable
|
||||
|
||||
FROM base AS builder
|
||||
WORKDIR /app
|
||||
# Copy workspace manifests first for layer-cached install
|
||||
COPY pnpm-workspace.yaml pnpm-lock.yaml package.json ./
|
||||
COPY apps/gateway/package.json ./apps/gateway/
|
||||
COPY packages/ ./packages/
|
||||
COPY plugins/ ./plugins/
|
||||
RUN pnpm install --frozen-lockfile
|
||||
COPY . .
|
||||
RUN pnpm --filter @mosaic/gateway build
|
||||
# Build gateway and all of its workspace dependencies via turbo dependency graph
|
||||
RUN pnpm turbo run build --filter @mosaicstack/gateway...
|
||||
# Produce a self-contained deploy artifact: flat node_modules, no pnpm symlinks
|
||||
# --legacy is required for pnpm v10 when inject-workspace-packages is not set
|
||||
RUN pnpm --filter @mosaicstack/gateway --prod deploy --legacy /deploy
|
||||
|
||||
FROM base AS runner
|
||||
WORKDIR /app
|
||||
ENV NODE_ENV=production
|
||||
# Use the pnpm deploy output — resolves all deps into a flat, self-contained node_modules
|
||||
COPY --from=builder /deploy/node_modules ./node_modules
|
||||
COPY --from=builder /deploy/package.json ./package.json
|
||||
# dist is declared in package.json "files" so pnpm deploy copies it into /deploy;
|
||||
# copy from builder explicitly as belt-and-suspenders
|
||||
COPY --from=builder /app/apps/gateway/dist ./dist
|
||||
COPY --from=builder /app/apps/gateway/package.json ./package.json
|
||||
COPY --from=builder /app/node_modules ./node_modules
|
||||
EXPOSE 4000
|
||||
CMD ["node", "dist/main.js"]
|
||||
|
||||
@@ -7,13 +7,22 @@
|
||||
|
||||
**ID:** federation-v1-20260419
|
||||
**Statement:** Jarvis operates across 3–4 workstations in two physical locations (home, USC). The user currently reaches back to a single jarvis-brain checkout from every session; a prior OpenBrain attempt caused cache, latency, and opacity pain. This mission builds asymmetric federation between Mosaic Stack gateways so that a session on a user's home gateway can query their work gateway in real time without data ever persisting across the boundary, with full multi-tenant isolation and standard-PKI (X.509 / Step-CA) trust management.
|
||||
**Phase:** Planning complete — M1 implementation not started
|
||||
**Current Milestone:** FED-M1
|
||||
**Progress:** 0 / 7 milestones
|
||||
**Phase:** M2 active — Step-CA + grant schema + admin CLI; parallel test-deploy workstream stood up
|
||||
**Current Milestone:** FED-M2
|
||||
**Progress:** 1 / 7 milestones
|
||||
**Status:** active
|
||||
**Last Updated:** 2026-04-19 (PRD + MILESTONES + tracking issues filed)
|
||||
**Last Updated:** 2026-04-21 (M2 decomposed; mos-test-1/-2 designated as federation E2E test hosts)
|
||||
**Parent Mission:** None — new mission
|
||||
|
||||
## Test Infrastructure
|
||||
|
||||
| Host | Role | Image | Tier |
|
||||
| ----------------------- | ----------------------------------- | ------------------------------------- | --------- |
|
||||
| `mos-test-1.woltje.com` | Federation Server A (querying side) | `gateway:fed-v0.1.0-m1` (M1 baseline) | federated |
|
||||
| `mos-test-2.woltje.com` | Federation Server B (serving side) | `gateway:fed-v0.1.0-m1` (M1 baseline) | federated |
|
||||
|
||||
These are TEST hosts for federation E2E (M3+). Distinct from PRD AC-12 production targets (`woltje.com` ↔ `uscllc.com`). Deployment workstream tracked in `docs/federation/TASKS.md` under FED-M2-DEPLOY-\*.
|
||||
|
||||
## Context
|
||||
|
||||
Federation is the solution to what originally drove OpenBrain. The prior attempt coupled every agent session to a remote service, introduced cache/latency/opacity pain, and created a hard dependency that punished offline use. This redesign:
|
||||
@@ -51,15 +60,15 @@ Key design references:
|
||||
|
||||
## Milestones
|
||||
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------ | --------------------------------------------- | ----------- | ------ | ----- | ------- | --------- |
|
||||
| 1 | FED-M1 | Federated tier infrastructure | not-started | — | #460 | — | — |
|
||||
| 2 | FED-M2 | Step-CA + grant schema + admin CLI | not-started | — | #461 | — | — |
|
||||
| 3 | FED-M3 | mTLS handshake + list/get + scope enforcement | not-started | — | #462 | — | — |
|
||||
| 4 | FED-M4 | search verb + audit log + rate limit | not-started | — | #463 | — | — |
|
||||
| 5 | FED-M5 | Cache + offline degradation + OTEL | not-started | — | #464 | — | — |
|
||||
| 6 | FED-M6 | Revocation + auto-renewal + CRL | not-started | — | #465 | — | — |
|
||||
| 7 | FED-M7 | Multi-user RBAC hardening + acceptance suite | not-started | — | #466 | — | — |
|
||||
| # | ID | Name | Status | Branch | Issue | Started | Completed |
|
||||
| --- | ------ | --------------------------------------------- | ----------- | ------------------ | ----- | ---------- | ---------- |
|
||||
| 1 | FED-M1 | Federated tier infrastructure | done | (12 PRs #470-#481) | #460 | 2026-04-19 | 2026-04-19 |
|
||||
| 2 | FED-M2 | Step-CA + grant schema + admin CLI | in-progress | (decomposition) | #461 | 2026-04-21 | — |
|
||||
| 3 | FED-M3 | mTLS handshake + list/get + scope enforcement | not-started | — | #462 | — | — |
|
||||
| 4 | FED-M4 | search verb + audit log + rate limit | not-started | — | #463 | — | — |
|
||||
| 5 | FED-M5 | Cache + offline degradation + OTEL | not-started | — | #464 | — | — |
|
||||
| 6 | FED-M6 | Revocation + auto-renewal + CRL | not-started | — | #465 | — | — |
|
||||
| 7 | FED-M7 | Multi-user RBAC hardening + acceptance suite | not-started | — | #466 | — | — |
|
||||
|
||||
## Budget
|
||||
|
||||
@@ -76,10 +85,17 @@ Key design references:
|
||||
|
||||
## Session History
|
||||
|
||||
| Session | Date | Runtime | Outcome |
|
||||
| ------- | ---------- | ------- | --------------------------------------------------- |
|
||||
| S1 | 2026-04-19 | claude | PRD authored, MILESTONES decomposed, 7 issues filed |
|
||||
| Session | Date | Runtime | Outcome |
|
||||
| ------- | ---------- | ------- | --------------------------------------------------------------------- |
|
||||
| S1 | 2026-04-19 | claude | PRD authored, MILESTONES decomposed, 7 issues filed |
|
||||
| S2-S4 | 2026-04-19 | claude | FED-M1 complete: 12 tasks (PRs #470-#481) merged; tag `fed-v0.1.0-m1` |
|
||||
|
||||
## Next Step
|
||||
|
||||
Begin FED-M1 implementation: federated tier infrastructure. Breakdown in `docs/federation/TASKS.md`.
|
||||
FED-M2 active. Decomposition landed in `docs/federation/TASKS.md` (M2-01..M2-13 code workstream + DEPLOY-01..DEPLOY-05 parallel test-deploy workstream, ~88K total). Tracking issue #482.
|
||||
|
||||
Parallel execution plan:
|
||||
|
||||
- **CODE workstream**: M2-01 (DB migration) starts immediately — sonnet subagent on `feat/federation-m2-schema`. Then M2-02 → M2-09 sequentially with M2-04/M2-05/M2-06/M2-07 having interleaved CA/storage/grant dependencies.
|
||||
- **DEPLOY workstream**: DEPLOY-01 (image verify) → DEPLOY-02 (stack template) → DEPLOY-03/04 (mos-test-1/-2 deploy) → DEPLOY-05 (TEST-INFRA.md). Gated on Portainer wrapper PR (`PORTAINER_INSECURE` flag) merging first.
|
||||
- **Re-converge** at M2-10 (E2E test) once both workstreams ready.
|
||||
|
||||
190
docs/federation/SETUP.md
Normal file
190
docs/federation/SETUP.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# Federated Tier Setup Guide
|
||||
|
||||
## What is the federated tier?
|
||||
|
||||
The federated tier is designed for multi-user and multi-host deployments. It consists of PostgreSQL 17 with pgvector extension (for embeddings and RAG), Valkey for distributed task queueing and caching, and a shared configuration across multiple Mosaic gateway instances. Use this tier when running Mosaic in production or when scaling beyond a single-host deployment.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Docker and Docker Compose installed
|
||||
- Ports 5433 (PostgreSQL) and 6380 (Valkey) available on your host (or adjust environment variables)
|
||||
- At least 2 GB free disk space for data volumes
|
||||
|
||||
## Start the federated stack
|
||||
|
||||
Run the federated overlay:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
```
|
||||
|
||||
This starts PostgreSQL 17 with pgvector and Valkey 8. The pgvector extension is created automatically on first boot.
|
||||
|
||||
Verify the services are running:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.federated.yml ps
|
||||
```
|
||||
|
||||
Expected output shows `postgres-federated` and `valkey-federated` both healthy.
|
||||
|
||||
## Configure mosaic for federated tier
|
||||
|
||||
Create or update your `mosaic.config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"tier": "federated",
|
||||
"database": "postgresql://mosaic:mosaic@localhost:5433/mosaic",
|
||||
"queue": "redis://localhost:6380"
|
||||
}
|
||||
```
|
||||
|
||||
If you're using environment variables instead:
|
||||
|
||||
```bash
|
||||
export DATABASE_URL="postgresql://mosaic:mosaic@localhost:5433/mosaic"
|
||||
export REDIS_URL="redis://localhost:6380"
|
||||
```
|
||||
|
||||
## Verify health
|
||||
|
||||
Run the health check:
|
||||
|
||||
```bash
|
||||
mosaic gateway doctor
|
||||
```
|
||||
|
||||
Expected output (green):
|
||||
|
||||
```
|
||||
Tier: federated Config: mosaic.config.json
|
||||
✓ postgres localhost:5433 (42ms)
|
||||
✓ valkey localhost:6380 (8ms)
|
||||
✓ pgvector (embedded) (15ms)
|
||||
```
|
||||
|
||||
For JSON output (useful in CI/automation):
|
||||
|
||||
```bash
|
||||
mosaic gateway doctor --json
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Port conflicts
|
||||
|
||||
**Symptom:** `bind: address already in use`
|
||||
|
||||
**Fix:** Stop the base dev stack first:
|
||||
|
||||
```bash
|
||||
docker compose down
|
||||
docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
```
|
||||
|
||||
Or change the host port with an environment variable:
|
||||
|
||||
```bash
|
||||
PG_FEDERATED_HOST_PORT=5434 VALKEY_FEDERATED_HOST_PORT=6381 \
|
||||
docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
```
|
||||
|
||||
### pgvector extension error
|
||||
|
||||
**Symptom:** `ERROR: could not open extension control file`
|
||||
|
||||
**Fix:** pgvector is created at first boot. Check logs:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.federated.yml logs postgres-federated | grep -i vector
|
||||
```
|
||||
|
||||
If missing, exec into the container and create it manually:
|
||||
|
||||
```bash
|
||||
docker exec <postgres-federated-id> psql -U mosaic -d mosaic -c "CREATE EXTENSION vector;"
|
||||
```
|
||||
|
||||
### Valkey connection refused
|
||||
|
||||
**Symptom:** `Error: connect ECONNREFUSED 127.0.0.1:6380`
|
||||
|
||||
**Fix:** Check service health:
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.federated.yml logs valkey-federated
|
||||
```
|
||||
|
||||
If Valkey is running, verify your firewall allows 6380. Note that on macOS Docker Desktop, `localhost:6380` is correct when connecting from the host; `host.docker.internal` is only needed when connecting to the host-published port from inside another container.
|
||||
|
||||
## Key rotation (deferred)
|
||||
|
||||
Federation peer private keys (`federation_peers.client_key_pem`) are sealed at rest using AES-256-GCM with a key derived from `BETTER_AUTH_SECRET` via SHA-256. If `BETTER_AUTH_SECRET` is rotated, all sealed `client_key_pem` values in the database become unreadable and must be re-sealed with the new key before rotation completes.
|
||||
|
||||
The full key rotation procedure (decrypt all rows with old key, re-encrypt with new key, atomically swap the secret) is out of scope for M2. Operators must not rotate `BETTER_AUTH_SECRET` without a migration plan for all sealed federation peer keys.
|
||||
|
||||
## OID Assignments — Mosaic Internal OID Arc
|
||||
|
||||
Mosaic uses the private enterprise arc `1.3.6.1.4.1.99999` for custom X.509
|
||||
certificate extensions in federation grant certificates.
|
||||
|
||||
**IMPORTANT:** This is a development/internal OID arc. Before deploying to a
|
||||
production environment accessible by external parties, register a proper IANA
|
||||
Private Enterprise Number (PEN) at <https://pen.iana.org/pen/PenApplication.page>
|
||||
and update these assignments accordingly.
|
||||
|
||||
### Assigned OIDs
|
||||
|
||||
| OID | Symbolic name | Description |
|
||||
| --------------------- | --------------------------------- | --------------------------------------------------------- |
|
||||
| `1.3.6.1.4.1.99999.1` | `mosaic.federation.grantId` | UUID of the `federation_grants` row authorising this cert |
|
||||
| `1.3.6.1.4.1.99999.2` | `mosaic.federation.subjectUserId` | UUID of the local user on whose behalf the cert is issued |
|
||||
|
||||
### Encoding
|
||||
|
||||
Each extension value is DER-encoded as an ASN.1 **UTF8String**:
|
||||
|
||||
```
|
||||
Tag 0x0C (UTF8String)
|
||||
Length 0x24 (36 decimal — fixed length of a UUID string)
|
||||
Value <36 ASCII bytes of the UUID>
|
||||
```
|
||||
|
||||
The step-ca X.509 template at `infra/step-ca/templates/federation.tpl`
|
||||
produces this encoding via the Go template expression:
|
||||
|
||||
```
|
||||
{{ printf "\x0c\x24%s" .Token.mosaic_grant_id | b64enc }}
|
||||
```
|
||||
|
||||
The resulting base64 value is passed as the `value` field of the extension
|
||||
object in the template JSON.
|
||||
|
||||
### CA Environment Variables
|
||||
|
||||
The `CaService` (`apps/gateway/src/federation/ca.service.ts`) requires the
|
||||
following environment variables at gateway startup:
|
||||
|
||||
| Variable | Required | Description |
|
||||
| ------------------------------ | -------- | -------------------------------------------------------------------- |
|
||||
| `STEP_CA_URL` | Yes | Base URL of the step-ca instance, e.g. `https://step-ca:9000` |
|
||||
| `STEP_CA_PROVISIONER_PASSWORD` | Yes | JWK provisioner password for the `mosaic-fed` provisioner |
|
||||
| `STEP_CA_PROVISIONER_KEY_JSON` | Yes | JSON-encoded JWK (public + private) for the `mosaic-fed` provisioner |
|
||||
| `STEP_CA_ROOT_CERT_PATH` | Yes | Absolute path to the step-ca root CA certificate PEM file |
|
||||
|
||||
Set these variables in your environment or secret manager before starting
|
||||
the gateway. In the federated Docker Compose stack they are expected to be
|
||||
injected via Docker secrets and environment variable overrides.
|
||||
|
||||
### Fail-loud contract
|
||||
|
||||
The CA service (and the X.509 template) are designed to fail loudly if the
|
||||
custom OIDs cannot be embedded:
|
||||
|
||||
- The template produces a malformed extension value (a declared UTF8String
|
||||
  length of 0x24 with an empty body — a DER length/content mismatch) when the
|
||||
  JWT claims `mosaic_grant_id` or `mosaic_subject_user_id` are
|
||||
- `CaService.issueCert()` throws a `CaServiceError` on every error path with
|
||||
a human-readable `remediation` string. It never silently returns a cert that
|
||||
may be missing the required extensions.
|
||||
@@ -15,20 +15,20 @@
|
||||
|
||||
Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No federation logic yet. Existing standalone behavior does not regress.
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ------------------------------- | ---------- | -------- | --------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M1-01 | done | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Shipped in PR #470. Renamed `team` → `standalone`; added `team` deprecation alias; added `DEFAULT_FEDERATED_CONFIG`. |
|
||||
| FED-M1-02 | in-progress | Author `docker-compose.federated.yml` as an overlay profile: Postgres 17 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | sonnet | feat/federation-m1-compose | FED-M1-01 | 5K | Bumped PG16→PG17 to match base compose. Overlay defines distinct `postgres-federated`/`valkey-federated` services, profile-gated. |
|
||||
| FED-M1-03 | not-started | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | codex | feat/federation-m1-pgvector | FED-M1-02 | 8K | Extension create is idempotent `CREATE EXTENSION IF NOT EXISTS vector`. Gate on tier = federated. |
|
||||
| FED-M1-04 | not-started | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | codex | feat/federation-m1-detector | FED-M1-03 | 8K | Structured error type with remediation hints. Logs which service failed, with host:port attempted. |
|
||||
| FED-M1-05 | not-started | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | codex | feat/federation-m1-migrate | FED-M1-04 | 10K | Do NOT run automatically. CLI subcommand `mosaic migrate tier --to federated --dry-run`. Safety rails. |
|
||||
| FED-M1-06 | not-started | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Existing doctor output evolves; add `--json` flag. Green/yellow/red + remediation suggestions per issue. |
|
||||
| FED-M1-07 | not-started | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Vitest + docker-compose test profile. One test file per assertion; real services, no mocks. |
|
||||
| FED-M1-08 | not-started | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Runs against docker-compose federated profile; uses temp PGlite file; deterministic seed. |
|
||||
| FED-M1-09 | not-started | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | haiku | feat/federation-m1-regression | FED-M1-07 | 4K | Reuse existing e2e harness; just re-point at the federation branch build. Canary that we didn't break it. |
|
||||
| FED-M1-10 | not-started | Code review pass: security-focused on the migration script (data-at-rest during migration) + tier detector (error-message sensitivity leakage). Independent reviewer, not authors of tasks 01-09. | #460 | sonnet | — | FED-M1-09 | 8K | Use `feature-dev:code-reviewer` agent. Specifically: no secrets in error messages; no partial-migration footguns. |
|
||||
| FED-M1-11 | not-started | Docs update: `docs/federation/` operator notes for tier setup; README blurb on federated tier; `docs/guides/` entry for migration. Do NOT touch runbook yet (deferred to FED-M7). | #460 | haiku | feat/federation-m1-docs | FED-M1-10 | 4K | Short, actionable. Link from MISSION-MANIFEST. No decisions captured here — those belong in PRD. |
|
||||
| FED-M1-12 | not-started | PR, CI green, merge to main, close #460. | #460 | — | (aggregate) | FED-M1-11 | 3K | Queue-guard before push; wait for green; merge squashed; tea `issue-close` #460. |
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------------------- | ---------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
|
||||
| FED-M1-01 | done | Extend `mosaic.config.json` schema: add `"federated"` to `tier` enum in validator + TS types. Keep `local` and `standalone` working. Update schema docs/README where referenced. | #460 | sonnet | feat/federation-m1-tier-config | — | 4K | Shipped in PR #470. Renamed `team` → `standalone`; added `team` deprecation alias; added `DEFAULT_FEDERATED_CONFIG`. |
|
||||
| FED-M1-02 | done | Author `docker-compose.federated.yml` as an overlay profile: Postgres 17 + pgvector extension (port 5433), Valkey (6380), named volumes, healthchecks. Compose-up should boot cleanly on a clean machine. | #460 | sonnet | feat/federation-m1-compose | FED-M1-01 | 5K | Shipped in PR #471. Overlay defines `postgres-federated`/`valkey-federated`, profile-gated, with pg-init for pgvector extension. |
|
||||
| FED-M1-03 | done | Add pgvector support to `packages/storage/src/adapters/postgres.ts`: create extension on init (idempotent), expose vector column type in schema helpers. No adapter changes for non-federated tiers. | #460 | sonnet | feat/federation-m1-pgvector | FED-M1-02 | 8K | Shipped in PR #472. `enableVector` flag on postgres StorageConfig; idempotent CREATE EXTENSION before migrations. |
|
||||
| FED-M1-04 | done | Implement `apps/gateway/src/bootstrap/tier-detector.ts`: reads config, asserts PG/Valkey/pgvector reachable for `federated`, fail-fast with actionable error message on failure. Unit tests for each failure mode. | #460 | sonnet | feat/federation-m1-detector | FED-M1-03 | 8K | Shipped in PR #473. 12 tests; 5s timeouts on probes; pgvector library/permission discrimination; rejects non-bullmq for federated. |
|
||||
| FED-M1-05 | done | Write `scripts/migrate-to-federated.ts`: one-way migration from `local` (PGlite) / `standalone` (PG without pgvector) → `federated`. Dumps, transforms, loads; dry-run + confirm UX. Idempotent on re-run. | #460 | sonnet | feat/federation-m1-migrate | FED-M1-04 | 10K | Shipped in PR #474. `mosaic storage migrate-tier`; DrizzleMigrationSource (corrects P0 found in review); 32 tests; idempotent. |
|
||||
| FED-M1-06 | done | Update `mosaic doctor`: report current tier, required services, actual health per service, pgvector presence, overall green/yellow/red. Machine-readable JSON output flag for CI use. | #460 | sonnet | feat/federation-m1-doctor | FED-M1-04 | 6K | Shipped in PR #475 as `mosaic gateway doctor`. Probes lifted to @mosaicstack/storage; structural TierConfig breaks dep cycle. |
|
||||
| FED-M1-07 | done | Integration test: gateway boots in `federated` tier with docker-compose `federated` profile; refuses to boot when PG unreachable (asserts fail-fast); pgvector extension query succeeds. | #460 | sonnet | feat/federation-m1-integration | FED-M1-04 | 8K | Shipped in PR #476. 3 test files, 4 tests, gated by FEDERATED_INTEGRATION=1; reserved-port helper avoids host collisions. |
|
||||
| FED-M1-08 | done | Integration test for migration script: seed a local PGlite with representative data (tasks, notes, users, teams), run migration, assert row counts + key samples equal on federated PG. | #460 | sonnet | feat/federation-m1-migrate-test | FED-M1-05 | 6K | Shipped in PR #477. Caught P0 in M1-05 (camelCase→snake_case) missed by mocked unit tests; fix in same PR. |
|
||||
| FED-M1-09 | done | Standalone regression: full agent-session E2E on existing `standalone` tier with a gateway built from this branch. Must pass without referencing any federation module. | #460 | sonnet | feat/federation-m1-regression | FED-M1-07 | 4K | Clean canary. 351 gateway tests + 85 storage unit tests + full pnpm test all green; only FEDERATED_INTEGRATION-gated tests skip. |
|
||||
| FED-M1-10 | done | Code review pass: security-focused on the migration script (data-at-rest during migration) + tier detector (error-message sensitivity leakage). Independent reviewer, not authors of tasks 01-09. | #460 | sonnet | feat/federation-m1-security-review | FED-M1-09 | 8K | 2 review rounds caught 7 issues: credential leak in pg/valkey/pgvector errors + redact-error util; missing advisory lock; SKIP_TABLES rationale. |
|
||||
| FED-M1-11 | done | Docs update: `docs/federation/` operator notes for tier setup; README blurb on federated tier; `docs/guides/` entry for migration. Do NOT touch runbook yet (deferred to FED-M7). | #460 | haiku | feat/federation-m1-docs | FED-M1-10 | 4K | Shipped: `docs/federation/SETUP.md` (119 lines), `docs/guides/migrate-tier.md` (147 lines), README Configuration blurb. |
|
||||
| FED-M1-12 | done | PR, CI green, merge to main, close #460. | #460 | sonnet | feat/federation-m1-close | FED-M1-11 | 3K | M1 closed. PRs #470-#480 merged across 11 tasks. Issue #460 closed; release tag `fed-v0.1.0-m1` published. |
|
||||
|
||||
**M1 total estimate:** ~74K tokens (over-budget vs 20K PRD estimate — explanation below)
|
||||
|
||||
@@ -36,9 +36,52 @@ Goal: Gateway runs in `federated` tier with containerized PG+pgvector+Valkey. No
|
||||
|
||||
---
|
||||
|
||||
## Pre-M2 — Test deployment infrastructure (FED-M2-DEPLOY)
|
||||
|
||||
Goal: Two federated-tier gateways stood up on Portainer at `mos-test-1.woltje.com` and `mos-test-2.woltje.com` running the M1 release (`gateway:fed-v0.1.0-m1`). This is the test bed for M2 enrollment work and the M3 federation E2E harness. No federation logic exercised yet — pure infrastructure validation.
|
||||
|
||||
> **Why now:** M2 enrollment requires a real second gateway to test peer-add flows; standing the test hosts up before M2 code lands gives both code and deployment streams a fast feedback loop.
|
||||
|
||||
> **Parallelizable:** This workstream runs in parallel with the M2 code workstream (M2-01 → M2-13). They re-converge at M2-10 (E2E test).
|
||||
|
||||
> **Tracking issue:** #482.
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------------------- | ----------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----- | ------ | ------------------------------------- | ------------ | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M2-DEPLOY-01 | done | Verify `gateway:fed-v0.1.0-m1` image was published by `.woodpecker/publish.yml` on tag push; if not, investigate and remediate. Document image URI in deployment artifact. | #482 | sonnet | (verified inline, no PR) | — | 2K | Tag exists; digest `sha256:9b72e202a9eecc27d31920b87b475b9e96e483c0323acc57856be4b1355db1ec` captured for digest-pinned deploys. |
|
||||
| FED-M2-DEPLOY-02 | done | Author Portainer git-stack compose file `deploy/portainer/federated-test.stack.yml` (gateway + PG-pgvector + Valkey, env-driven). Use immutable tag, not `latest`. | #482 | sonnet | feat/federation-deploy-stack-template | DEPLOY-01 | 5K | Shipped in PR #485. Digest-pinned. Env: STACK_NAME, HOST_FQDN, POSTGRES_PASSWORD, BETTER_AUTH_SECRET, BETTER_AUTH_URL. |
|
||||
| FED-M2-DEPLOY-IMG-FIX | in-progress | Gateway image runtime broken (ERR_MODULE_NOT_FOUND for `dotenv`); Dockerfile copies `.pnpm/` store but not `apps/gateway/node_modules` symlinks. Switch to `pnpm deploy` for self-contained runtime. | #482 | sonnet | (subagent in flight) | DEPLOY-02 | 4K | Subagent `a78a9ab0ddae91fbc` in flight. Triggers Kaniko rebuild on merge; capture new digest; bump stack template in follow-up PR before redeploy. |
|
||||
| FED-M2-DEPLOY-03 | blocked | Deploy stack to mos-test-1.woltje.com via `~/.config/mosaic/tools/portainer/`. Verify M1 acceptance: federated-tier boot succeeds; `mosaic gateway doctor --json` returns green; pgvector `vector(3)` round-trip works. | #482 | sonnet | feat/federation-deploy-test-1 | IMG-FIX | 3K | Stack created on Portainer endpoint 3 (Swarm `local`), but blocked on image fix. Container fails on boot until IMG-FIX merges + redeploy. |
|
||||
| FED-M2-DEPLOY-04 | blocked | Deploy stack to mos-test-2.woltje.com via Portainer wrapper. Same M1 acceptance probes as DEPLOY-03. | #482 | sonnet | feat/federation-deploy-test-2 | IMG-FIX | 3K | Same status as DEPLOY-03. Stack created; blocked on image fix. |
|
||||
| FED-M2-DEPLOY-05 | not-started | Document deployment in `docs/federation/TEST-INFRA.md`: hosts, image tags, secrets sourcing, redeploy procedure, teardown. Update MISSION-MANIFEST with deployment status. | #482 | haiku | feat/federation-deploy-docs | DEPLOY-03,04 | 3K | Operator-facing doc; mentions but does not duplicate `tools/portainer/README.md`. |
|
||||
|
||||
**Deploy workstream estimate:** ~16K tokens
|
||||
|
||||
---
|
||||
|
||||
## Milestone 2 — Step-CA + grant schema + admin CLI (FED-M2)
|
||||
|
||||
_Deferred to mission planning when M1 is complete. Issue #461 tracks scope._
|
||||
Goal: An admin can create a federation grant; counterparty enrolls; cert is signed by Step-CA with SAN OIDs for `grantId` + `subjectUserId`. No runtime federation traffic flows yet (that's M3).
|
||||
|
||||
| id | status | description | issue | agent | branch | depends_on | estimate | notes |
|
||||
| --------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----- | ------ | ---------------------------------- | ---------------- | -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| FED-M2-01 | needs-qa | DB migration: `federation_grants`, `federation_peers`, `federation_audit_log` tables + enum types (`grant_status`, `peer_state`). Drizzle schema + migration generation; migration tests. | #461 | sonnet | feat/federation-m2-schema | — | 5K | PR #486 open. First review NEEDS CHANGES (missing DESC indexes + reserved cols). Remediation subagent `a673dd9355dc26f82` in flight in worktree `agent-a4404ac1`. |
|
||||
| FED-M2-02 | not-started | Add Step-CA sidecar to `docker-compose.federated.yml`: official `smallstep/step-ca` image, persistent CA volume, JWK provisioner config baked into init script. | #461 | sonnet | feat/federation-m2-stepca | DEPLOY-02 | 4K | Profile-gated under `federated`. CA password from secret; dev compose uses dev-only password file. |
|
||||
| FED-M2-03 | not-started | Scope JSON schema + validator: `resources` allowlist, `excluded_resources`, `include_teams`, `include_personal`, `max_rows_per_query`. Vitest unit tests for valid + invalid scopes. | #461 | sonnet | feat/federation-m2-scope-schema | — | 4K | Validator independent of CA — reusable from grant CRUD + (later) M3 scope enforcement. |
|
||||
| FED-M2-04 | not-started | `apps/gateway/src/federation/ca.service.ts`: Step-CA client (CSR submission, OID-bearing cert retrieval). Mocked + integration tests against real Step-CA container. | #461 | sonnet | feat/federation-m2-ca-service | M2-02 | 6K | SAN OIDs: `grantId` (custom OID 1.3.6.1.4.1.99999.1) + `subjectUserId` (1.3.6.1.4.1.99999.2). Document OID assignments in PRD/SETUP. **Acceptance**: must (a) wire `federation.tpl` template into `mosaic-fed` provisioner config and (b) include a unit/integration test asserting issued certs contain BOTH OIDs — fails-loud guard against silent OID stripping (carry-forward from M2-02 review). |
|
||||
| FED-M2-05 | not-started | Sealed storage for `client_key_pem` reusing existing `provider_credentials` sealing key. Tests prove DB-at-rest is ciphertext, not PEM. Key rotation path documented (deferred impl). | #461 | sonnet | feat/federation-m2-key-sealing | M2-01 | 5K | Separate from M2-06 to keep crypto seam isolated; reviewer focus is sealing only. |
|
||||
| FED-M2-06 | not-started | `grants.service.ts`: CRUD + status transitions (`pending` → `active` → `revoked`); integrates M2-03 (scope) + M2-05 (sealing). Unit tests cover all transitions including invalid ones. | #461 | sonnet | feat/federation-m2-grants-service | M2-03, M2-05 | 6K | Business logic only — CSR + cert work delegated to M2-04. Revocation handler is M6. |
|
||||
| FED-M2-07 | not-started | `enrollment.controller.ts`: short-lived single-use token endpoint; CSR signing; updates grant `pending` → `active`; emits enrollment audit (table-only write, M4 tightens). | #461 | sonnet | feat/federation-m2-enrollment | M2-04, M2-06 | 6K | Tokens single-use with 410 on replay; tokens TTL'd at 15min; rate-limited at request layer (M4 introduces guard, M2 uses simple lock). |
|
||||
| FED-M2-08 | not-started | Admin CLI: `mosaic federation grant create/list/show` + `peer add/list`. Integration with grants.service (no API duplication). Help output + machine-readable JSON option. | #461 | sonnet | feat/federation-m2-cli | M2-06, M2-07 | 7K | `peer add <enrollment-url>` is the client-side flow; resolves enrollment URL → CSR → store sealed key + cert. |
|
||||
| FED-M2-09 | not-started | Integration tests covering MILESTONES.md M2 acceptance tests #1, #2, #3, #5, #7, #8 (single-gateway suite). Real Step-CA container; vitest profile gated by `FEDERATED_INTEGRATION=1`. | #461 | sonnet | feat/federation-m2-integration | M2-08 | 8K | Tests #4 (cert OID match) + #6 (two-gateway peer-add) handled separately by M2-10 (E2E). |
|
||||
| FED-M2-10 | not-started | E2E test against deployed mos-test-1 + mos-test-2 (or local two-gateway docker-compose if Portainer not ready): MILESTONES test #6 `peer add` yields `active` peer record with valid cert + key. | #461 | sonnet | feat/federation-m2-e2e | M2-08, DEPLOY-04 | 6K | Falls back to local docker-compose-two-gateways if remote test hosts not yet available. Documents both paths. |
|
||||
| FED-M2-11 | not-started | Independent security review (sonnet, not author of M2-04/05/06/07): focus on single-use token replay, sealing leak surfaces, OID match enforcement, scope schema bypass paths. | #461 | sonnet | feat/federation-m2-security-review | M2-10 | 8K | Apply M1 two-round pattern. Reviewer should explicitly attempt enrollment-token replay, OID-spoofing CSR, and key leak in error messages. |
|
||||
| FED-M2-12 | not-started | Docs update: `docs/federation/SETUP.md` Step-CA section; new `docs/federation/ADMIN-CLI.md` with grant/peer commands; scope schema reference; OID registration note. Runbook still M7-deferred. | #461 | haiku | feat/federation-m2-docs | M2-11 | 4K | Adds CA bootstrap section to SETUP.md with `docker compose --profile federated up step-ca` example. |
|
||||
| FED-M2-13 | not-started | PR aggregate close, CI green, merge to main, close #461. Release tag `fed-v0.2.0-m2`. Mark deploy stream complete. Update mission manifest M2 row. | #461 | sonnet | feat/federation-m2-close | M2-12 | 3K | Same close pattern as M1-12; queue-guard before merge; tea release-create with notes including deploy-stream PRs. |
|
||||
|
||||
**M2 code workstream estimate:** ~72K tokens (vs MILESTONES.md 30K — same over-budget pattern as M1, where per-task breakdown including tests/review/docs catches the real cost).
|
||||
|
||||
**Deploy + code combined:** ~88K tokens.
|
||||
|
||||
## Milestone 3 — mTLS handshake + list/get + scope enforcement (FED-M3)
|
||||
|
||||
|
||||
147
docs/guides/migrate-tier.md
Normal file
147
docs/guides/migrate-tier.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# Migrating to the Federated Tier
|
||||
|
||||
Step-by-step guide to migrate from `local` (PGlite) or `standalone` (PostgreSQL without pgvector) to `federated` (PostgreSQL 17 + pgvector + Valkey).
|
||||
|
||||
## When to migrate
|
||||
|
||||
Migrate to federated tier when:
|
||||
|
||||
- Scaling from single-user to multi-user deployments
|
||||
- Adding vector embeddings or RAG features
|
||||
- Running Mosaic across multiple hosts
|
||||
- Requiring distributed task queueing and caching
|
||||
- Moving to production with high availability
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Federated stack running and healthy (see [Federated Tier Setup](../federation/SETUP.md))
|
||||
- Source database accessible, and an empty target database ready at the federated URL
|
||||
- Backup of source database (recommended before any migration)
|
||||
|
||||
## Dry-run first
|
||||
|
||||
Always run a dry-run to validate the migration:
|
||||
|
||||
```bash
|
||||
mosaic storage migrate-tier --to federated \
|
||||
--target-url postgresql://mosaic:mosaic@localhost:5433/mosaic \
|
||||
--dry-run
|
||||
```
|
||||
|
||||
Expected output (partial example):
|
||||
|
||||
```
|
||||
[migrate-tier] Analyzing source tier: pglite
|
||||
[migrate-tier] Analyzing target tier: federated
|
||||
[migrate-tier] Precondition: target is empty ✓
|
||||
users: 5 rows
|
||||
teams: 2 rows
|
||||
conversations: 12 rows
|
||||
messages: 187 rows
|
||||
... (all tables listed)
|
||||
[migrate-tier] NOTE: Source tier has no pgvector support. insights.embedding will be NULL on all migrated rows.
|
||||
[migrate-tier] DRY-RUN COMPLETE (no data written). 206 total rows would be migrated.
|
||||
```
|
||||
|
||||
Review the output. If it shows an error (e.g., target not empty), address it before proceeding.
|
||||
|
||||
## Run the migration
|
||||
|
||||
When ready, run without `--dry-run`:
|
||||
|
||||
```bash
|
||||
mosaic storage migrate-tier --to federated \
|
||||
--target-url postgresql://mosaic:mosaic@localhost:5433/mosaic \
|
||||
--yes
|
||||
```
|
||||
|
||||
The `--yes` flag skips the confirmation prompt (required in non-TTY environments like CI).
|
||||
|
||||
The command will:
|
||||
|
||||
1. Acquire an advisory lock (rejects concurrent invocations)
|
||||
2. Copy data from source to target in dependency order
|
||||
3. Report rows migrated per table
|
||||
4. Display any warnings (e.g., null vector embeddings)
|
||||
|
||||
## What gets migrated
|
||||
|
||||
All persistent, user-bound data is migrated in dependency order:
|
||||
|
||||
- **users, teams, team_members** — user and team ownership
|
||||
- **accounts** — OAuth provider tokens (durable credentials)
|
||||
- **projects, agents, missions, tasks** — all project and agent definitions
|
||||
- **conversations, messages** — all chat history
|
||||
- **preferences, insights, agent_logs** — preferences and observability
|
||||
- **provider_credentials** — stored API keys and secrets
|
||||
- **tickets, events, skills, routing_rules, appreciations** — auxiliary records
|
||||
|
||||
Full order is defined in code (`MIGRATION_ORDER` in `packages/storage/src/migrate-tier.ts`).
|
||||
|
||||
## What gets skipped and why
|
||||
|
||||
Three tables are intentionally not migrated:
|
||||
|
||||
| Table | Reason |
|
||||
| ----------------- | ----------------------------------------------------------------------------------------------- |
|
||||
| **sessions** | TTL'd auth sessions from the old environment; they will fail JWT verification on the new target |
|
||||
| **verifications** | One-time tokens (email verify, password reset) that have either expired or been consumed |
|
||||
| **admin_tokens** | Hashed tokens bound to the old environment's secret keys; must be re-issued |
|
||||
|
||||
**Note on accounts and provider_credentials:** These durable credentials ARE migrated because they are user-bound and required for resuming agent work on the target environment. After migration to a multi-tenant federated deployment, operators may want to audit or wipe these if users are untrusted or credentials should not be shared.
|
||||
|
||||
## Idempotency and concurrency
|
||||
|
||||
The migration is **idempotent**:
|
||||
|
||||
- Re-running is safe (uses `ON CONFLICT DO UPDATE` internally)
|
||||
- Ideal for retries on transient failures
|
||||
- Concurrent invocations are prevented by a Postgres advisory lock acquired non-blocking (`pg_try_advisory_lock`); a second caller fails fast with an error rather than waiting
|
||||
|
||||
If a previous run is stuck, check for advisory locks:
|
||||
|
||||
```sql
|
||||
SELECT * FROM pg_locks WHERE locktype='advisory';
|
||||
```
|
||||
|
||||
If you need to force-unlock (dangerous):
|
||||
|
||||
```sql
|
||||
SELECT pg_advisory_unlock(hashtext('mosaic-migrate-tier'));
|
||||
```
|
||||
|
||||
## Verify the migration
|
||||
|
||||
After migration completes, spot-check the target:
|
||||
|
||||
```bash
|
||||
# Count rows on a few critical tables
|
||||
psql postgresql://mosaic:mosaic@localhost:5433/mosaic -c \
|
||||
"SELECT 'users' as tbl, COUNT(*) FROM users UNION ALL
|
||||
SELECT 'conversations' as tbl, COUNT(*) FROM conversations UNION ALL
|
||||
SELECT 'messages' as tbl, COUNT(*) FROM messages;"
|
||||
```
|
||||
|
||||
Verify a known user or project exists by ID:
|
||||
|
||||
```bash
|
||||
psql postgresql://mosaic:mosaic@localhost:5433/mosaic -c \
|
||||
"SELECT id, email FROM users WHERE email='<your-email>';"
|
||||
```
|
||||
|
||||
Ensure vector embeddings are NULL (if source was PGlite) or populated (if source was postgres + pgvector):
|
||||
|
||||
```bash
|
||||
psql postgresql://mosaic:mosaic@localhost:5433/mosaic -c \
|
||||
"SELECT embedding IS NOT NULL as has_vector FROM insights LIMIT 5;"
|
||||
```
|
||||
|
||||
## Rollback
|
||||
|
||||
There is no in-place rollback. If the migration fails:
|
||||
|
||||
1. Restore the target database from a pre-migration backup
|
||||
2. Investigate the failure logs
|
||||
3. Rerun the migration
|
||||
|
||||
Always test migrations in a staging environment first.
|
||||
@@ -343,3 +343,272 @@ Affected files (storage-tier semantics only — Team/workspace usages unaffected
|
||||
|
||||
- `MVP-T04` (sync `.mosaic/orchestrator/mission.json`) still deferred.
|
||||
- `team` tier rename touches install wizard headless env vars (`MOSAIC_STORAGE_TIER=team`); will need 0.0.x deprecation note in scratchpad if release notes are written this milestone.
|
||||
|
||||
---
|
||||
|
||||
## Session 17 — 2026-04-19 — claude
|
||||
|
||||
**Mode:** Delivery (W1 / FED-M1 execution; resumed after compaction)
|
||||
**Branches landed this run:** `feat/federation-m1-tier-config` (PR #470), `feat/federation-m1-compose` (PR #471), `feat/federation-m1-pgvector` (PR #472)
|
||||
**Branch active at end:** `feat/federation-m1-detector` (FED-M1-04, ready to push)
|
||||
|
||||
**Tasks closed:** FED-M1-01, FED-M1-02, FED-M1-03 (all merged to `main` via squash, CI green, issue #460 still open as milestone).
|
||||
|
||||
**FED-M1-04 — tier-detector:** Worker delivered `apps/gateway/src/bootstrap/tier-detector.ts` (~210 lines) + `tier-detector.spec.ts` (12 tests). Independent code review (sonnet) returned `changes-required` with 3 issues:
|
||||
|
||||
1. CRITICAL: `probeValkey` missing `connectTimeout: 5000` on the ioredis Redis client (defaulted to 10s, violated fail-fast spec).
|
||||
2. IMPORTANT: `probePgvector` catch block did not discriminate "library not installed" (use `pgvector/pgvector:pg17`) from permission errors.
|
||||
3. IMPORTANT: Federated tier silently skipped Valkey probe when `queue.type !== 'bullmq'` (computed Valkey URL conditionally).
|
||||
|
||||
Worker fix-up round addressed all three:
|
||||
|
||||
- L147: `connectTimeout: 5000` added to Redis options
|
||||
- L113-117: catch block branches on `extension "vector" is not available` substring → distinct remediation per failure mode
|
||||
- L206-215: federated branch fails fast with `service: 'config'` if `queue.type !== 'bullmq'`, then probes Valkey unconditionally
|
||||
- 4 new tests (8 → 12 total) cover each fix specifically
|
||||
|
||||
Independent verifier (haiku) confirmed all 6 verification claims (line numbers, test presence, suite green: 12/12 PASS).
|
||||
|
||||
**Process note — review pipeline working as designed:**
|
||||
|
||||
Initial verifier (haiku) on the first delivery returned "OK to ship" but missed the 3 deeper issues that the sonnet code-reviewer caught. This validates the user's "always verify subagent claims independently with another subagent" rule — but specifically with the **right tier** for the task: code review needs sonnet-level reasoning, while haiku is fine for verifying surface claims (line counts, file existence) once review issues are known. Going forward: code review uses sonnet (`feature-dev:code-reviewer`), claim verification uses haiku.
|
||||
|
||||
**Followup tasks tracked but deferred:**
|
||||
|
||||
- #7: `tier=local` hardcoded in gateway-config resume branches (~262, ~317) — pre-existing bug, fix during M1-06 (doctor) or M1-09 (regression).
|
||||
- #8: confirm `packages/config/dist` not git-tracked.
|
||||
|
||||
**Next:** PR for FED-M1-04 → CI wait → merge. Then FED-M1-05 (migration script, codex/sonnet, 10K).
|
||||
|
||||
---
|
||||
|
||||
## Session 18 — 2026-04-19 — FED-M1-07 + FED-M1-08
|
||||
|
||||
**Branches landed this run:** `feat/federation-m1-integration` (PR #476, FED-M1-07), `feat/federation-m1-migrate-test` (PR #477, FED-M1-08)
|
||||
**Branch active at end:** none — both PRs merged to main, branches deleted
|
||||
|
||||
**M1 progress:** 8 of 12 tasks done. Remaining: M1-09 (regression e2e, haiku), M1-10 (security review, sonnet), M1-11 (docs, haiku), M1-12 (close + release, orchestrator).
|
||||
|
||||
### FED-M1-07 — Integration tests for federated tier gateway boot
|
||||
|
||||
Three test files under `apps/gateway/src/__tests__/integration/` gated by `FEDERATED_INTEGRATION=1`:
|
||||
|
||||
- `federated-boot.success.integration.test.ts` — `detectAndAssertTier` resolves; `pg_extension` row for `vector` exists
|
||||
- `federated-boot.pg-unreachable.integration.test.ts` — throws `TierDetectionError` with `service: 'postgres'` when PG port is closed
|
||||
- `federated-pgvector.integration.test.ts` — TEMP table with `vector(3)` column round-trips data
|
||||
|
||||
Independent code review (sonnet) returned VERDICT: B with two IMPORTANT items, both fixed in the same PR:
|
||||
|
||||
- Port 5499 collision risk → replaced with `net.createServer().listen(0)` reserved-port helper
|
||||
- `afterAll` and `sql` scoped outside `describe` → moved both inside `describe.skipIf` block
|
||||
|
||||
Independent surface verifier (haiku) confirmed all claims. 4/4 tests pass live; 4/4 skip cleanly without env var.
|
||||
|
||||
### FED-M1-08 — Migration integration test (caught real P0 bug)
|
||||
|
||||
`packages/storage/src/migrate-tier.integration.test.ts` seeds temp PGlite with cross-table data (users, teams, team_members, conversations, messages), runs `runMigrateTier`, asserts row counts + spot-checks. Gated by `FEDERATED_INTEGRATION=1`.
|
||||
|
||||
**P0 bug surfaced and fixed in same PR:** `DrizzleMigrationSource.readTable()` returns Drizzle's camelCase keys (`emailVerified`, `userId`); `PostgresMigrationTarget.upsertBatch()` was using them verbatim as SQL identifiers, producing `column "emailVerified" does not exist` against real federated PG. The 32 unit tests in M1-05 missed this because both source and target were mocked. Fix: `normaliseSourceRow` now applies `toSnakeCase` (`/[A-Z]/g` → `_<lowercase>`), idempotent on already-snake_case keys.
|
||||
|
||||
Code review (sonnet) returned VERDICT: B with one IMPORTANT and one MINOR, both fixed:
|
||||
|
||||
- `createPgliteDbWithVector` and `runPgliteMigrations` were initially added to `@mosaicstack/db` public exports → moved to `packages/storage/src/test-utils/pglite-with-vector.ts` (avoids polluting prod consumers with WASM bundle)
|
||||
- `afterAll` did not call `cleanTarget` → added before connection close, ensuring orphan rows cleaned even on test panic
|
||||
|
||||
Side change: `packages/storage/package.json` gained `"type": "module"` (codebase convention; required for `import.meta.url` in test-utils). All other workspace packages already declared this.
|
||||
|
||||
### Process notes for this session
|
||||
|
||||
- Review-then-verify pipeline now battle-tested: M1-08 reviewer caught the P0 bug + the public-API leak that the worker would have shipped. Without review, both would have gone to main.
|
||||
- Integration tests are paying for themselves immediately: M1-08 caught a real P0 in M1-05 that 32 mocked unit tests missed. Going forward, **at least one real-services integration test per code-mutating PR** should become a soft norm where feasible.
|
||||
- TASKS.md status updates continue to ride on the matching feature branch (avoids direct-to-main commits).
|
||||
|
||||
**Followup tasks tracked but still deferred (no change):**
|
||||
|
||||
- #7: `tier=local` hardcoded in gateway-config resume branches (~262, ~317)
|
||||
- #8: confirm `packages/config/dist` not git-tracked
|
||||
|
||||
**Next:** FED-M1-09 — standalone regression e2e (haiku canary, ~4K). Verifies that the existing `standalone` tier behavior still works end-to-end on the federation-touched build, since M1 changes touched shared paths (storage, config, gateway boot).
|
||||
|
||||
---
|
||||
|
||||
## Session 19 — 2026-04-19 — FED-M1-09 → FED-M1-12 (M1 close)
|
||||
|
||||
**Branches landed this run:** `feat/federation-m1-regression` (PR #478, M1-09), `feat/federation-m1-security-review` (PR #479, M1-10), `feat/federation-m1-docs` (PR #480, M1-11), `feat/federation-m1-close` (PR #481, M1-12)
|
||||
**Branch active at end:** none — M1 closed, all branches deleted, issue #460 closed, release tag `fed-v0.1.0-m1` published
|
||||
|
||||
**M1 progress:** 12 of 12 tasks done. **Milestone complete.**
|
||||
|
||||
### FED-M1-09 — Standalone regression canary
|
||||
|
||||
Verification-only milestone. Re-ran the existing standalone/local test suites against current `main` (with M1-01 → M1-08 merged):
|
||||
|
||||
- 4 target gateway test files: 148/148 pass (conversation-persistence, cross-user-isolation, resource-ownership, session-hardening)
|
||||
- Full gateway suite: 351 pass, 4 skipped (FEDERATED_INTEGRATION-gated only)
|
||||
- Storage unit tests: 85 pass, 1 skipped (integration-gated)
|
||||
- Top-level `pnpm test`: all green; only env-gated skips
|
||||
|
||||
No regression in standalone or local tier. Federation M1 changes are non-disruptive.
|
||||
|
||||
### FED-M1-10 — Security review (two rounds, 7 findings)
|
||||
|
||||
Independent security review surfaced three high-impact and four medium findings; all fixed in same PR.
|
||||
|
||||
**Round 1 (4 findings):**
|
||||
|
||||
- MEDIUM: Credential leak via `postgres`/`ioredis` driver error messages (DSN strings) re-thrown by `migrate-tier.ts` → caller; `cli.ts:402` outer catch
|
||||
- MEDIUM: Same leak in `tier-detection.ts` `probePostgresMeasured` / `probePgvectorMeasured` → emitted as JSON by `mosaic gateway doctor --json`
|
||||
- LOW-MEDIUM: No advisory lock on `migrate-tier`; two concurrent invocations could both pass `checkTargetPreconditions` (non-atomic) and race
|
||||
- ADVISORY: `SKIP_TABLES` lacked rationale comment
|
||||
|
||||
**Fixes:**
|
||||
|
||||
- New internal helper `packages/storage/src/redact-error.ts` — regex `(postgres(?:ql)?|rediss?):\/\/[^@\s]*@` → `<scheme>://***@`. NOT exported from package public surface. 10 unit tests covering all schemes, multi-URL, no-creds, case-insensitive.
|
||||
- `redactErrMsg` applied at all 5 leak sites
|
||||
- `PostgresMigrationTarget.tryAcquireAdvisoryLock()` / `releaseAdvisoryLock()` using session-scoped `pg_try_advisory_lock(hashtext('mosaic-migrate-tier'))`. Acquired before preflight, released in `finally`. Dry-run skips. Non-blocking.
|
||||
- `SKIP_TABLES` comment expanded with rationale for skipped tables (TTL'd / one-time / env-bound) AND why `accounts` (OAuth) and `provider_credentials` (AI keys) are intentionally migrated (durable user-bound, not deployment-bound).
|
||||
|
||||
**Round 2 (3 findings missed by first round):**
|
||||
|
||||
- HIGH: Round 1 regex only covered `postgres` scheme, not `redis`/`rediss` — extended to `(postgres(?:ql)?|rediss?)`
|
||||
- HIGH: `probeValkeyMeasured` was missed in Round 1 → applied `redactErrMsg`
|
||||
- MEDIUM: `cli.ts:402` migrate-tier outer catch was missed in Round 1 → applied `redactErrMsg`
|
||||
|
||||
**Process validation:** the two-round review pattern proved load-bearing for security work. A single review-then-fix cycle would have shipped the Valkey credential leak.
|
||||
|
||||
### FED-M1-11 — Docs (haiku)
|
||||
|
||||
- `docs/federation/SETUP.md` (119 lines): federated tier setup — what it is, prerequisites, docker compose start, mosaic.config.json snippet, doctor health check, troubleshooting
|
||||
- `docs/guides/migrate-tier.md` (147 lines): when to migrate, dry-run first, what migrates/skips with rationale, idempotency + advisory-lock semantics, no in-place rollback
|
||||
- `README.md` Configuration blurb linking to both
|
||||
- Runbook deferred to FED-M7 per TASKS.md scope rule
|
||||
|
||||
### FED-M1-12 — Aggregate close (this PR)
|
||||
|
||||
- Marked M1-12 done in TASKS.md
|
||||
- MISSION-MANIFEST.md: phase → "M1 complete", progress 1/7, M1 row done with PR range #470-#481, session log appended
|
||||
- This Session 19 entry added
|
||||
- Issue #460 closed via `~/.config/mosaic/tools/git/issue-close.sh -i 460`
|
||||
- Release tag `fed-v0.1.0-m1` created and pushed to gitea
|
||||
|
||||
### M1 PR ledger
|
||||
|
||||
| PR | Task | Branch |
|
||||
| ---- | ----------------------------------------- | ---------------------------------- |
|
||||
| #470 | M1-01 (tier config schema) | feat/federation-m1-tier-config |
|
||||
| #471 | M1-02 (compose overlay) | feat/federation-m1-compose |
|
||||
| #472 | M1-03 (pgvector adapter) | feat/federation-m1-pgvector |
|
||||
| #473 | M1-04 (tier-detector) | feat/federation-m1-detector |
|
||||
| #474 | M1-05 (migrate-tier script) | feat/federation-m1-migrate |
|
||||
| #475 | M1-06 (gateway doctor) | feat/federation-m1-doctor |
|
||||
| #476 | M1-07 (boot integration tests) | feat/federation-m1-integration |
|
||||
| #477 | M1-08 (migrate integration test + P0 fix) | feat/federation-m1-migrate-test |
|
||||
| #478 | M1-09 (standalone regression) | feat/federation-m1-regression |
|
||||
| #479 | M1-10 (security review fixes) | feat/federation-m1-security-review |
|
||||
| #480 | M1-11 (docs) | feat/federation-m1-docs |
|
||||
| #481 | M1-12 (aggregate close) | feat/federation-m1-close |
|
||||
|
||||
### Process learnings (M1 retrospective)
|
||||
|
||||
1. **Two-round security review is non-negotiable for security work.** First round caught postgres credential leaks; second round caught equivalent valkey leaks the worker missed when extending the regex. Single-round would have shipped HIGH severity issues.
|
||||
2. **Real-services integration tests catch what mocked unit tests cannot.** M1-08 caught a P0 in M1-05 (camelCase column names) that 32 mocked unit tests missed because both source and target were mocked. Going forward: at least one real-services test per code-mutating PR where feasible.
|
||||
3. **Test-utils for live services co-locate with consumer, not in shared library.** M1-08 reviewer caught `createPgliteDbWithVector` initially being added to `@mosaicstack/db` public exports — would have polluted prod consumers with WASM bundle. Moved to `packages/storage/src/test-utils/`.
|
||||
4. **Per-task budgets including tests/review/docs more accurate than PRD's implementation-only estimates.** M1 PRD estimated 20K; actual ~74K. Future milestones should budget the full delivery cycle.
|
||||
5. **TASKS.md status updates ride feature branches, never direct-to-main.** Caught one violation early in M1; pattern held for all 12 tasks.
|
||||
6. **Subagent tier matters.** Code review needs sonnet-level reasoning (haiku missed deep issues in M1-04); claim verification (line counts, file existence) is fine on haiku.
|
||||
|
||||
**Followup tasks still deferred (carry forward to M2):**
|
||||
|
||||
- #7: `tier=local` hardcoded in gateway-config resume branches (~262, ~317)
|
||||
- #8: confirm `packages/config/dist` not git-tracked
|
||||
|
||||
**Next mission step:** FED-M2 (Step-CA + grant schema + admin CLI). Per TASKS.md scope rule, M2 will be decomposed when it enters active planning. Issue #461 tracks scope.
|
||||
|
||||
## Session 20 — 2026-04-21 — FED-M2 kickoff
|
||||
|
||||
### Decisions
|
||||
|
||||
- **Workstream split**: parallel CODE (M2-01..M2-13, ~72K) + DEPLOY (DEPLOY-01..DEPLOY-05, ~16K) tracks; re-converge at M2-10 E2E.
|
||||
- **Test hosts**: `mos-test-1.woltje.com` (querying side / Server A), `mos-test-2.woltje.com` (serving side / Server B). Wildcard `*.woltje.com` A→174.137.97.162 already exists; Traefik wildcard cert covers both subdomains. No DNS or cert work needed pre-deploy.
|
||||
- **Portainer access**: requires `PORTAINER_INSECURE=1` flag added to mosaic wrappers (self-signed cert at `https://10.1.1.43:9443`). PR pending on `feat/mosaic-portainer-tls-flag`.
|
||||
- **Image policy**: deploy by digest (immutable) per Mosaic policy. `gateway:fed-v0.1.0-m1` digest = `sha256:9b72e202a9eecc27d31920b87b475b9e96e483c0323acc57856be4b1355db1ec`.
|
||||
|
||||
### DEPLOY-01 — image manifest verified
|
||||
|
||||
- Tag `fed-v0.1.0-m1` exists at `git.mosaicstack.dev/mosaicstack/stack/gateway`
|
||||
- Digest: `sha256:9b72e202a9eecc27d31920b87b475b9e96e483c0323acc57856be4b1355db1ec`
|
||||
- 9 layers, ~530MB total
|
||||
- Use this digest in DEPLOY-02 stack template (do NOT reference `:fed-v0.1.0-m1` tag in stack — pin to digest)
|
||||
|
||||
### Registry auth note
|
||||
|
||||
- Gitea container registry uses Bearer token flow (`/v2/token?service=container_registry&scope=repository:<repo>:pull`)
|
||||
- Username: `jarvis` (NOT `mosaicstack`); password: `gitea.mosaicstack.token` from credentials.json
|
||||
- Direct `Authorization: Bearer <pat>` does NOT work — must exchange PAT for registry token first
|
||||
|
||||
### Active PRs
|
||||
|
||||
- #483 — docs: M2 mission planning (TASKS decomposition + manifest update) — CI running
|
||||
- (pending) `feat/mosaic-portainer-tls-flag` — wrapper PORTAINER_INSECURE flag (sonnet subagent in progress)
|
||||
- (pending) `feat/federation-m2-schema` — FED-M2-01 DB schema migration (sonnet subagent in progress)
|
||||
|
||||
### MISSION-MANIFEST layout fix
|
||||
|
||||
- Initial M2 commit had Test Infrastructure block inserted by lint-staged prettier between "Last Updated" and "Parent Mission" — split mission frontmatter
|
||||
- Fixed in 3d001fdb: moved Parent Mission back to frontmatter, kept Test Infrastructure as standalone H2 between Mission and Context
|
||||
|
||||
## Session 21 — 2026-04-21/22 — DEPLOY-02 merged, gateway image bug discovered, M2-01 in remediation
|
||||
|
||||
### PRs merged
|
||||
|
||||
- **#483** — docs(federation): M2 mission planning (TASKS decomposition + manifest update)
|
||||
- **#484** — feat(mosaic-portainer): PORTAINER_INSECURE flag for self-signed TLS (wrapper sync to `~/.config/mosaic/tools/portainer/` done manually due to broken `mosaic upgrade` `set -o pipefail` on dash)
|
||||
- **#485** — feat(deploy): portainer stack template `deploy/portainer/federated-test.stack.yml` for federation test instances [DEPLOY-02]
|
||||
|
||||
### Stack deployed (mos-test-1, mos-test-2)
|
||||
|
||||
- Both stacks created on Portainer endpoint 3 (`local` Swarm @ 10.1.1.43, the only endpoint with traefik-public + woltje.com wildcard cert)
|
||||
- Swarm ID `l7z67tfpd4bvj4979ufpkyi50`
|
||||
- Image pinned to digest `sha256:9b72e202a9eecc27d31920b87b475b9e96e483c0323acc57856be4b1355db1ec`
|
||||
- Traefik labels target `${HOST_FQDN}` per env
|
||||
|
||||
### CRITICAL FINDING — gateway image runtime-broken
|
||||
|
||||
- `docker run` against `gateway:fed-v0.1.0-m1` fails immediately:
|
||||
`Error [ERR_MODULE_NOT_FOUND]: Cannot find package 'dotenv' imported from /app/dist/main.js`
|
||||
- Root cause: `docker/gateway.Dockerfile` copies `/app/node_modules` from builder — but pnpm puts deps in the content-addressed `.pnpm/` store with symlinks at `apps/gateway/node_modules/*`. The runner stage misses the symlinks → Node can't resolve workspace deps.
|
||||
- M1 release was never runtime-tested as a stripped container; CI passed because tests run in dev tree where pnpm symlinks are intact.
|
||||
- **Fix in flight** (subagent `a78a9ab0ddae91fbc`): switch builder to `pnpm --filter @mosaic/gateway --prod deploy /deploy`, then runner copies `/deploy/node_modules` + `/deploy/dist` + `/deploy/package.json`.
|
||||
|
||||
### M2-01 schema review verdict — NEEDS CHANGES
|
||||
|
||||
- PR #486 (`feat/federation-m2-schema`) — independent reviewer (sonnet) found 2 real issues:
|
||||
1. `federation_audit_log` time-range indexes missing `.desc()` on `created_at` (3 places)
|
||||
2. Reserved columns missing per TASKS.md M2-01 spec: `query_hash`, `outcome`, `bytes_out` (M4 will write; spec said reserve now)
|
||||
- Also notes (advisory): subject_user_id correctly `text` (matches BetterAuth users.id; spec defect, not code defect); peer→grant cascade test not present (would be trivial to add)
|
||||
- **Remediation in flight** (subagent `a673dd9355dc26f82` in worktree `agent-a4404ac1`): apply DESC + reserved cols, regenerate migration in place (preferred) or stack 0009 (fallback), force-push, post PR comment.
|
||||
|
||||
### Process notes
|
||||
|
||||
- Branch race incident: schema subagent + wrapper subagent both ran in main checkout → schema files appeared on wrapper branch. Recovered by TaskStop, `git checkout --` to clean, respawned schema subagent with `isolation: "worktree"`. **Rule going forward:** any subagent doing code edits gets `isolation: "worktree"` unless work is single-file and the orchestrator confirms no other branch will touch overlapping files.
|
||||
- `pr-create.sh` shell-quotes backticks badly → use `tea pr create --repo mosaicstack/stack` directly (matches CLI-skill behavior). Will leave a followup to harden pr-create.sh.
|
||||
- Gitea registry auth: bearer-token exchange flow (`/v2/token?service=container_registry&scope=repository:<repo>:pull`) — direct `Authorization: Bearer <pat>` returns 401.
|
||||
- Portainer Swarm stack create endpoint: `POST /api/stacks/create/swarm/string?endpointId=<id>` (NOT `/api/stacks?type=1` — deprecated and rejected with 400).
|
||||
|
||||
### In-flight at compaction boundary
|
||||
|
||||
- Subagent `a78a9ab0ddae91fbc` — Dockerfile pnpm-deploy fix → PR (not yet opened at handoff)
|
||||
- Subagent `a673dd9355dc26f82` — M2-01 schema remediation (DESC + reserved cols) → force-push to PR #486
|
||||
- Both will trigger CI; orchestrator must independently re-review fixes (especially the security-adjacent schema work) per "always verify subagent claims" rule.
|
||||
|
||||
### Next after subagents return
|
||||
|
||||
1. Independent re-review of schema remediation (different subagent, fresh context)
|
||||
2. Merge #486 if green
|
||||
3. Merge Dockerfile fix PR if green → triggers Kaniko CI rebuild → capture new digest
|
||||
4. Update `deploy/portainer/federated-test.stack.yml` to new digest in a small PR
|
||||
5. Redeploy mos-test-1 + mos-test-2 (Portainer stack update via API)
|
||||
6. Verify HTTPS reachability + `/health` endpoint at both hosts
|
||||
7. DEPLOY-03/04 acceptance probes (`mosaic gateway doctor --json`, pgvector `vector(3)` round-trip)
|
||||
8. DEPLOY-05: author `docs/federation/TEST-INFRA.md`
|
||||
9. M2-02 (Step-CA sidecar) kicks off after image health is green
|
||||
|
||||
@@ -28,6 +28,7 @@ export default tseslint.config(
|
||||
'apps/web/e2e/helpers/*.ts',
|
||||
'apps/web/playwright.config.ts',
|
||||
'apps/gateway/vitest.config.ts',
|
||||
'packages/storage/vitest.config.ts',
|
||||
'packages/mosaic/__tests__/*.ts',
|
||||
],
|
||||
},
|
||||
|
||||
1
infra/step-ca/dev-password.example
Normal file
1
infra/step-ca/dev-password.example
Normal file
@@ -0,0 +1 @@
|
||||
dev-only-step-ca-password-do-not-use-in-production
|
||||
90
infra/step-ca/init.sh
Executable file
90
infra/step-ca/init.sh
Executable file
@@ -0,0 +1,90 @@
|
||||
#!/bin/sh
# infra/step-ca/init.sh
#
# Idempotent first-boot initialiser for the Mosaic Federation CA.
#
# On the first run (no /home/step/config/ca.json present) this script:
#   1. Initialises Step-CA with a JWK provisioner named "mosaic-fed".
#   2. Writes the CA configuration to the persistent volume at /home/step.
#   3. Copies the federation X.509 template into the CA config directory.
#   4. Patches the mosaic-fed provisioner entry in ca.json to reference the
#      template via options.x509.templateFile (using jq — must be installed
#      in the container image).
#
# On subsequent runs (config already exists) this script skips init and
# starts the CA directly.
#
# The provisioner name "mosaic-fed" is consumed by:
#   apps/gateway/src/federation/ca.service.ts (added in M2-04)
#
# Password source:
#   Dev:  mounted from ./infra/step-ca/dev-password via bind mount.
#   Prod: mounted from a Docker secret at /run/secrets/ca_password.
#
# OID template:
#   infra/step-ca/templates/federation.tpl emits custom OID extensions:
#     1.3.6.1.4.1.99999.1 — mosaic_grant_id
#     1.3.6.1.4.1.99999.2 — mosaic_subject_user_id
#
# NOTE: the script runs under `set -e`, so a failure anywhere (including the
# jq patch) aborts startup rather than starting a half-configured CA.

set -e

CA_CONFIG="/home/step/config/ca.json"
PASSWORD_FILE="/run/secrets/ca_password"
TEMPLATE_SRC="/etc/step-ca-templates/federation.tpl"
TEMPLATE_DEST="/home/step/templates/federation.tpl"

if [ ! -f "${CA_CONFIG}" ]; then
  echo "[step-ca init] First boot detected — initialising Mosaic Federation CA..."

  step ca init \
    --name "Mosaic Federation CA" \
    --dns "localhost" \
    --dns "step-ca" \
    --address ":9000" \
    --provisioner "mosaic-fed" \
    --password-file "${PASSWORD_FILE}" \
    --provisioner-password-file "${PASSWORD_FILE}" \
    --no-db

  echo "[step-ca init] CA initialised."

  # Copy the X.509 template into the Step-CA config directory.
  # A missing template source is a warning, not a fatal error — the CA can
  # still start; the provisioner patch below simply won't run.
  if [ -f "${TEMPLATE_SRC}" ]; then
    mkdir -p /home/step/templates
    cp "${TEMPLATE_SRC}" "${TEMPLATE_DEST}"
    echo "[step-ca init] Federation X.509 template copied to ${TEMPLATE_DEST}."
  else
    echo "[step-ca init] WARNING: Template source ${TEMPLATE_SRC} not found — skipping copy."
  fi

  # Wire the template into the mosaic-fed provisioner via jq.
  # This is idempotent: the block only runs once (first boot).
  #
  # jq filter: find the provisioner entry with name "mosaic-fed" and set
  # .options.x509.templateFile to the absolute path of the template.
  # All other provisioners and config keys are left unchanged.
  #
  # The write goes via a temp file + mv so ca.json is never truncated
  # mid-write. NOTE(review): if jq itself fails, `set -e` aborts and the
  # .tmp file may be left behind on the volume — harmless, but confirm.
  if [ -f "${TEMPLATE_DEST}" ] && command -v jq > /dev/null 2>&1; then
    echo "[step-ca init] Patching mosaic-fed provisioner with X.509 template..."
    TEMP_CONFIG="${CA_CONFIG}.tmp"
    jq --arg tpl "${TEMPLATE_DEST}" '
      .authority.provisioners |= map(
        if .name == "mosaic-fed" then
          .options.x509.templateFile = $tpl
        else
          .
        end
      )
    ' "${CA_CONFIG}" > "${TEMP_CONFIG}" && mv "${TEMP_CONFIG}" "${CA_CONFIG}"
    echo "[step-ca init] Provisioner patched."
  elif ! command -v jq > /dev/null 2>&1; then
    echo "[step-ca init] WARNING: jq not found — skipping provisioner template patch."
    echo "[step-ca init] Install jq in the step-ca image to enable automatic template wiring."
  fi

  echo "[step-ca init] Startup complete."
else
  echo "[step-ca init] Config already exists — skipping init."
fi

echo "[step-ca init] Starting Step-CA on :9000..."
# Consistency fix: use ${CA_CONFIG} (same path) instead of repeating the
# literal, so the path is defined in exactly one place.
exec step-ca "${CA_CONFIG}" --password-file "${PASSWORD_FILE}"
|
||||
56
infra/step-ca/templates/federation.tpl
Normal file
56
infra/step-ca/templates/federation.tpl
Normal file
@@ -0,0 +1,56 @@
|
||||
{
  "subject": {{ toJson .Subject }},
  "sans": {{ toJson .SANs }},

{{- /*
  Mosaic Federation X.509 Certificate Template
  ============================================
  Provisioner:  mosaic-fed (JWK)
  Implemented:  FED-M2-04

  This template emits two custom OID extensions carrying Mosaic federation
  identifiers. The OTT token (built by CaService.buildOtt) MUST include the
  claims `mosaic_grant_id` and `mosaic_subject_user_id` as top-level JWT
  claims. step-ca exposes them under `.Token.<claim>` in this template.

  OID Registry (Mosaic Internal Arc — 1.3.6.1.4.1.99999):
    1.3.6.1.4.1.99999.1   mosaic_grant_id         (UUID, 36 ASCII chars)
    1.3.6.1.4.1.99999.2   mosaic_subject_user_id  (UUID, 36 ASCII chars)

  DER encoding for each extension value (ASN.1 UTF8String):
    Tag    = 0x0C (UTF8String)
    Length = 0x24 (decimal 36 — the fixed length of a UUID string)
    Value  = 36 ASCII bytes of the UUID

  The `printf` below builds the raw TLV bytes then base64-encodes them.
  step-ca expects the `value` field to be base64-encoded raw DER bytes.
  `%c` renders the claim's length as a single rune, which for 36 is the
  single byte 0x24 — exactly the DER length octet needed here.

  NOTE(review): `%c` emits a multi-byte UTF-8 sequence for any code point
  >= 0x80, which would corrupt the DER length octet. This is safe only
  while the claims are fixed-length 36-char UUIDs — confirm that
  CaService.buildOtt validates claim length before signing the OTT.

  Fail-loud contract:
    If either claim is missing from the token the printf will produce a
    zero-length UUID field, making the extension malformed. step-ca will
    reject the certificate rather than issuing one without the required OIDs.
    Silent OID stripping is NEVER tolerated.
    NOTE(review): this contract is asserted here, not demonstrated — verify
    with a negative-path test (OTT missing a claim) against a live step-ca.

  Step-CA template reference:
    https://smallstep.com/docs/step-ca/templates
*/ -}}

  "extensions": [
    {
      "id": "1.3.6.1.4.1.99999.1",
      "critical": false,
      "value": "{{ printf "\x0c%c%s" (len .Token.mosaic_grant_id) .Token.mosaic_grant_id | b64enc }}"
    },
    {
      "id": "1.3.6.1.4.1.99999.2",
      "critical": false,
      "value": "{{ printf "\x0c%c%s" (len .Token.mosaic_subject_user_id) .Token.mosaic_subject_user_id | b64enc }}"
    }
  ],

  "keyUsage": ["digitalSignature"],
  "extKeyUsage": ["clientAuth"],
  "basicConstraints": {
    "isCA": false
  }
}
|
||||
@@ -10,3 +10,4 @@ export {
|
||||
type SsoTeamSyncConfig,
|
||||
type SupportedSsoProviderId,
|
||||
} from './sso.js';
|
||||
export { seal, unseal } from './seal.js';
|
||||
|
||||
52
packages/auth/src/seal.ts
Normal file
52
packages/auth/src/seal.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import { createCipheriv, createDecipheriv, createHash, randomBytes } from 'node:crypto';
|
||||
|
||||
const ALGORITHM = 'aes-256-gcm';
|
||||
const IV_LENGTH = 12; // 96-bit IV for GCM
|
||||
const TAG_LENGTH = 16; // 128-bit auth tag
|
||||
|
||||
/**
|
||||
* Derive a 32-byte AES-256 key from BETTER_AUTH_SECRET using SHA-256.
|
||||
* Throws if BETTER_AUTH_SECRET is not set.
|
||||
*/
|
||||
function deriveKey(): Buffer {
|
||||
const secret = process.env['BETTER_AUTH_SECRET'];
|
||||
if (!secret) {
|
||||
throw new Error('BETTER_AUTH_SECRET is not set — cannot derive encryption key');
|
||||
}
|
||||
return createHash('sha256').update(secret).digest();
|
||||
}
|
||||
|
||||
/**
|
||||
* Seal a plaintext string using AES-256-GCM.
|
||||
* Output format: base64(IV || authTag || ciphertext)
|
||||
*/
|
||||
export function seal(plaintext: string): string {
|
||||
const key = deriveKey();
|
||||
const iv = randomBytes(IV_LENGTH);
|
||||
const cipher = createCipheriv(ALGORITHM, key, iv);
|
||||
|
||||
const encrypted = Buffer.concat([cipher.update(plaintext, 'utf8'), cipher.final()]);
|
||||
const authTag = cipher.getAuthTag();
|
||||
|
||||
const combined = Buffer.concat([iv, authTag, encrypted]);
|
||||
return combined.toString('base64');
|
||||
}
|
||||
|
||||
/**
|
||||
* Unseal a value sealed by `seal()`.
|
||||
* Throws on authentication failure (tampered data) or if BETTER_AUTH_SECRET is unset.
|
||||
*/
|
||||
export function unseal(encoded: string): string {
|
||||
const key = deriveKey();
|
||||
const combined = Buffer.from(encoded, 'base64');
|
||||
|
||||
const iv = combined.subarray(0, IV_LENGTH);
|
||||
const authTag = combined.subarray(IV_LENGTH, IV_LENGTH + TAG_LENGTH);
|
||||
const ciphertext = combined.subarray(IV_LENGTH + TAG_LENGTH);
|
||||
|
||||
const decipher = createDecipheriv(ALGORITHM, key, iv);
|
||||
decipher.setAuthTag(authTag);
|
||||
|
||||
const decrypted = Buffer.concat([decipher.update(ciphertext), decipher.final()]);
|
||||
return decrypted.toString('utf8');
|
||||
}
|
||||
@@ -5,4 +5,5 @@ export {
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
loadConfig,
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
} from './mosaic-config.js';
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import {
|
||||
validateConfig,
|
||||
detectFromEnv,
|
||||
DEFAULT_LOCAL_CONFIG,
|
||||
DEFAULT_STANDALONE_CONFIG,
|
||||
DEFAULT_FEDERATED_CONFIG,
|
||||
@@ -106,4 +107,64 @@ describe('DEFAULT_* config constants', () => {
|
||||
const url = (DEFAULT_FEDERATED_CONFIG.storage as { url: string }).url;
|
||||
expect(url).toContain('5433');
|
||||
});
|
||||
|
||||
it('DEFAULT_FEDERATED_CONFIG has enableVector=true on storage', () => {
|
||||
const storage = DEFAULT_FEDERATED_CONFIG.storage as {
|
||||
type: string;
|
||||
url: string;
|
||||
enableVector?: boolean;
|
||||
};
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe('detectFromEnv — tier env-var routing', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
// Work on a fresh copy so individual tests can set/delete keys freely.
|
||||
process.env = { ...originalEnv };
|
||||
delete process.env['MOSAIC_STORAGE_TIER'];
|
||||
delete process.env['DATABASE_URL'];
|
||||
delete process.env['VALKEY_URL'];
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('no env vars → returns local config', () => {
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('local');
|
||||
expect(config.storage.type).toBe('pglite');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated alone → returns federated config with enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
const storage = config.storage as { type: string; enableVector?: boolean };
|
||||
expect(storage.enableVector).toBe(true);
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=federated + DATABASE_URL → uses the URL and still has enableVector=true', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'federated';
|
||||
process.env['DATABASE_URL'] = 'postgresql://custom:pass@db.example.com:5432/mydb';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('federated');
|
||||
const storage = config.storage as { type: string; url: string; enableVector?: boolean };
|
||||
expect(storage.url).toBe('postgresql://custom:pass@db.example.com:5432/mydb');
|
||||
expect(storage.enableVector).toBe(true);
|
||||
expect(config.memory.type).toBe('pgvector');
|
||||
});
|
||||
|
||||
it('MOSAIC_STORAGE_TIER=standalone alone → returns standalone-shaped config (not local)', () => {
|
||||
process.env['MOSAIC_STORAGE_TIER'] = 'standalone';
|
||||
const config = detectFromEnv();
|
||||
expect(config.tier).toBe('standalone');
|
||||
expect(config.storage.type).toBe('postgres');
|
||||
expect(config.memory.type).toBe('keyword');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -40,7 +40,11 @@ export const DEFAULT_STANDALONE_CONFIG: MosaicConfig = {
|
||||
|
||||
/**
 * Defaults for the federated tier: PostgreSQL on port 5433 with vector
 * support enabled, BullMQ for the queue, and pgvector-backed memory.
 */
export const DEFAULT_FEDERATED_CONFIG: MosaicConfig = {
  tier: 'federated',
  storage: {
    type: 'postgres',
    url: 'postgresql://mosaic:mosaic@localhost:5433/mosaic',
    enableVector: true,
  },
  queue: { type: 'bullmq' },
  memory: { type: 'pgvector' },
};
|
||||
@@ -119,7 +123,49 @@ export function validateConfig(raw: unknown): MosaicConfig {
|
||||
/* Loader */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
function detectFromEnv(): MosaicConfig {
|
||||
export function detectFromEnv(): MosaicConfig {
|
||||
const tier = process.env['MOSAIC_STORAGE_TIER'];
|
||||
|
||||
if (tier === 'federated') {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_FEDERATED_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
enableVector: true,
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=federated without DATABASE_URL — use the default
|
||||
// federated config (port 5433, enableVector: true, pgvector memory).
|
||||
return DEFAULT_FEDERATED_CONFIG;
|
||||
}
|
||||
|
||||
if (tier === 'standalone') {
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
storage: {
|
||||
type: 'postgres',
|
||||
url: process.env['DATABASE_URL'],
|
||||
},
|
||||
queue: {
|
||||
type: 'bullmq',
|
||||
url: process.env['VALKEY_URL'],
|
||||
},
|
||||
};
|
||||
}
|
||||
// MOSAIC_STORAGE_TIER=standalone without DATABASE_URL — use the default
|
||||
// standalone config instead of silently falling back to local.
|
||||
return DEFAULT_STANDALONE_CONFIG;
|
||||
}
|
||||
|
||||
// Legacy: DATABASE_URL set without MOSAIC_STORAGE_TIER — treat as standalone.
|
||||
if (process.env['DATABASE_URL']) {
|
||||
return {
|
||||
...DEFAULT_STANDALONE_CONFIG,
|
||||
@@ -133,6 +179,7 @@ function detectFromEnv(): MosaicConfig {
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return DEFAULT_LOCAL_CONFIG;
|
||||
}
|
||||
|
||||
|
||||
75
packages/db/drizzle/0008_smart_lyja.sql
Normal file
75
packages/db/drizzle/0008_smart_lyja.sql
Normal file
@@ -0,0 +1,75 @@
|
||||
-- FED-M2-01 — federation schema: enums, admin tokens, peers, grants, audit log.
CREATE TYPE "public"."grant_status" AS ENUM('active', 'revoked', 'expired');--> statement-breakpoint
CREATE TYPE "public"."peer_state" AS ENUM('pending', 'active', 'suspended', 'revoked');--> statement-breakpoint
-- Admin API tokens: only the hash is stored; expires_at NULL = non-expiring.
CREATE TABLE "admin_tokens" (
  "id" text PRIMARY KEY NOT NULL,
  "user_id" text NOT NULL,
  "token_hash" text NOT NULL,
  "label" text NOT NULL,
  "scope" text DEFAULT 'admin' NOT NULL,
  "expires_at" timestamp with time zone,
  "last_used_at" timestamp with time zone,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
-- Audit log: peer_id/subject_user_id/grant_id are nullable so rows survive
-- deletion of the referenced entity (FKs below are ON DELETE SET NULL).
-- query_hash/outcome/bytes_out are reserved columns, written starting in M4.
CREATE TABLE "federation_audit_log" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "request_id" text NOT NULL,
  "peer_id" uuid,
  "subject_user_id" text,
  "grant_id" uuid,
  "verb" text NOT NULL,
  "resource" text NOT NULL,
  "status_code" integer NOT NULL,
  "result_count" integer,
  "denied_reason" text,
  "latency_ms" integer,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL,
  "query_hash" text,
  "outcome" text,
  "bytes_out" integer
);
--> statement-breakpoint
-- Grants: per-user, per-peer scope document (jsonb); cascade-deleted with
-- either the user or the peer (FKs below).
CREATE TABLE "federation_grants" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "subject_user_id" text NOT NULL,
  "peer_id" uuid NOT NULL,
  "scope" jsonb NOT NULL,
  "status" "grant_status" DEFAULT 'active' NOT NULL,
  "expires_at" timestamp with time zone,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL,
  "revoked_at" timestamp with time zone,
  "revoked_reason" text
);
--> statement-breakpoint
-- Peers: one row per remote federation instance, keyed by cert identity.
-- common_name and cert_serial are unique; client_key_pem is nullable.
CREATE TABLE "federation_peers" (
  "id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
  "common_name" text NOT NULL,
  "display_name" text NOT NULL,
  "cert_pem" text NOT NULL,
  "cert_serial" text NOT NULL,
  "cert_not_after" timestamp with time zone NOT NULL,
  "client_key_pem" text,
  "state" "peer_state" DEFAULT 'pending' NOT NULL,
  "endpoint_url" text,
  "last_seen_at" timestamp with time zone,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL,
  "revoked_at" timestamp with time zone,
  CONSTRAINT "federation_peers_common_name_unique" UNIQUE("common_name"),
  CONSTRAINT "federation_peers_cert_serial_unique" UNIQUE("cert_serial")
);
--> statement-breakpoint
ALTER TABLE "admin_tokens" ADD CONSTRAINT "admin_tokens_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_audit_log" ADD CONSTRAINT "federation_audit_log_peer_id_federation_peers_id_fk" FOREIGN KEY ("peer_id") REFERENCES "public"."federation_peers"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_audit_log" ADD CONSTRAINT "federation_audit_log_subject_user_id_users_id_fk" FOREIGN KEY ("subject_user_id") REFERENCES "public"."users"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_audit_log" ADD CONSTRAINT "federation_audit_log_grant_id_federation_grants_id_fk" FOREIGN KEY ("grant_id") REFERENCES "public"."federation_grants"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_grants" ADD CONSTRAINT "federation_grants_subject_user_id_users_id_fk" FOREIGN KEY ("subject_user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_grants" ADD CONSTRAINT "federation_grants_peer_id_federation_peers_id_fk" FOREIGN KEY ("peer_id") REFERENCES "public"."federation_peers"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "admin_tokens_user_id_idx" ON "admin_tokens" USING btree ("user_id");--> statement-breakpoint
CREATE UNIQUE INDEX "admin_tokens_hash_idx" ON "admin_tokens" USING btree ("token_hash");--> statement-breakpoint
-- Audit-log indexes are DESC on created_at to serve newest-first range scans.
CREATE INDEX "federation_audit_log_peer_created_at_idx" ON "federation_audit_log" USING btree ("peer_id","created_at" DESC NULLS LAST);--> statement-breakpoint
CREATE INDEX "federation_audit_log_subject_created_at_idx" ON "federation_audit_log" USING btree ("subject_user_id","created_at" DESC NULLS LAST);--> statement-breakpoint
CREATE INDEX "federation_audit_log_created_at_idx" ON "federation_audit_log" USING btree ("created_at" DESC NULLS LAST);--> statement-breakpoint
CREATE INDEX "federation_grants_subject_status_idx" ON "federation_grants" USING btree ("subject_user_id","status");--> statement-breakpoint
CREATE INDEX "federation_grants_peer_status_idx" ON "federation_grants" USING btree ("peer_id","status");--> statement-breakpoint
CREATE INDEX "federation_peers_cert_serial_idx" ON "federation_peers" USING btree ("cert_serial");--> statement-breakpoint
CREATE INDEX "federation_peers_state_idx" ON "federation_peers" USING btree ("state");
|
||||
2
packages/db/drizzle/0009_federation_grant_pending.sql
Normal file
2
packages/db/drizzle/0009_federation_grant_pending.sql
Normal file
@@ -0,0 +1,2 @@
|
||||
-- FED-M2: add 'pending' as the initial grant lifecycle state and make it the
-- column default.
--
-- NOTE(review): PostgreSQL rejects referencing an enum value added by
-- ALTER TYPE ... ADD VALUE later in the SAME transaction ("unsafe use of new
-- value ...", SQLSTATE 55P04) — and the SET DEFAULT below coerces the literal
-- 'pending' to grant_status. If the migration runner wraps this file in a
-- single transaction, this migration fails. Confirm the runner's transaction
-- behavior, or split the two statements into separate migration files.
ALTER TYPE "public"."grant_status" ADD VALUE 'pending' BEFORE 'active';--> statement-breakpoint
ALTER TABLE "federation_grants" ALTER COLUMN "status" SET DEFAULT 'pending';
|
||||
11
packages/db/drizzle/0010_federation_enrollment_tokens.sql
Normal file
11
packages/db/drizzle/0010_federation_enrollment_tokens.sql
Normal file
@@ -0,0 +1,11 @@
|
||||
-- FED-M2: one-time enrollment tokens binding a grant to a peer.
-- used_at marks single-use consumption; rows cascade-delete with either the
-- grant or the peer.
--
-- NOTE(review): the token itself is the primary key, stored in plaintext —
-- consider storing only a hash (as admin_tokens does with token_hash) so a
-- DB read cannot yield live enrollment credentials. Also no index on
-- expires_at; confirm whether an expiry-sweep job needs one.
CREATE TABLE "federation_enrollment_tokens" (
  "token" text PRIMARY KEY NOT NULL,
  "grant_id" uuid NOT NULL,
  "peer_id" uuid NOT NULL,
  "expires_at" timestamp with time zone NOT NULL,
  "used_at" timestamp with time zone,
  "created_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "federation_enrollment_tokens" ADD CONSTRAINT "federation_enrollment_tokens_grant_id_federation_grants_id_fk" FOREIGN KEY ("grant_id") REFERENCES "public"."federation_grants"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "federation_enrollment_tokens" ADD CONSTRAINT "federation_enrollment_tokens_peer_id_federation_peers_id_fk" FOREIGN KEY ("peer_id") REFERENCES "public"."federation_peers"("id") ON DELETE cascade ON UPDATE no action;
|
||||
3375
packages/db/drizzle/meta/0008_snapshot.json
Normal file
3375
packages/db/drizzle/meta/0008_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
3376
packages/db/drizzle/meta/0009_snapshot.json
Normal file
3376
packages/db/drizzle/meta/0009_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
3453
packages/db/drizzle/meta/0010_snapshot.json
Normal file
3453
packages/db/drizzle/meta/0010_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -57,6 +57,27 @@
|
||||
"when": 1774227064500,
|
||||
"tag": "0006_swift_shen",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 8,
|
||||
"version": "7",
|
||||
"when": 1776822435828,
|
||||
"tag": "0008_smart_lyja",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 9,
|
||||
"version": "7",
|
||||
"when": 1745280000000,
|
||||
"tag": "0009_federation_grant_pending",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 10,
|
||||
"version": "7",
|
||||
"when": 1745366400000,
|
||||
"tag": "0010_federation_enrollment_tokens",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
424
packages/db/src/federation.integration.test.ts
Normal file
424
packages/db/src/federation.integration.test.ts
Normal file
@@ -0,0 +1,424 @@
|
||||
/**
|
||||
* FED-M2-01 — Integration test: federation DB schema (peers / grants / audit_log).
|
||||
*
|
||||
* Prereq: docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
* (or any postgres with the mosaic schema already applied)
|
||||
* Run: FEDERATED_INTEGRATION=1 pnpm --filter @mosaicstack/db test src/federation.integration.test.ts
|
||||
*
|
||||
* Skipped when FEDERATED_INTEGRATION !== '1'.
|
||||
*
|
||||
* Strategy:
|
||||
* - Applies the federation migration SQL directly (idempotent: CREATE TYPE/TABLE
|
||||
* with IF NOT EXISTS guards applied via inline SQL before the migration DDL).
|
||||
* - Assumes the base schema (users table etc.) already exists in the target DB.
|
||||
* - All test rows use the `fed-m2-01-` prefix; cleanup in afterAll.
|
||||
*
|
||||
* Coverage:
|
||||
* 1. Federation tables + enums apply cleanly against the existing schema.
|
||||
* 2. Insert a sample user + peer + grant + audit row; verify round-trip.
|
||||
* 3. FK cascade: deleting the user cascades to federation_grants.
|
||||
* 4. FK set-null: deleting the peer sets federation_audit_log.peer_id to NULL.
|
||||
* 5. Enum constraint: inserting an invalid status/state value throws a DB error.
|
||||
* 6. Unique constraint: duplicate cert_serial throws a DB error.
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { afterAll, beforeAll, describe, expect, it } from 'vitest';
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
|
||||
const PG_URL = process.env['DATABASE_URL'] ?? 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
|
||||
/** Recognisable test-row prefix for safe cleanup without full-table truncation. */
|
||||
const T = 'fed-m2-01';
|
||||
|
||||
// Deterministic IDs (UUID format required for uuid PK columns: 8-4-4-4-12 hex digits).
|
||||
const PEER1_ID = `f2000001-0000-4000-8000-000000000001`;
|
||||
const PEER2_ID = `f2000002-0000-4000-8000-000000000002`;
|
||||
const USER1_ID = `${T}-user-1`;
|
||||
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
|
||||
beforeAll(async () => {
|
||||
if (!run) return;
|
||||
sql = postgres(PG_URL, { max: 1, connect_timeout: 10, idle_timeout: 10 });
|
||||
|
||||
// Apply the federation enums and tables idempotently.
|
||||
// This mirrors the migration file but uses IF NOT EXISTS guards so it can run
|
||||
// against a DB that may not have had drizzle migrations tracked.
|
||||
await sql`
|
||||
DO $$ BEGIN
|
||||
CREATE TYPE peer_state AS ENUM ('pending', 'active', 'suspended', 'revoked');
|
||||
EXCEPTION WHEN duplicate_object THEN NULL;
|
||||
END $$
|
||||
`;
|
||||
await sql`
|
||||
DO $$ BEGIN
|
||||
CREATE TYPE grant_status AS ENUM ('active', 'revoked', 'expired');
|
||||
EXCEPTION WHEN duplicate_object THEN NULL;
|
||||
END $$
|
||||
`;
|
||||
await sql`
|
||||
CREATE TABLE IF NOT EXISTS federation_peers (
|
||||
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
common_name text NOT NULL,
|
||||
display_name text NOT NULL,
|
||||
cert_pem text NOT NULL,
|
||||
cert_serial text NOT NULL,
|
||||
cert_not_after timestamp with time zone NOT NULL,
|
||||
client_key_pem text,
|
||||
state peer_state NOT NULL DEFAULT 'pending',
|
||||
endpoint_url text,
|
||||
last_seen_at timestamp with time zone,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT now(),
|
||||
revoked_at timestamp with time zone,
|
||||
CONSTRAINT federation_peers_common_name_unique UNIQUE (common_name),
|
||||
CONSTRAINT federation_peers_cert_serial_unique UNIQUE (cert_serial)
|
||||
)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_peers_cert_serial_idx ON federation_peers (cert_serial)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_peers_state_idx ON federation_peers (state)
|
||||
`;
|
||||
await sql`
|
||||
CREATE TABLE IF NOT EXISTS federation_grants (
|
||||
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
subject_user_id text NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
peer_id uuid NOT NULL REFERENCES federation_peers(id) ON DELETE CASCADE,
|
||||
scope jsonb NOT NULL,
|
||||
status grant_status NOT NULL DEFAULT 'active',
|
||||
expires_at timestamp with time zone,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT now(),
|
||||
revoked_at timestamp with time zone,
|
||||
revoked_reason text
|
||||
)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_grants_subject_status_idx ON federation_grants (subject_user_id, status)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_grants_peer_status_idx ON federation_grants (peer_id, status)
|
||||
`;
|
||||
await sql`
|
||||
CREATE TABLE IF NOT EXISTS federation_audit_log (
|
||||
id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
request_id text NOT NULL,
|
||||
peer_id uuid REFERENCES federation_peers(id) ON DELETE SET NULL,
|
||||
subject_user_id text REFERENCES users(id) ON DELETE SET NULL,
|
||||
grant_id uuid REFERENCES federation_grants(id) ON DELETE SET NULL,
|
||||
verb text NOT NULL,
|
||||
resource text NOT NULL,
|
||||
status_code integer NOT NULL,
|
||||
result_count integer,
|
||||
denied_reason text,
|
||||
latency_ms integer,
|
||||
created_at timestamp with time zone NOT NULL DEFAULT now(),
|
||||
query_hash text,
|
||||
outcome text,
|
||||
bytes_out integer
|
||||
)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_audit_log_peer_created_at_idx
|
||||
ON federation_audit_log (peer_id, created_at DESC NULLS LAST)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_audit_log_subject_created_at_idx
|
||||
ON federation_audit_log (subject_user_id, created_at DESC NULLS LAST)
|
||||
`;
|
||||
await sql`
|
||||
CREATE INDEX IF NOT EXISTS federation_audit_log_created_at_idx
|
||||
ON federation_audit_log (created_at DESC NULLS LAST)
|
||||
`;
|
||||
});
|
||||
|
||||
afterAll(async () => {
  // sql is undefined when the suite was skipped (FEDERATED_INTEGRATION !== '1').
  if (!sql) return;

  // Cleanup in FK-safe order (children before parents).
  // Every delete is best-effort (.catch swallows errors) so a failure in one
  // statement — e.g. a table that was never created — does not abort the rest
  // of the cleanup or fail the suite.
  await sql`DELETE FROM federation_audit_log WHERE request_id LIKE ${T + '%'}`.catch(() => {});
  // Grants are matched on either key that tests stamp with the T prefix:
  // subject_user_id (most tests) or revoked_reason (the user-cascade test,
  // whose subject user id also carries the prefix).
  await sql`
    DELETE FROM federation_grants
    WHERE subject_user_id LIKE ${T + '%'}
       OR revoked_reason LIKE ${T + '%'}
  `.catch(() => {});
  await sql`DELETE FROM federation_peers WHERE common_name LIKE ${T + '%'}`.catch(() => {});
  // Users last: deleting them earlier would cascade grants anyway, but keeping
  // strict child-before-parent order makes the intent explicit.
  await sql`DELETE FROM users WHERE id LIKE ${T + '%'}`.catch(() => {});
  await sql.end({ timeout: 3 }).catch(() => {});
});
|
||||
|
||||
describe.skipIf(!run)('federation schema — integration', () => {
|
||||
// ── 1. Insert sample rows ──────────────────────────────────────────────────
|
||||
|
||||
it('inserts a user, peer, grant, and audit row without constraint violation', async () => {
|
||||
const certPem = '-----BEGIN CERTIFICATE-----\nMIItest\n-----END CERTIFICATE-----';
|
||||
|
||||
// User — BetterAuth users.id is text (any string, not uuid).
|
||||
await sql!`
|
||||
INSERT INTO users (id, name, email, email_verified, created_at, updated_at)
|
||||
VALUES (${USER1_ID}, ${'M2-01 Test User'}, ${USER1_ID + '@example.com'}, false, now(), now())
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
|
||||
// Peer
|
||||
await sql!`
|
||||
INSERT INTO federation_peers
|
||||
(id, common_name, display_name, cert_pem, cert_serial, cert_not_after, state, created_at)
|
||||
VALUES (
|
||||
${PEER1_ID},
|
||||
${T + '-gateway-example-com'},
|
||||
${'Test Peer'},
|
||||
${certPem},
|
||||
${T + '-serial-001'},
|
||||
now() + interval '1 year',
|
||||
${'active'},
|
||||
now()
|
||||
)
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
|
||||
// Grant — scope is jsonb; pass as JSON string and cast server-side.
|
||||
const scopeJson = JSON.stringify({
|
||||
resources: ['tasks', 'notes'],
|
||||
operations: ['list', 'get'],
|
||||
});
|
||||
const grants = await sql!`
|
||||
INSERT INTO federation_grants
|
||||
(subject_user_id, peer_id, scope, status, created_at)
|
||||
VALUES (
|
||||
${USER1_ID},
|
||||
${PEER1_ID},
|
||||
${scopeJson}::jsonb,
|
||||
${'active'},
|
||||
now()
|
||||
)
|
||||
RETURNING id
|
||||
`;
|
||||
expect(grants).toHaveLength(1);
|
||||
const grantId = grants[0]!['id'] as string;
|
||||
|
||||
// Audit log row
|
||||
await sql!`
|
||||
INSERT INTO federation_audit_log
|
||||
(request_id, peer_id, subject_user_id, grant_id, verb, resource, status_code, created_at)
|
||||
VALUES (
|
||||
${T + '-req-001'},
|
||||
${PEER1_ID},
|
||||
${USER1_ID},
|
||||
${grantId},
|
||||
${'list'},
|
||||
${'tasks'},
|
||||
${200},
|
||||
now()
|
||||
)
|
||||
`;
|
||||
|
||||
// Verify the audit row is present and has correct data.
|
||||
const auditRows = await sql!`
|
||||
SELECT * FROM federation_audit_log WHERE request_id = ${T + '-req-001'}
|
||||
`;
|
||||
expect(auditRows).toHaveLength(1);
|
||||
expect(auditRows[0]!['status_code']).toBe(200);
|
||||
expect(auditRows[0]!['verb']).toBe('list');
|
||||
expect(auditRows[0]!['resource']).toBe('tasks');
|
||||
}, 30_000);
|
||||
|
||||
// ── 2. FK cascade: user delete cascades grants ─────────────────────────────
|
||||
|
||||
it('cascade-deletes federation_grants when the subject user is deleted', async () => {
|
||||
const cascadeUserId = `${T}-cascade-user`;
|
||||
await sql!`
|
||||
INSERT INTO users (id, name, email, email_verified, created_at, updated_at)
|
||||
VALUES (${cascadeUserId}, ${'Cascade User'}, ${cascadeUserId + '@example.com'}, false, now(), now())
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
const scopeJson = JSON.stringify({ resources: ['tasks'] });
|
||||
await sql!`
|
||||
INSERT INTO federation_grants
|
||||
(subject_user_id, peer_id, scope, status, revoked_reason, created_at)
|
||||
VALUES (
|
||||
${cascadeUserId},
|
||||
${PEER1_ID},
|
||||
${scopeJson}::jsonb,
|
||||
${'active'},
|
||||
${T + '-cascade-test'},
|
||||
now()
|
||||
)
|
||||
`;
|
||||
|
||||
const before = await sql!`
|
||||
SELECT count(*)::int AS cnt FROM federation_grants WHERE subject_user_id = ${cascadeUserId}
|
||||
`;
|
||||
expect(before[0]!['cnt']).toBe(1);
|
||||
|
||||
// Delete user → grants should cascade-delete.
|
||||
await sql!`DELETE FROM users WHERE id = ${cascadeUserId}`;
|
||||
|
||||
const after = await sql!`
|
||||
SELECT count(*)::int AS cnt FROM federation_grants WHERE subject_user_id = ${cascadeUserId}
|
||||
`;
|
||||
expect(after[0]!['cnt']).toBe(0);
|
||||
}, 15_000);
|
||||
|
||||
// ── 3. FK set-null: peer delete sets audit_log.peer_id to NULL ────────────
|
||||
|
||||
it('sets federation_audit_log.peer_id to NULL when the peer is deleted', async () => {
|
||||
// Insert a throwaway peer for this specific cascade test.
|
||||
await sql!`
|
||||
INSERT INTO federation_peers
|
||||
(id, common_name, display_name, cert_pem, cert_serial, cert_not_after, state, created_at)
|
||||
VALUES (
|
||||
${PEER2_ID},
|
||||
${T + '-gateway-throwaway-com'},
|
||||
${'Throwaway Peer'},
|
||||
${'cert-pem-placeholder'},
|
||||
${T + '-serial-002'},
|
||||
now() + interval '1 year',
|
||||
${'active'},
|
||||
now()
|
||||
)
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
const reqId = `${T}-req-setnull`;
|
||||
await sql!`
|
||||
INSERT INTO federation_audit_log
|
||||
(request_id, peer_id, subject_user_id, verb, resource, status_code, created_at)
|
||||
VALUES (
|
||||
${reqId},
|
||||
${PEER2_ID},
|
||||
${USER1_ID},
|
||||
${'get'},
|
||||
${'tasks'},
|
||||
${200},
|
||||
now()
|
||||
)
|
||||
`;
|
||||
|
||||
await sql!`DELETE FROM federation_peers WHERE id = ${PEER2_ID}`;
|
||||
|
||||
const rows = await sql!`
|
||||
SELECT peer_id FROM federation_audit_log WHERE request_id = ${reqId}
|
||||
`;
|
||||
expect(rows).toHaveLength(1);
|
||||
expect(rows[0]!['peer_id']).toBeNull();
|
||||
}, 15_000);
|
||||
|
||||
// ── 4. Enum constraint: invalid grant_status rejected ─────────────────────
|
||||
|
||||
it('rejects an invalid grant_status value with a DB error', async () => {
|
||||
const scopeJson = JSON.stringify({ resources: ['tasks'] });
|
||||
await expect(
|
||||
sql!`
|
||||
INSERT INTO federation_grants
|
||||
(subject_user_id, peer_id, scope, status, created_at)
|
||||
VALUES (
|
||||
${USER1_ID},
|
||||
${PEER1_ID},
|
||||
${scopeJson}::jsonb,
|
||||
${'invalid_status'},
|
||||
now()
|
||||
)
|
||||
`,
|
||||
).rejects.toThrow();
|
||||
}, 10_000);
|
||||
|
||||
// ── 5. Enum constraint: invalid peer_state rejected ───────────────────────
|
||||
|
||||
it('rejects an invalid peer_state value with a DB error', async () => {
|
||||
await expect(
|
||||
sql!`
|
||||
INSERT INTO federation_peers
|
||||
(common_name, display_name, cert_pem, cert_serial, cert_not_after, state, created_at)
|
||||
VALUES (
|
||||
${'bad-state-peer'},
|
||||
${'Bad State'},
|
||||
${'pem'},
|
||||
${'bad-serial-999'},
|
||||
now() + interval '1 year',
|
||||
${'invalid_state'},
|
||||
now()
|
||||
)
|
||||
`,
|
||||
).rejects.toThrow();
|
||||
}, 10_000);
|
||||
|
||||
// ── 6. Unique constraint: duplicate cert_serial rejected ──────────────────
|
||||
|
||||
it('rejects a duplicate cert_serial with a unique constraint violation', async () => {
|
||||
await expect(
|
||||
sql!`
|
||||
INSERT INTO federation_peers
|
||||
(common_name, display_name, cert_pem, cert_serial, cert_not_after, state, created_at)
|
||||
VALUES (
|
||||
${T + '-dup-cn'},
|
||||
${'Dup Peer'},
|
||||
${'pem'},
|
||||
${T + '-serial-001'},
|
||||
now() + interval '1 year',
|
||||
${'pending'},
|
||||
now()
|
||||
)
|
||||
`,
|
||||
).rejects.toThrow();
|
||||
}, 10_000);
|
||||
|
||||
// ── 7. FK cascade: peer delete cascades to federation_grants ─────────────
|
||||
|
||||
it('cascade-deletes federation_grants when the owning peer is deleted', async () => {
|
||||
const PEER3_ID = `f2000003-0000-4000-8000-000000000003`;
|
||||
const cascadeGrantUserId = `${T}-cascade-grant-user`;
|
||||
|
||||
// Insert a dedicated user and peer for this test.
|
||||
await sql!`
|
||||
INSERT INTO users (id, name, email, email_verified, created_at, updated_at)
|
||||
VALUES (${cascadeGrantUserId}, ${'Cascade Grant User'}, ${cascadeGrantUserId + '@example.com'}, false, now(), now())
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
await sql!`
|
||||
INSERT INTO federation_peers
|
||||
(id, common_name, display_name, cert_pem, cert_serial, cert_not_after, state, created_at)
|
||||
VALUES (
|
||||
${PEER3_ID},
|
||||
${T + '-gateway-cascade-peer'},
|
||||
${'Cascade Peer'},
|
||||
${'cert-pem-cascade'},
|
||||
${T + '-serial-003'},
|
||||
now() + interval '1 year',
|
||||
${'active'},
|
||||
now()
|
||||
)
|
||||
ON CONFLICT (id) DO NOTHING
|
||||
`;
|
||||
|
||||
const scopeJson = JSON.stringify({ resources: ['tasks'] });
|
||||
await sql!`
|
||||
INSERT INTO federation_grants
|
||||
(subject_user_id, peer_id, scope, status, created_at)
|
||||
VALUES (
|
||||
${cascadeGrantUserId},
|
||||
${PEER3_ID},
|
||||
${scopeJson}::jsonb,
|
||||
${'active'},
|
||||
now()
|
||||
)
|
||||
`;
|
||||
|
||||
const before = await sql!`
|
||||
SELECT count(*)::int AS cnt FROM federation_grants WHERE peer_id = ${PEER3_ID}
|
||||
`;
|
||||
expect(before[0]!['cnt']).toBe(1);
|
||||
|
||||
// Delete peer → grants should cascade-delete.
|
||||
await sql!`DELETE FROM federation_peers WHERE id = ${PEER3_ID}`;
|
||||
|
||||
const after = await sql!`
|
||||
SELECT count(*)::int AS cnt FROM federation_grants WHERE peer_id = ${PEER3_ID}
|
||||
`;
|
||||
expect(after[0]!['cnt']).toBe(0);
|
||||
|
||||
// Cleanup
|
||||
await sql!`DELETE FROM users WHERE id = ${cascadeGrantUserId}`.catch(() => {});
|
||||
}, 15_000);
|
||||
});
|
||||
21
packages/db/src/federation.ts
Normal file
21
packages/db/src/federation.ts
Normal file
@@ -0,0 +1,21 @@
|
||||
/**
|
||||
* Federation schema re-exports.
|
||||
*
|
||||
* The actual table and enum definitions live in schema.ts (alongside all other
|
||||
* Drizzle tables) to avoid CJS/ESM cross-import issues when drizzle-kit loads
|
||||
* schema files via esbuild-register. Application code that wants named imports
|
||||
* for federation symbols should import from this file.
|
||||
*
|
||||
* M2-01: DB tables and enums only. No business logic.
|
||||
* M2-03 will add JSON schema validation for the `scope` column.
|
||||
* M4 will write rows to federation_audit_log.
|
||||
*/
|
||||
|
||||
export {
|
||||
peerStateEnum,
|
||||
grantStatusEnum,
|
||||
federationPeers,
|
||||
federationGrants,
|
||||
federationAuditLog,
|
||||
federationEnrollmentTokens,
|
||||
} from './schema.js';
|
||||
@@ -2,6 +2,7 @@ export { createDb, type Db, type DbHandle } from './client.js';
|
||||
export { createPgliteDb } from './client-pglite.js';
|
||||
export { runMigrations } from './migrate.js';
|
||||
export * from './schema.js';
|
||||
export * from './federation.js';
|
||||
export {
|
||||
eq,
|
||||
and,
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
|
||||
import {
|
||||
pgTable,
|
||||
pgEnum,
|
||||
text,
|
||||
timestamp,
|
||||
boolean,
|
||||
@@ -372,7 +373,11 @@ export const messages = pgTable(
|
||||
|
||||
// ─── pgvector custom type ───────────────────────────────────────────────────
|
||||
|
||||
const vector = customType<{ data: number[]; driverParam: string; config: { dimensions: number } }>({
|
||||
export const vector = customType<{
|
||||
data: number[];
|
||||
driverParam: string;
|
||||
config: { dimensions: number };
|
||||
}>({
|
||||
dataType(config) {
|
||||
return `vector(${config?.dimensions ?? 1536})`;
|
||||
},
|
||||
@@ -581,3 +586,226 @@ export const summarizationJobs = pgTable(
|
||||
},
|
||||
(t) => [index('summarization_jobs_status_idx').on(t.status)],
|
||||
);
|
||||
|
||||
// ─── Federation ──────────────────────────────────────────────────────────────
|
||||
// Enums declared before tables that reference them.
|
||||
// All federation definitions live in this file (avoids CJS/ESM cross-import
|
||||
// issues when drizzle-kit loads schema files via esbuild-register).
|
||||
// Application code imports from `federation.ts` which re-exports from here.
|
||||
|
||||
/**
 * Lifecycle state of a federation peer.
 * - pending: registered but not yet approved / TLS handshake not confirmed
 * - active: fully operational; mTLS verified
 * - suspended: temporarily blocked; cert still valid
 * - revoked: cert revoked; no traffic allowed
 *
 * Backed by the Postgres enum type `peer_state`. Adding a value later requires
 * an ALTER TYPE migration — enum values cannot be removed in Postgres.
 */
export const peerStateEnum = pgEnum('peer_state', ['pending', 'active', 'suspended', 'revoked']);
|
||||
|
||||
/**
 * Lifecycle state of a federation grant.
 * - pending: created but not yet activated (awaiting cert enrollment, M2-07)
 * - active: grant is in effect
 * - revoked: manually revoked before expiry
 * - expired: natural expiry (expires_at passed)
 *
 * Backed by the Postgres enum type `grant_status`. Keep this value list in
 * sync with any hand-written bootstrap DDL (e.g. integration-test setup).
 */
export const grantStatusEnum = pgEnum('grant_status', ['pending', 'active', 'revoked', 'expired']);
|
||||
|
||||
/**
 * A registered peer gateway identified by its Step-CA certificate CN.
 * Represents both inbound peers (other gateways querying us) and outbound
 * peers (gateways we query — identified by client_key_pem being set).
 *
 * Table: `federation_peers`. Both common_name and cert_serial carry UNIQUE
 * constraints, so one row exists per peer identity / certificate.
 */
export const federationPeers = pgTable(
  'federation_peers',
  {
    id: uuid('id').primaryKey().defaultRandom(),

    /** Certificate CN, e.g. "gateway-uscllc-com". Unique — one row per peer identity. */
    commonName: text('common_name').notNull().unique(),

    /** Human-friendly label shown in admin UI. */
    displayName: text('display_name').notNull(),

    /** Pinned PEM certificate used for mTLS verification. */
    certPem: text('cert_pem').notNull(),

    /** Certificate serial number — used for CRL / revocation lookup. */
    certSerial: text('cert_serial').notNull().unique(),

    /** Certificate expiry — used by the renewal scheduler (FED-M6). */
    certNotAfter: timestamp('cert_not_after', { withTimezone: true }).notNull(),

    /**
     * Sealed (encrypted) private key for outbound connections TO this peer.
     * NULL for inbound-only peer rows (we serve them; we don't call them).
     */
    clientKeyPem: text('client_key_pem'),

    /** Current peer lifecycle state. */
    state: peerStateEnum('state').notNull().default('pending'),

    /** Base URL for outbound queries, e.g. "https://woltje.com:443". NULL for inbound-only peers. */
    endpointUrl: text('endpoint_url'),

    /** Timestamp of the most recent successful inbound or outbound request. */
    lastSeenAt: timestamp('last_seen_at', { withTimezone: true }),

    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),

    /** Populated when the cert is revoked; NULL while the peer is active. */
    revokedAt: timestamp('revoked_at', { withTimezone: true }),
  },
  // Secondary indexes (third pgTable argument).
  (t) => [
    // CRL / revocation lookups by serial.
    index('federation_peers_cert_serial_idx').on(t.certSerial),
    // Filter peers by state (e.g. find all active peers for outbound routing).
    index('federation_peers_state_idx').on(t.state),
  ],
);
|
||||
|
||||
/**
 * A grant lets a specific peer cert query a specific local user's data within
 * a defined scope. Scopes are validated by JSON Schema in M2-03; this table
 * stores them as raw jsonb.
 *
 * Table: `federation_grants`. Both FKs cascade on delete, so a grant row never
 * outlives its user or its peer.
 */
export const federationGrants = pgTable(
  'federation_grants',
  {
    id: uuid('id').primaryKey().defaultRandom(),

    /**
     * The local user whose data this grant exposes.
     * Cascade delete: if the user account is deleted, revoke all their grants.
     */
    subjectUserId: text('subject_user_id')
      .notNull()
      .references(() => users.id, { onDelete: 'cascade' }),

    /**
     * The peer gateway holding the grant.
     * Cascade delete: if the peer record is removed, the grant is moot.
     */
    peerId: uuid('peer_id')
      .notNull()
      .references(() => federationPeers.id, { onDelete: 'cascade' }),

    /**
     * Scope object — validated by JSON Schema (M2-03).
     * Example: { "resources": ["tasks", "notes"], "operations": ["list", "get"] }
     */
    scope: jsonb('scope').notNull(),

    /** Current grant lifecycle state. New grants start 'pending' until enrollment (M2-07). */
    status: grantStatusEnum('status').notNull().default('pending'),

    /** Optional hard expiry. NULL means the grant does not expire automatically. */
    expiresAt: timestamp('expires_at', { withTimezone: true }),

    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),

    /** Populated when the grant is explicitly revoked. */
    revokedAt: timestamp('revoked_at', { withTimezone: true }),

    /** Human-readable reason for revocation (audit trail). */
    revokedReason: text('revoked_reason'),
  },
  (t) => [
    // Hot path: look up active grants for a subject user (auth middleware).
    index('federation_grants_subject_status_idx').on(t.subjectUserId, t.status),
    // Hot path: look up active grants held by a peer (inbound request check).
    index('federation_grants_peer_status_idx').on(t.peerId, t.status),
  ],
);
|
||||
|
||||
/**
 * Append-only audit log of all federation requests.
 * M4 writes rows here. M2 only creates the table.
 *
 * All FKs use SET NULL so audit rows survive peer/user/grant deletion.
 * No UNIQUE constraint on request_id — retries of the same request may log
 * multiple rows.
 */
export const federationAuditLog = pgTable(
  'federation_audit_log',
  {
    id: uuid('id').primaryKey().defaultRandom(),

    /** UUIDv7 from the X-Request-ID header — correlates with OTEL traces. */
    requestId: text('request_id').notNull(),

    /** Peer that made the request. SET NULL if the peer is later deleted. */
    peerId: uuid('peer_id').references(() => federationPeers.id, { onDelete: 'set null' }),

    /** Subject user whose data was queried. SET NULL if the user is deleted. */
    subjectUserId: text('subject_user_id').references(() => users.id, { onDelete: 'set null' }),

    /** Grant under which the request was authorised. SET NULL if the grant is deleted. */
    grantId: uuid('grant_id').references(() => federationGrants.id, { onDelete: 'set null' }),

    /** Request verb: "list" | "get" | "search". */
    verb: text('verb').notNull(),

    /** Resource type: "tasks" | "notes" | "memory" | etc. */
    resource: text('resource').notNull(),

    /** HTTP status code returned to the peer. */
    statusCode: integer('status_code').notNull(),

    /** Number of items returned (NULL for non-list requests or errors). */
    resultCount: integer('result_count'),

    /** Why the request was denied (NULL when allowed). */
    deniedReason: text('denied_reason'),

    /** End-to-end latency in milliseconds. */
    latencyMs: integer('latency_ms'),

    createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),

    // Reserved for M4 — see PRD 7.3. Nullable until M4 starts populating them.
    /** SHA-256 of the normalised GraphQL/REST query string; written by M4 search. */
    queryHash: text('query_hash'),
    /** Request outcome: "allowed" | "denied" | "partial"; written by M4. */
    outcome: text('outcome'),
    /** Response payload size in bytes; written by M4. */
    bytesOut: integer('bytes_out'),
  },
  (t) => [
    // Per-peer request history in reverse chronological order.
    index('federation_audit_log_peer_created_at_idx').on(t.peerId, t.createdAt.desc()),
    // Per-user access log in reverse chronological order.
    index('federation_audit_log_subject_created_at_idx').on(t.subjectUserId, t.createdAt.desc()),
    // Global time-range scans (dashboards, rate-limit windows).
    index('federation_audit_log_created_at_idx').on(t.createdAt.desc()),
  ],
);
|
||||
|
||||
/**
 * Single-use enrollment tokens — M2-07.
 *
 * An admin creates a token (with a TTL) and hands it out-of-band to the
 * remote peer operator. The peer redeems it exactly once by posting its
 * CSR to POST /api/federation/enrollment/:token. The token is atomically
 * marked as used to prevent replay attacks.
 *
 * Both FKs cascade, so revoking/deleting the grant or peer also invalidates
 * any outstanding token for it.
 */
export const federationEnrollmentTokens = pgTable('federation_enrollment_tokens', {
  /** 32-byte hex token — crypto.randomBytes(32).toString('hex') */
  token: text('token').primaryKey(),

  /** The federation grant this enrollment activates. */
  grantId: uuid('grant_id')
    .notNull()
    .references(() => federationGrants.id, { onDelete: 'cascade' }),

  /** The peer record that will be updated on successful enrollment. */
  peerId: uuid('peer_id')
    .notNull()
    .references(() => federationPeers.id, { onDelete: 'cascade' }),

  /** Hard expiry — token rejected after this time even if not used. */
  expiresAt: timestamp('expires_at', { withTimezone: true }).notNull(),

  /** NULL until the token is redeemed. Set atomically to prevent replay. */
  usedAt: timestamp('used_at', { withTimezone: true }),

  createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
});
|
||||
|
||||
@@ -13,6 +13,14 @@ export PORTAINER_URL="https://portainer.example.com:9443"
|
||||
export PORTAINER_API_KEY="your-api-key-here"
|
||||
```
|
||||
|
||||
If your Portainer instance uses a self-signed TLS certificate (e.g. internal LAN), set:
|
||||
|
||||
```bash
|
||||
export PORTAINER_INSECURE=1
|
||||
```
|
||||
|
||||
This passes `-k` to all curl calls, bypassing certificate verification. Do not set this against public/production instances.
|
||||
|
||||
You can add these to your shell profile (`~/.bashrc`, `~/.zshrc`) or use a `.env` file.
|
||||
|
||||
### Creating an API Key
|
||||
|
||||
@@ -46,8 +46,14 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Fetch endpoints
|
||||
response=$(curl -s -w "\n%{http_code}" \
|
||||
response=$(curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}/api/endpoints")
|
||||
|
||||
|
||||
@@ -52,8 +52,14 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Fetch stacks
|
||||
response=$(curl -s -w "\n%{http_code}" \
|
||||
response=$(curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}/api/stacks")
|
||||
|
||||
|
||||
@@ -64,12 +64,18 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Function to make API requests
|
||||
api_request() {
|
||||
local method="$1"
|
||||
local endpoint="$2"
|
||||
|
||||
curl -s -w "\n%{http_code}" -X "$method" \
|
||||
curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" -X "$method" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}${endpoint}"
|
||||
}
|
||||
@@ -165,7 +171,7 @@ fi
|
||||
# Note: Docker API returns raw log stream, not JSON
|
||||
if [[ "$FOLLOW" == "true" ]]; then
|
||||
# Stream logs
|
||||
curl -s -N \
|
||||
curl -s "${CURL_OPTS[@]}" -N \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}/api/endpoints/${ENDPOINT_ID}/docker/containers/${CONTAINER_ID}/logs?${params}" | \
|
||||
# Docker log format has 8-byte header per line, strip it
|
||||
@@ -175,7 +181,7 @@ if [[ "$FOLLOW" == "true" ]]; then
|
||||
done
|
||||
else
|
||||
# Get logs (non-streaming)
|
||||
curl -s \
|
||||
curl -s "${CURL_OPTS[@]}" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}/api/endpoints/${ENDPOINT_ID}/docker/containers/${CONTAINER_ID}/logs?${params}" | \
|
||||
# Docker log format has 8-byte header per line, attempt to strip it
|
||||
|
||||
@@ -63,13 +63,19 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Function to make API requests
|
||||
api_request() {
|
||||
local method="$1"
|
||||
local endpoint="$2"
|
||||
local data="${3:-}"
|
||||
|
||||
local args=(-s -w "\n%{http_code}" -X "$method" -H "X-API-Key: ${PORTAINER_API_KEY}")
|
||||
local args=(-s "${CURL_OPTS[@]}" -w "\n%{http_code}" -X "$method" -H "X-API-Key: ${PORTAINER_API_KEY}")
|
||||
|
||||
if [[ -n "$data" ]]; then
|
||||
args+=(-H "Content-Type: application/json" -d "$data")
|
||||
|
||||
@@ -54,12 +54,18 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Function to make API requests
|
||||
api_request() {
|
||||
local method="$1"
|
||||
local endpoint="$2"
|
||||
|
||||
curl -s -w "\n%{http_code}" -X "$method" \
|
||||
curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" -X "$method" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}${endpoint}"
|
||||
}
|
||||
|
||||
@@ -57,12 +57,18 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Function to make API requests
|
||||
api_request() {
|
||||
local method="$1"
|
||||
local endpoint="$2"
|
||||
|
||||
curl -s -w "\n%{http_code}" -X "$method" \
|
||||
curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" -X "$method" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}${endpoint}"
|
||||
}
|
||||
|
||||
@@ -54,12 +54,18 @@ fi
|
||||
# Remove trailing slash from URL
|
||||
PORTAINER_URL="${PORTAINER_URL%/}"
|
||||
|
||||
# TLS options
|
||||
CURL_OPTS=()
|
||||
if [ "${PORTAINER_INSECURE:-0}" = "1" ]; then
|
||||
CURL_OPTS+=(-k)
|
||||
fi
|
||||
|
||||
# Function to make API requests
|
||||
api_request() {
|
||||
local method="$1"
|
||||
local endpoint="$2"
|
||||
|
||||
curl -s -w "\n%{http_code}" -X "$method" \
|
||||
curl -s "${CURL_OPTS[@]}" -w "\n%{http_code}" -X "$method" \
|
||||
-H "X-API-Key: ${PORTAINER_API_KEY}" \
|
||||
"${PORTAINER_URL}${endpoint}"
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import { registerUninstallCommand } from './commands/uninstall.js';
|
||||
// prdy is registered via launch.ts
|
||||
import { registerLaunchCommands } from './commands/launch.js';
|
||||
import { registerAuthCommand } from './commands/auth.js';
|
||||
import { registerFederationCommand } from './commands/federation.js';
|
||||
import { registerGatewayCommand } from './commands/gateway.js';
|
||||
import {
|
||||
backgroundUpdateCheck,
|
||||
@@ -336,6 +337,10 @@ registerAuthCommand(program);
|
||||
|
||||
registerGatewayCommand(program);
|
||||
|
||||
// ─── federation ───────────────────────────────────────────────────────
|
||||
|
||||
registerFederationCommand(program);
|
||||
|
||||
// ─── agent ─────────────────────────────────────────────────────────────
|
||||
|
||||
registerAgentCommand(program);
|
||||
|
||||
410
packages/mosaic/src/commands/federation.ts
Normal file
410
packages/mosaic/src/commands/federation.ts
Normal file
@@ -0,0 +1,410 @@
|
||||
/**
|
||||
* `mosaic federation` command group — federation grant + peer management (FED-M2-08).
|
||||
*
|
||||
* All HTTP calls go to the local gateway admin API using an admin token
|
||||
* resolved from CLI options or meta.json.
|
||||
*
|
||||
* Subcommands:
|
||||
* grant create --peer-id <uuid> --user-id <uuid> --scope <json> [--expires-at <iso>]
|
||||
* grant list [--peer-id <uuid>] [--user-id <uuid>] [--status pending|active|revoked|expired]
|
||||
* grant show <id>
|
||||
* grant revoke <id> [--reason <text>]
|
||||
* grant token <id> [--ttl 900]
|
||||
*
|
||||
* peer list
|
||||
* peer add <enrollment-url>
|
||||
*/
|
||||
|
||||
import type { Command } from 'commander';
|
||||
import { readMeta } from './gateway/daemon.js';
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/** Raw parent-command options as collected by commander for `mosaic federation`. */
interface FedParentOpts {
  host: string;
  port: string; // commander delivers option values as strings; parsed in resolveOpts
  token?: string; // admin token override (-t/--token)
  json?: boolean; // machine-readable output flag
}

/** Options after merging CLI flags with meta.json defaults (see resolveOpts). */
interface ResolvedOpts {
  baseUrl: string; // e.g. `http://localhost:14242`
  token?: string; // admin bearer token, if one could be resolved
  json: boolean;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
function resolveOpts(raw: FedParentOpts): ResolvedOpts {
|
||||
const meta = readMeta();
|
||||
const host = raw.host ?? meta?.host ?? 'localhost';
|
||||
const port = parseInt(raw.port, 10) || meta?.port || 14242;
|
||||
const token = raw.token ?? meta?.adminToken;
|
||||
return {
|
||||
baseUrl: `http://${host}:${port.toString()}`,
|
||||
token,
|
||||
json: raw.json ?? false,
|
||||
};
|
||||
}
|
||||
|
||||
function requireToken(opts: ResolvedOpts): string {
|
||||
if (!opts.token) {
|
||||
console.error(
|
||||
'Error: admin token required. Use -t/--token <token> or ensure meta.json has adminToken.',
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
return opts.token;
|
||||
}
|
||||
|
||||
async function apiRequest<T>(
|
||||
opts: ResolvedOpts,
|
||||
method: string,
|
||||
path: string,
|
||||
body?: unknown,
|
||||
): Promise<T> {
|
||||
const token = requireToken(opts);
|
||||
const url = `${opts.baseUrl}${path}`;
|
||||
|
||||
const res = await fetch(url, {
|
||||
method,
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
Authorization: `Bearer ${token}`,
|
||||
},
|
||||
body: body !== undefined ? JSON.stringify(body) : undefined,
|
||||
});
|
||||
|
||||
const text = await res.text();
|
||||
|
||||
if (!res.ok) {
|
||||
let message = text;
|
||||
try {
|
||||
const parsed = JSON.parse(text) as { message?: string };
|
||||
message = parsed.message ?? text;
|
||||
} catch {
|
||||
// use raw text
|
||||
}
|
||||
throw new Error(`HTTP ${res.status.toString()}: ${message}`);
|
||||
}
|
||||
|
||||
if (!text) return undefined as unknown as T;
|
||||
return JSON.parse(text) as T;
|
||||
}
|
||||
|
||||
function printJson(data: unknown, useJson: boolean): void {
|
||||
if (useJson) {
|
||||
console.log(JSON.stringify(data, null, 2));
|
||||
}
|
||||
}
|
||||
|
||||
function printTable(rows: Record<string, unknown>[]): void {
|
||||
if (rows.length === 0) {
|
||||
console.log('(none)');
|
||||
return;
|
||||
}
|
||||
for (const row of rows) {
|
||||
for (const [key, val] of Object.entries(row)) {
|
||||
console.log(` ${key}: ${String(val ?? '')}`);
|
||||
}
|
||||
console.log('');
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Command registration
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/**
 * Register the `mosaic federation` (alias `fed`) command group on `program`.
 *
 * Parent-level options (host/port/token/--json) are shared by every
 * subcommand via `fed.opts()` and merged with meta.json by resolveOpts.
 *
 * NOTE(review): --host and --port carry commander defaults, so the meta.json
 * fallbacks inside resolveOpts are presumably never reached — confirm.
 */
export function registerFederationCommand(program: Command): void {
  const fed = program
    .command('federation')
    .alias('fed')
    .description('Manage federation grants and peers')
    .option('-h, --host <host>', 'Gateway host', 'localhost')
    .option('-p, --port <port>', 'Gateway port', '14242')
    .option('-t, --token <token>', 'Admin token')
    .option('--json', 'Machine-readable JSON output')
    // Bare `mosaic federation` prints help rather than erroring.
    .action(() => fed.outputHelp());

  // ─── grant subcommands ─────────────────────────────────────────────────

  const grant = fed
    .command('grant')
    .description('Manage federation grants')
    .action(() => grant.outputHelp());

  grant
    .command('create')
    .description('Create a new federation grant')
    .requiredOption('--peer-id <uuid>', 'Peer UUID')
    .requiredOption('--user-id <uuid>', 'Subject user UUID')
    .requiredOption('--scope <json>', 'Grant scope as JSON string')
    .option('--expires-at <iso>', 'Optional expiry (ISO 8601)')
    .action(
      async (cmdOpts: { peerId: string; userId: string; scope: string; expiresAt?: string }) => {
        const opts = resolveOpts(fed.opts() as FedParentOpts);
        try {
          // Fail fast on malformed --scope before touching the network.
          let scope: Record<string, unknown>;
          try {
            scope = JSON.parse(cmdOpts.scope) as Record<string, unknown>;
          } catch {
            console.error('Error: --scope must be valid JSON');
            process.exit(1);
          }

          const body: Record<string, unknown> = {
            peerId: cmdOpts.peerId,
            subjectUserId: cmdOpts.userId,
            scope,
          };
          if (cmdOpts.expiresAt) body['expiresAt'] = cmdOpts.expiresAt;

          const result = await apiRequest<Record<string, unknown>>(
            opts,
            'POST',
            '/api/admin/federation/grants',
            body,
          );

          if (opts.json) {
            printJson(result, true);
          } else {
            console.log(`Grant created: ${String(result['id'])}`);
            console.log(` Peer: ${String(result['peerId'])}`);
            console.log(` User: ${String(result['subjectUserId'])}`);
            console.log(` Status: ${String(result['status'])}`);
          }
        } catch (err) {
          console.error(err instanceof Error ? err.message : String(err));
          process.exit(1);
        }
      },
    );

  grant
    .command('list')
    .description('List federation grants')
    .option('--peer-id <uuid>', 'Filter by peer UUID')
    .option('--user-id <uuid>', 'Filter by subject user UUID')
    .option('--status <status>', 'Filter by status (pending|active|revoked|expired)')
    .action(async (cmdOpts: { peerId?: string; userId?: string; status?: string }) => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        // Only filters the user actually supplied become query params.
        const params = new URLSearchParams();
        if (cmdOpts.peerId) params.set('peerId', cmdOpts.peerId);
        if (cmdOpts.userId) params.set('subjectUserId', cmdOpts.userId);
        if (cmdOpts.status) params.set('status', cmdOpts.status);

        const qs = params.toString() ? `?${params.toString()}` : '';
        const result = await apiRequest<Record<string, unknown>[]>(
          opts,
          'GET',
          `/api/admin/federation/grants${qs}`,
        );

        if (opts.json) {
          printJson(result, true);
        } else {
          console.log(`Grants (${result.length.toString()}):\n`);
          printTable(result);
        }
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });

  grant
    .command('show <id>')
    .description('Get a single grant by ID')
    .action(async (id: string) => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        const result = await apiRequest<Record<string, unknown>>(
          opts,
          'GET',
          `/api/admin/federation/grants/${id}`,
        );

        if (opts.json) {
          printJson(result, true);
        } else {
          for (const [key, val] of Object.entries(result)) {
            console.log(` ${key}: ${String(val ?? '')}`);
          }
        }
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });

  grant
    .command('revoke <id>')
    .description('Revoke an active grant')
    .option('--reason <text>', 'Revocation reason')
    .action(async (id: string, cmdOpts: { reason?: string }) => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        const body: Record<string, unknown> = {};
        if (cmdOpts.reason) body['reason'] = cmdOpts.reason;

        const result = await apiRequest<Record<string, unknown>>(
          opts,
          'PATCH',
          `/api/admin/federation/grants/${id}/revoke`,
          body,
        );

        if (opts.json) {
          printJson(result, true);
        } else {
          console.log(`Grant ${id} revoked.`);
          if (result['revokedReason']) console.log(` Reason: ${String(result['revokedReason'])}`);
        }
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });

  grant
    .command('token <id>')
    .description('Generate a single-use enrollment token for a grant')
    .option('--ttl <seconds>', 'Token lifetime in seconds (60-900)', '900')
    .action(async (id: string, cmdOpts: { ttl: string }) => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        // NOTE(review): a non-numeric (or '0') --ttl silently becomes 900 —
        // confirm the server also enforces the documented 60-900 range.
        const ttlSeconds = parseInt(cmdOpts.ttl, 10) || 900;
        const result = await apiRequest<{
          token: string;
          expiresAt: string;
          enrollmentUrl: string;
        }>(opts, 'POST', `/api/admin/federation/grants/${id}/tokens`, { ttlSeconds });

        if (opts.json) {
          printJson(result, true);
        } else {
          console.log('Enrollment token generated:');
          console.log(` Token: ${result.token}`);
          console.log(` Expires at: ${result.expiresAt}`);
          console.log(` Enrollment URL: ${result.enrollmentUrl}`);
          console.log('');
          console.log('Share the enrollment URL with the remote peer operator.');
        }
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });

  // ─── peer subcommands ──────────────────────────────────────────────────

  const peer = fed
    .command('peer')
    .description('Manage federation peers')
    .action(() => peer.outputHelp());

  peer
    .command('list')
    .description('List all federation peers')
    .action(async () => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        const result = await apiRequest<Record<string, unknown>[]>(
          opts,
          'GET',
          '/api/admin/federation/peers',
        );

        if (opts.json) {
          printJson(result, true);
        } else {
          console.log(`Peers (${result.length.toString()}):\n`);
          printTable(result);
        }
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });

  peer
    .command('add <enrollment-url>')
    .description('Enroll as a peer using a remote enrollment URL')
    .action(async (enrollmentUrl: string) => {
      const opts = resolveOpts(fed.opts() as FedParentOpts);
      try {
        // 1. Validate enrollment URL
        let parsedUrl: URL;
        try {
          parsedUrl = new URL(enrollmentUrl);
        } catch {
          console.error(`Error: invalid enrollment URL: ${enrollmentUrl}`);
          process.exit(1);
        }

        if (parsedUrl.protocol !== 'https:' && parsedUrl.protocol !== 'http:') {
          console.error('Error: enrollment URL must use http or https');
          process.exit(1);
        }

        const hostname = parsedUrl.hostname;
        // Dots are not valid in the CN format used here; swap for dashes.
        const commonName = hostname.replace(/\./g, '-');

        console.log(`Enrolling as peer with remote: ${enrollmentUrl}`);
        console.log(` Common name: ${commonName}`);

        // 2. Generate key pair and CSR via local gateway
        console.log('Generating key pair and CSR...');
        const keypairResult = await apiRequest<{ peerId: string; csrPem: string }>(
          opts,
          'POST',
          '/api/admin/federation/peers/keypair',
          { commonName, displayName: hostname },
        );

        const { peerId, csrPem } = keypairResult;
        console.log(` Peer ID: ${peerId}`);

        // 3. Submit CSR to remote enrollment endpoint (unauthenticated —
        // the enrollment URL itself presumably carries the single-use token).
        console.log('Submitting CSR to remote enrollment endpoint...');
        const remoteRes = await fetch(enrollmentUrl, {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ csrPem }),
        });

        if (!remoteRes.ok) {
          const errText = await remoteRes.text();
          throw new Error(`Remote enrollment failed (${remoteRes.status.toString()}): ${errText}`);
        }

        const remoteResult = (await remoteRes.json()) as { certPem: string; certChainPem: string };

        if (!remoteResult.certPem) {
          throw new Error('Remote enrollment response missing certPem');
        }

        // 4. Store the signed certificate in the local gateway
        console.log('Storing signed certificate...');
        await apiRequest<Record<string, unknown>>(
          opts,
          'PATCH',
          `/api/admin/federation/peers/${peerId}/cert`,
          { certPem: remoteResult.certPem },
        );

        console.log(`\nPeer enrolled successfully.`);
        console.log(` ID: ${peerId}`);
        console.log(` State: active`);
      } catch (err) {
        console.error(err instanceof Error ? err.message : String(err));
        process.exit(1);
      }
    });
}
|
||||
294
packages/mosaic/src/commands/gateway-doctor.spec.ts
Normal file
294
packages/mosaic/src/commands/gateway-doctor.spec.ts
Normal file
@@ -0,0 +1,294 @@
|
||||
/**
|
||||
* Unit tests for gateway-doctor.ts (mosaic gateway doctor).
|
||||
*
|
||||
* All external I/O is mocked — no live services required.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import type { TierHealthReport } from '@mosaicstack/storage';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Shared mock state */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const mocks = vi.hoisted(() => {
|
||||
const mockLoadConfig = vi.fn();
|
||||
const mockProbeServiceHealth = vi.fn();
|
||||
const mockExistsSync = vi.fn();
|
||||
|
||||
return { mockLoadConfig, mockProbeServiceHealth, mockExistsSync };
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Module mocks */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Replace @mosaicstack/config so each test scripts what loadConfig returns.
vi.mock('@mosaicstack/config', () => ({
  loadConfig: mocks.mockLoadConfig,
}));

// Replace @mosaicstack/storage so the health probe is fully controlled.
vi.mock('@mosaicstack/storage', () => ({
  probeServiceHealth: mocks.mockProbeServiceHealth,
}));

// Replace node:fs so config-path detection can be driven per test.
vi.mock('node:fs', () => ({
  existsSync: mocks.mockExistsSync,
}));
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Import SUT */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
import { runGatewayDoctor } from './gateway-doctor.js';
|
||||
import type { MosaicConfig } from '@mosaicstack/config';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Fixtures */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Minimal standalone-tier config used as the default loadConfig result.
const STANDALONE_CONFIG: MosaicConfig = {
  tier: 'standalone',
  storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@localhost:5432/mosaic' },
  queue: { type: 'bullmq', url: 'redis://localhost:6380' },
  memory: { type: 'keyword' },
};

// All services healthy; pgvector marked 'skipped' for this tier.
const GREEN_REPORT: TierHealthReport = {
  tier: 'standalone',
  configPath: '/some/mosaic.config.json',
  overall: 'green',
  services: [
    { name: 'postgres', status: 'ok', host: 'localhost', port: 5432, durationMs: 42 },
    { name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 10 },
    { name: 'pgvector', status: 'skipped', durationMs: 0 },
  ],
};

// Postgres probe failed → overall red, with a remediation hint for output tests.
const RED_REPORT: TierHealthReport = {
  tier: 'standalone',
  configPath: '/some/mosaic.config.json',
  overall: 'red',
  services: [
    {
      name: 'postgres',
      status: 'fail',
      host: 'localhost',
      port: 5432,
      durationMs: 5001,
      error: {
        message: 'connection refused',
        remediation: 'Start Postgres: `docker compose ...`',
      },
    },
    { name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 8 },
    { name: 'pgvector', status: 'skipped', durationMs: 0 },
  ],
};

// Federated tier with all three services probed and ok.
const FEDERATED_GREEN_REPORT: TierHealthReport = {
  tier: 'federated',
  configPath: '/some/mosaic.config.json',
  overall: 'green',
  services: [
    { name: 'postgres', status: 'ok', host: 'localhost', port: 5433, durationMs: 30 },
    { name: 'valkey', status: 'ok', host: 'localhost', port: 6380, durationMs: 5 },
    { name: 'pgvector', status: 'ok', host: 'localhost', port: 5433, durationMs: 25 },
  ],
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Process helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
let stdoutCapture = '';
|
||||
let exitCode: number | undefined;
|
||||
|
||||
function captureOutput(): void {
|
||||
stdoutCapture = '';
|
||||
exitCode = undefined;
|
||||
|
||||
vi.spyOn(process.stdout, 'write').mockImplementation((chunk) => {
|
||||
stdoutCapture += typeof chunk === 'string' ? chunk : chunk.toString();
|
||||
return true;
|
||||
});
|
||||
vi.spyOn(process.stderr, 'write').mockImplementation(() => true);
|
||||
vi.spyOn(process, 'exit').mockImplementation((code?: string | number | null) => {
|
||||
exitCode = typeof code === 'number' ? code : code != null ? Number(code) : undefined;
|
||||
throw new Error(`process.exit(${String(code)})`);
|
||||
});
|
||||
vi.spyOn(console, 'log').mockImplementation((...args: unknown[]) => {
|
||||
stdoutCapture += args.join(' ') + '\n';
|
||||
});
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// End-to-end unit coverage for runGatewayDoctor: output formats, exit codes,
// and config-path resolution, with all I/O mocked above.
describe('runGatewayDoctor', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    captureOutput();

    // By default: no config file on disk (env-detection path)
    mocks.mockExistsSync.mockReturnValue(false);
    mocks.mockLoadConfig.mockReturnValue(STANDALONE_CONFIG);
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);
  });

  afterEach(() => {
    // Restore real stdout/stderr/console/process.exit between tests.
    vi.restoreAllMocks();
  });

  /* ---------------------------------------------------------------- */
  /* 1. JSON mode: parseable JSON matching the schema */
  /* ---------------------------------------------------------------- */

  it('JSON mode emits parseable JSON matching TierHealthReport schema', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);

    await runGatewayDoctor({ json: true });

    const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
    expect(parsed.tier).toBe('standalone');
    expect(parsed.overall).toBe('green');
    expect(Array.isArray(parsed.services)).toBe(true);
    expect(parsed.services).toHaveLength(3);

    // Validate shape of each service check
    for (const svc of parsed.services) {
      expect(['postgres', 'valkey', 'pgvector']).toContain(svc.name);
      expect(['ok', 'fail', 'skipped']).toContain(svc.status);
      expect(typeof svc.durationMs).toBe('number');
    }

    // JSON mode must be silent on console.log — output goes to process.stdout only.
    expect(console.log).not.toHaveBeenCalled();
  });

  it('JSON mode for federated with 3 ok services', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(FEDERATED_GREEN_REPORT);

    await runGatewayDoctor({ json: true });

    const parsed = JSON.parse(stdoutCapture) as TierHealthReport;
    expect(parsed.tier).toBe('federated');
    expect(parsed.overall).toBe('green');
    expect(parsed.services.every((s) => s.status === 'ok')).toBe(true);

    // JSON mode must be silent on console.log — output goes to process.stdout only.
    expect(console.log).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 2. Plain text mode: service lines and overall verdict */
  /* ---------------------------------------------------------------- */

  it('plain text mode includes service lines for each service', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);

    await runGatewayDoctor({});

    expect(stdoutCapture).toContain('postgres');
    expect(stdoutCapture).toContain('valkey');
    expect(stdoutCapture).toContain('pgvector');
  });

  it('plain text mode includes Overall verdict', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);

    await runGatewayDoctor({});

    expect(stdoutCapture).toContain('Overall: GREEN');
  });

  it('plain text mode shows tier and config path in header', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);

    await runGatewayDoctor({});

    expect(stdoutCapture).toContain('Tier: standalone');
  });

  it('plain text mode shows remediation for failed services', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);

    // Red report makes runGatewayDoctor call the throwing process.exit mock.
    try {
      await runGatewayDoctor({});
    } catch {
      // process.exit throws in test
    }

    expect(stdoutCapture).toContain('Remediations:');
    expect(stdoutCapture).toContain('Start Postgres');
  });

  /* ---------------------------------------------------------------- */
  /* 3. Exit codes */
  /* ---------------------------------------------------------------- */

  it('exits with code 1 when overall is red', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);

    await expect(runGatewayDoctor({})).rejects.toThrow('process.exit(1)');
    expect(exitCode).toBe(1);
  });

  it('exits with code 0 (no exit call) when overall is green', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(GREEN_REPORT);

    await runGatewayDoctor({});

    // process.exit should NOT have been called for green.
    expect(exitCode).toBeUndefined();
  });

  it('JSON mode exits with code 1 when overall is red', async () => {
    mocks.mockProbeServiceHealth.mockResolvedValue(RED_REPORT);

    await expect(runGatewayDoctor({ json: true })).rejects.toThrow('process.exit(1)');
    expect(exitCode).toBe(1);
  });

  /* ---------------------------------------------------------------- */
  /* 4. --config path override is honored */
  /* ---------------------------------------------------------------- */

  it('passes --config path to loadConfig when provided', async () => {
    const customPath = '/custom/path/mosaic.config.json';

    await runGatewayDoctor({ config: customPath });

    // loadConfig should have been called with the resolved custom path.
    expect(mocks.mockLoadConfig).toHaveBeenCalledWith(
      expect.stringContaining('mosaic.config.json'),
    );
    // The exact call should include the custom path (resolved).
    const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
    expect(calledPath).toContain('custom/path/mosaic.config.json');
  });

  it('calls loadConfig without path when no --config and no file on disk', async () => {
    mocks.mockExistsSync.mockReturnValue(false);

    await runGatewayDoctor({});

    const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
    // When no file found, resolveConfigPath returns undefined, so loadConfig is called with undefined
    expect(calledPath).toBeUndefined();
  });

  it('finds config from cwd when mosaic.config.json exists there', async () => {
    // First candidate (cwd/mosaic.config.json) exists.
    mocks.mockExistsSync.mockImplementation((p: unknown) => {
      return typeof p === 'string' && p.endsWith('mosaic.config.json');
    });

    await runGatewayDoctor({});

    const [calledPath] = mocks.mockLoadConfig.mock.calls[0] as [string | undefined];
    expect(calledPath).toBeDefined();
    expect(typeof calledPath).toBe('string');
    expect(calledPath!.endsWith('mosaic.config.json')).toBe(true);
  });
});
|
||||
143
packages/mosaic/src/commands/gateway-doctor.ts
Normal file
143
packages/mosaic/src/commands/gateway-doctor.ts
Normal file
@@ -0,0 +1,143 @@
|
||||
/**
|
||||
* gateway-doctor.ts — `mosaic gateway doctor` implementation.
|
||||
*
|
||||
* Reports current tier and per-service health (PG, Valkey, pgvector) for the
|
||||
* Mosaic gateway. Supports machine-readable JSON output for CI.
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 — overall green or yellow
|
||||
* 1 — overall red (at least one required service failed)
|
||||
*/
|
||||
|
||||
import { existsSync } from 'node:fs';
|
||||
import { resolve, join } from 'node:path';
|
||||
import { homedir } from 'node:os';
|
||||
import { loadConfig } from '@mosaicstack/config';
|
||||
import { probeServiceHealth } from '@mosaicstack/storage';
|
||||
import type { TierHealthReport, ServiceCheck } from '@mosaicstack/storage';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Config resolution */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const CONFIG_CANDIDATES = [
|
||||
resolve(process.cwd(), 'mosaic.config.json'),
|
||||
join(homedir(), '.mosaic', 'mosaic.config.json'),
|
||||
];
|
||||
|
||||
/**
|
||||
* Resolve the config path to report in output.
|
||||
*
|
||||
* Priority:
|
||||
* 1. Explicit `--config <path>` flag
|
||||
* 2. `./mosaic.config.json` (cwd)
|
||||
* 3. `~/.mosaic/mosaic.config.json`
|
||||
* 4. undefined — `loadConfig()` falls back to env-var detection
|
||||
*
|
||||
* `loadConfig()` itself already handles priority 1-3 when passed an explicit
|
||||
* path, and falls back to env-detection when none exists. We resolve here
|
||||
* only so we can surface the path in the health report.
|
||||
*/
|
||||
function resolveConfigPath(explicit?: string): string | undefined {
|
||||
if (explicit) return resolve(explicit);
|
||||
for (const candidate of CONFIG_CANDIDATES) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Output helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const TICK = '\u2713'; // ✓
|
||||
const CROSS = '\u2717'; // ✗
|
||||
const SKIP = '-';
|
||||
|
||||
function padRight(s: string, n: number): string {
|
||||
return s + ' '.repeat(Math.max(0, n - s.length));
|
||||
}
|
||||
|
||||
function serviceLabel(svc: ServiceCheck): string {
|
||||
const hostPort =
|
||||
svc.host !== undefined && svc.port !== undefined ? `${svc.host}:${svc.port.toString()}` : '';
|
||||
const duration = `(${svc.durationMs.toString()}ms)`;
|
||||
|
||||
switch (svc.status) {
|
||||
case 'ok':
|
||||
return ` ${TICK} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration}`;
|
||||
case 'fail': {
|
||||
const errMsg = svc.error?.message ?? 'unknown error';
|
||||
return ` ${CROSS} ${padRight(svc.name, 10)} ${padRight(hostPort, 22)} ${duration} \u2192 ${errMsg}`;
|
||||
}
|
||||
case 'skipped':
|
||||
return ` ${SKIP} ${padRight(svc.name, 10)} (skipped)`;
|
||||
}
|
||||
}
|
||||
|
||||
function printReport(report: TierHealthReport): void {
|
||||
const configDisplay = report.configPath ?? '(auto-detected)';
|
||||
console.log(`Tier: ${report.tier} Config: ${configDisplay}`);
|
||||
console.log('');
|
||||
|
||||
for (const svc of report.services) {
|
||||
console.log(serviceLabel(svc));
|
||||
}
|
||||
|
||||
console.log('');
|
||||
|
||||
// Print remediations for failed services.
|
||||
const failed = report.services.filter((s) => s.status === 'fail' && s.error);
|
||||
if (failed.length > 0) {
|
||||
console.log('Remediations:');
|
||||
for (const svc of failed) {
|
||||
if (svc.error) {
|
||||
console.log(` ${svc.name}: ${svc.error.remediation}`);
|
||||
}
|
||||
}
|
||||
console.log('');
|
||||
}
|
||||
|
||||
console.log(`Overall: ${report.overall.toUpperCase()}`);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Main runner */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export interface GatewayDoctorOptions {
|
||||
json?: boolean;
|
||||
config?: string;
|
||||
}
|
||||
|
||||
export async function runGatewayDoctor(opts: GatewayDoctorOptions): Promise<void> {
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
|
||||
let mosaicConfig;
|
||||
try {
|
||||
mosaicConfig = loadConfig(configPath);
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
if (opts.json) {
|
||||
process.stdout.write(
|
||||
JSON.stringify({ error: `Failed to load config: ${msg}` }, null, 2) + '\n',
|
||||
);
|
||||
} else {
|
||||
process.stderr.write(`Error: Failed to load config: ${msg}\n`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const report = await probeServiceHealth(mosaicConfig, configPath);
|
||||
|
||||
if (opts.json) {
|
||||
process.stdout.write(JSON.stringify(report, null, 2) + '\n');
|
||||
} else {
|
||||
printReport(report);
|
||||
}
|
||||
|
||||
// Exit 1 if overall is red.
|
||||
if (report.overall === 'red') {
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
@@ -206,4 +206,15 @@ export function registerGatewayCommand(program: Command): void {
|
||||
const { runUninstall } = await import('./gateway/uninstall.js');
|
||||
await runUninstall();
|
||||
});
|
||||
|
||||
// ─── doctor ─────────────────────────────────────────────────────────────────
|
||||
|
||||
gw.command('doctor')
|
||||
.description('Check gateway tier and per-service health (PG, Valkey, pgvector)')
|
||||
.option('--json', 'Emit TierHealthReport as JSON to stdout (suppresses all other output)')
|
||||
.option('--config <path>', 'Path to mosaic.config.json (defaults to cwd or ~/.mosaic/)')
|
||||
.action(async (cmdOpts: { json?: boolean; config?: string }) => {
|
||||
const { runGatewayDoctor } = await import('./gateway-doctor.js');
|
||||
await runGatewayDoctor({ json: cmdOpts.json, config: cmdOpts.config });
|
||||
});
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"url": "https://git.mosaicstack.dev/mosaicstack/stack.git",
|
||||
"directory": "packages/storage"
|
||||
},
|
||||
"type": "module",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"exports": {
|
||||
@@ -24,9 +25,12 @@
|
||||
"@electric-sql/pglite": "^0.2.17",
|
||||
"@mosaicstack/db": "workspace:^",
|
||||
"@mosaicstack/types": "workspace:*",
|
||||
"commander": "^13.0.0"
|
||||
"commander": "^13.0.0",
|
||||
"ioredis": "^5.10.0",
|
||||
"postgres": "^3.4.8"
|
||||
},
|
||||
"devDependencies": {
|
||||
"drizzle-orm": "^0.45.1",
|
||||
"typescript": "^5.8.0",
|
||||
"vitest": "^2.0.0"
|
||||
},
|
||||
|
||||
107
packages/storage/src/adapters/postgres.spec.ts
Normal file
107
packages/storage/src/adapters/postgres.spec.ts
Normal file
@@ -0,0 +1,107 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import type { DbHandle } from '@mosaicstack/db';
|
||||
|
||||
// Mock @mosaicstack/db before importing the adapter
|
||||
vi.mock('@mosaicstack/db', async (importOriginal) => {
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
const actual = await importOriginal<Record<string, any>>();
|
||||
return {
|
||||
...actual,
|
||||
createDb: vi.fn(),
|
||||
runMigrations: vi.fn().mockResolvedValue(undefined),
|
||||
};
|
||||
});
|
||||
|
||||
import { createDb, runMigrations } from '@mosaicstack/db';
|
||||
import { PostgresAdapter } from './postgres.js';
|
||||
|
||||
describe('PostgresAdapter — vector extension gating', () => {
|
||||
let mockExecute: ReturnType<typeof vi.fn>;
|
||||
let mockDb: { execute: ReturnType<typeof vi.fn> };
|
||||
let mockHandle: Pick<DbHandle, 'close'> & { db: typeof mockDb };
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
mockExecute = vi.fn().mockResolvedValue(undefined);
|
||||
mockDb = { execute: mockExecute };
|
||||
mockHandle = { db: mockDb, close: vi.fn().mockResolvedValue(undefined) };
|
||||
vi.mocked(createDb).mockReturnValue(mockHandle as unknown as DbHandle);
|
||||
});
|
||||
|
||||
it('calls db.execute with CREATE EXTENSION IF NOT EXISTS vector when enableVector=true', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
// Should have called execute
|
||||
expect(mockExecute).toHaveBeenCalledTimes(1);
|
||||
|
||||
// Verify the SQL contains the extension creation statement.
|
||||
// Prefer Drizzle's public toSQL() API; fall back to queryChunks if unavailable.
|
||||
// NOTE: queryChunks is an undocumented Drizzle internal (drizzle-orm ^0.45.x).
|
||||
// toSQL() was not present on the raw sql`` result in this version — if a future
|
||||
// Drizzle upgrade adds it, remove the fallback path and delete this comment.
|
||||
const sqlObj = mockExecute.mock.calls[0]![0] as {
|
||||
toSQL?: () => { sql: string; params: unknown[] };
|
||||
queryChunks?: Array<{ value: string[] }>;
|
||||
};
|
||||
const sqlText = sqlObj.toSQL
|
||||
? sqlObj.toSQL().sql.toLowerCase()
|
||||
: (sqlObj.queryChunks ?? [])
|
||||
.flatMap((chunk) => chunk.value)
|
||||
.join('')
|
||||
.toLowerCase();
|
||||
expect(sqlText).toContain('create extension if not exists vector');
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is false', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: false,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('does NOT call db.execute for extension when enableVector is unset', async () => {
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(mockExecute).not.toHaveBeenCalled();
|
||||
expect(vi.mocked(runMigrations)).toHaveBeenCalledOnce();
|
||||
});
|
||||
|
||||
it('calls runMigrations after the extension is created', async () => {
|
||||
const callOrder: string[] = [];
|
||||
mockExecute.mockImplementation(() => {
|
||||
callOrder.push('execute');
|
||||
return Promise.resolve(undefined);
|
||||
});
|
||||
vi.mocked(runMigrations).mockImplementation(() => {
|
||||
callOrder.push('runMigrations');
|
||||
return Promise.resolve();
|
||||
});
|
||||
|
||||
const adapter = new PostgresAdapter({
|
||||
type: 'postgres',
|
||||
url: 'postgresql://test:test@localhost:5432/test',
|
||||
enableVector: true,
|
||||
});
|
||||
|
||||
await adapter.migrate();
|
||||
|
||||
expect(callOrder).toEqual(['execute', 'runMigrations']);
|
||||
});
|
||||
});
|
||||
@@ -66,13 +66,19 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
private handle: DbHandle;
|
||||
private db: Db;
|
||||
private url: string;
|
||||
private enableVector: boolean;
|
||||
|
||||
constructor(config: Extract<StorageConfig, { type: 'postgres' }>) {
|
||||
this.url = config.url;
|
||||
this.enableVector = config.enableVector ?? false;
|
||||
this.handle = createDb(config.url);
|
||||
this.db = this.handle.db;
|
||||
}
|
||||
|
||||
private async ensureVectorExtension(): Promise<void> {
|
||||
await this.db.execute(sql`CREATE EXTENSION IF NOT EXISTS vector`);
|
||||
}
|
||||
|
||||
async create<T extends Record<string, unknown>>(
|
||||
collection: string,
|
||||
data: T,
|
||||
@@ -149,6 +155,9 @@ export class PostgresAdapter implements StorageAdapter {
|
||||
}
|
||||
|
||||
async migrate(): Promise<void> {
|
||||
if (this.enableVector) {
|
||||
await this.ensureVectorExtension();
|
||||
}
|
||||
await runMigrations(this.url);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import type { Command } from 'commander';
|
||||
import type { MigrationSource } from './migrate-tier.js';
|
||||
import { redactErrMsg } from './redact-error.js';
|
||||
|
||||
/**
|
||||
* Reads the DATABASE_URL environment variable and redacts the password portion.
|
||||
@@ -72,7 +74,7 @@ export function registerStorageCommand(parent: Command): void {
|
||||
console.log('[storage] reachable: yes');
|
||||
} catch (err) {
|
||||
console.log(
|
||||
`[storage] reachable: no (${err instanceof Error ? err.message : String(err)})`,
|
||||
`[storage] reachable: no (${redactErrMsg(err instanceof Error ? err.message : String(err))})`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
@@ -209,6 +211,203 @@ export function registerStorageCommand(parent: Command): void {
|
||||
}
|
||||
});
|
||||
|
||||
// ── storage migrate-tier ─────────────────────────────────────────────────
|
||||
|
||||
storage
|
||||
.command('migrate-tier')
|
||||
.description('Migrate data from tier: local/standalone → tier: federated (Postgres + pgvector)')
|
||||
.requiredOption(
|
||||
'--to <tier>',
|
||||
'Target tier to migrate to (only "federated" is supported)',
|
||||
'federated',
|
||||
)
|
||||
.requiredOption('--target-url <url>', 'Target federated Postgres connection string (required)')
|
||||
.option(
|
||||
'--source-config <path>',
|
||||
'Path to mosaic.config.json (default: cwd/mosaic.config.json)',
|
||||
)
|
||||
.option('--dry-run', 'Print what would be migrated without writing anything')
|
||||
.option('--yes', 'Skip interactive confirmation prompt (required for non-TTY environments)')
|
||||
.option('--batch-size <n>', 'Rows per transaction batch', '1000')
|
||||
.option('--allow-non-empty', 'Allow writing to a non-empty target (upsert — idempotent)')
|
||||
.action(
|
||||
async (opts: {
|
||||
to: string;
|
||||
targetUrl: string;
|
||||
sourceConfig?: string;
|
||||
dryRun?: boolean;
|
||||
yes?: boolean;
|
||||
batchSize?: string;
|
||||
allowNonEmpty?: boolean;
|
||||
}) => {
|
||||
if (opts.to !== 'federated') {
|
||||
console.error(
|
||||
`[migrate-tier] --to "${opts.to}" is not supported. Only "federated" is allowed.`,
|
||||
);
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
const batchSize = parseInt(opts.batchSize ?? '1000', 10);
|
||||
if (isNaN(batchSize) || batchSize < 1) {
|
||||
console.error('[migrate-tier] --batch-size must be a positive integer.');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
// Redact target URL password for display.
|
||||
function redactUrl(url: string): string {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
if (parsed.password) parsed.password = '***';
|
||||
return parsed.toString();
|
||||
} catch {
|
||||
return url.replace(/:([^@/]+)@/, ':***@');
|
||||
}
|
||||
}
|
||||
|
||||
const redactedTarget = redactUrl(opts.targetUrl);
|
||||
const isDryRun = opts.dryRun ?? false;
|
||||
const allowNonEmpty = opts.allowNonEmpty ?? false;
|
||||
|
||||
// Determine source tier from environment.
|
||||
const sourceTier = activeTier();
|
||||
const sourceDesc = configSource();
|
||||
|
||||
console.log('');
|
||||
console.log('[migrate-tier] ─────────────────────────────────────────');
|
||||
console.log(`[migrate-tier] Source tier: ${sourceTier}`);
|
||||
console.log(`[migrate-tier] Source: ${sourceDesc}`);
|
||||
console.log(`[migrate-tier] Target tier: federated (Postgres + pgvector)`);
|
||||
console.log(`[migrate-tier] Target: ${redactedTarget}`);
|
||||
console.log(`[migrate-tier] Batch size: ${batchSize.toString()}`);
|
||||
console.log(`[migrate-tier] Dry run: ${isDryRun.toString()}`);
|
||||
console.log(`[migrate-tier] Allow non-empty: ${allowNonEmpty.toString()}`);
|
||||
console.log('[migrate-tier] ─────────────────────────────────────────');
|
||||
console.log('');
|
||||
|
||||
// Lazy-import core migration logic to keep the CLI thin.
|
||||
const {
|
||||
runMigrateTier,
|
||||
PostgresMigrationTarget,
|
||||
DrizzleMigrationSource,
|
||||
getMigrationOrder,
|
||||
} = await import('./migrate-tier.js');
|
||||
|
||||
// Build source adapter using Drizzle-backed DrizzleMigrationSource.
|
||||
// Both local (PGlite) and standalone (Postgres) sources expose the same
|
||||
// normalized Drizzle schema — this is where the actual domain data lives.
|
||||
let sourceAdapter: MigrationSource;
|
||||
if (sourceTier === 'pglite') {
|
||||
const { createPgliteDb } = await import('@mosaicstack/db');
|
||||
const pgliteDataDir = process.env['PGLITE_DATA_DIR'];
|
||||
if (!pgliteDataDir) {
|
||||
console.error(
|
||||
'[migrate-tier] PGLITE_DATA_DIR is not set. ' +
|
||||
'Cannot open PGlite source — set it to the data directory path.',
|
||||
);
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
const handle = createPgliteDb(pgliteDataDir);
|
||||
// Local/PGlite sources do not have pgvector registered — the embedding
|
||||
// column is omitted from the insights SELECT and set to null on target.
|
||||
sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
|
||||
} else {
|
||||
const { createDb } = await import('@mosaicstack/db');
|
||||
const url = process.env['DATABASE_URL'];
|
||||
if (!url) {
|
||||
console.error('[migrate-tier] DATABASE_URL is not set for postgres source.');
|
||||
process.exitCode = 1;
|
||||
return;
|
||||
}
|
||||
const handle = createDb(url);
|
||||
// Standalone Postgres may or may not have pgvector — assume it does not
|
||||
// (it is a non-federated tier) so embedding is treated as null.
|
||||
sourceAdapter = new DrizzleMigrationSource(handle.db, /* sourceHasVector= */ false);
|
||||
}
|
||||
|
||||
// Print per-table row counts for the confirmation prompt.
|
||||
const tablesToMigrate = getMigrationOrder();
|
||||
const counts: Array<{ table: string; count: number }> = [];
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = await sourceAdapter.count(table);
|
||||
counts.push({ table, count: n });
|
||||
}
|
||||
|
||||
console.log('[migrate-tier] Source row counts:');
|
||||
for (const { table, count } of counts) {
|
||||
console.log(` ${table}: ${count.toString()}`);
|
||||
}
|
||||
console.log(' sessions: SKIPPED (ephemeral)');
|
||||
console.log(' verifications: SKIPPED (ephemeral)');
|
||||
console.log(' admin_tokens: SKIPPED (environment-specific)');
|
||||
console.log('');
|
||||
|
||||
// Interactive confirmation unless --yes or dry-run.
|
||||
const isTTY = process.stdin.isTTY;
|
||||
if (!isDryRun) {
|
||||
if (!opts.yes && !isTTY) {
|
||||
console.error(
|
||||
'[migrate-tier] Not running in a TTY and --yes was not passed. ' +
|
||||
'Pass --yes to confirm in headless environments.',
|
||||
);
|
||||
process.exitCode = 1;
|
||||
await sourceAdapter.close();
|
||||
return;
|
||||
}
|
||||
|
||||
if (!opts.yes) {
|
||||
const { createInterface } = await import('node:readline');
|
||||
const rl = createInterface({ input: process.stdin, output: process.stdout });
|
||||
const answer = await new Promise<string>((resolve) => {
|
||||
rl.question(`This will WRITE to ${redactedTarget}. Continue? [y/N] `, (ans) => {
|
||||
rl.close();
|
||||
resolve(ans);
|
||||
});
|
||||
});
|
||||
if (answer.trim().toLowerCase() !== 'y') {
|
||||
console.log('[migrate-tier] Aborted.');
|
||||
await sourceAdapter.close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const target = new PostgresMigrationTarget(opts.targetUrl);
|
||||
|
||||
try {
|
||||
const result = await runMigrateTier(
|
||||
sourceAdapter,
|
||||
target,
|
||||
{
|
||||
targetUrl: opts.targetUrl,
|
||||
dryRun: isDryRun,
|
||||
allowNonEmpty,
|
||||
batchSize,
|
||||
onProgress: (msg) => console.log(msg),
|
||||
},
|
||||
/* sourceHasVector= */ sourceTier === 'postgres',
|
||||
);
|
||||
|
||||
if (result.dryRun) {
|
||||
console.log('[migrate-tier] Dry run complete. No data was written.');
|
||||
} else {
|
||||
console.log(
|
||||
`[migrate-tier] Migration complete. ${result.totalRows.toString()} rows migrated.`,
|
||||
);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error(
|
||||
`[migrate-tier] ERROR: ${redactErrMsg(err instanceof Error ? err.message : String(err))}`,
|
||||
);
|
||||
process.exitCode = 1;
|
||||
} finally {
|
||||
await Promise.all([sourceAdapter.close(), target.close()]);
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
// ── storage migrate ──────────────────────────────────────────────────────
|
||||
|
||||
storage
|
||||
|
||||
@@ -1,8 +1,29 @@
|
||||
export type { StorageAdapter, StorageConfig } from './types.js';
|
||||
export { TierDetectionError, detectAndAssertTier, probeServiceHealth } from './tier-detection.js';
|
||||
export type { ServiceCheck, TierHealthReport } from './tier-detection.js';
|
||||
export { createStorageAdapter, registerStorageAdapter } from './factory.js';
|
||||
export { PostgresAdapter } from './adapters/postgres.js';
|
||||
export { PgliteAdapter } from './adapters/pglite.js';
|
||||
export { registerStorageCommand } from './cli.js';
|
||||
export {
|
||||
getMigrationOrder,
|
||||
topoSort,
|
||||
runMigrateTier,
|
||||
checkTargetPreconditions,
|
||||
normaliseSourceRow,
|
||||
PostgresMigrationTarget,
|
||||
DrizzleMigrationSource,
|
||||
SKIP_TABLES,
|
||||
MIGRATION_ORDER,
|
||||
MigrationPreconditionError,
|
||||
} from './migrate-tier.js';
|
||||
export type {
|
||||
MigrationSource,
|
||||
MigrationTarget,
|
||||
MigrateTierOptions,
|
||||
MigrateTierResult,
|
||||
TableMigrationResult,
|
||||
} from './migrate-tier.js';
|
||||
|
||||
import { registerStorageAdapter } from './factory.js';
|
||||
import { PostgresAdapter } from './adapters/postgres.js';
|
||||
|
||||
324
packages/storage/src/migrate-tier.integration.test.ts
Normal file
324
packages/storage/src/migrate-tier.integration.test.ts
Normal file
@@ -0,0 +1,324 @@
|
||||
/**
|
||||
* FED-M1-08 — Integration test: PGlite → federated Postgres+pgvector migration.
|
||||
*
|
||||
* Prereq: docker compose -f docker-compose.federated.yml --profile federated up -d
|
||||
* Run: FEDERATED_INTEGRATION=1 pnpm --filter @mosaicstack/storage test src/migrate-tier.integration.test.ts
|
||||
*
|
||||
* Skipped when FEDERATED_INTEGRATION !== '1'.
|
||||
*
|
||||
* Strategy: users.id (TEXT PK) uses the recognisable prefix `fed-m1-08-` for
|
||||
* easy cleanup. UUID-PKed tables (teams, conversations, messages, team_members)
|
||||
* use deterministic valid UUIDs in the `f0000xxx-…` namespace. Cleanup is
|
||||
* explicit DELETE by id — no full-table truncation.
|
||||
*/
|
||||
|
||||
import fs from 'node:fs/promises';
|
||||
import os from 'node:os';
|
||||
import path from 'node:path';
|
||||
|
||||
import { users, teams, teamMembers, conversations, messages } from '@mosaicstack/db';
|
||||
import { createPgliteDbWithVector, runPgliteMigrations } from './test-utils/pglite-with-vector.js';
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { afterAll, describe, expect, it } from 'vitest';
|
||||
|
||||
import { DrizzleMigrationSource, PostgresMigrationTarget, runMigrateTier } from './migrate-tier.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Constants */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const run = process.env['FEDERATED_INTEGRATION'] === '1';
|
||||
|
||||
const FEDERATED_PG_URL = 'postgresql://mosaic:mosaic@localhost:5433/mosaic';
|
||||
|
||||
/**
|
||||
* Deterministic IDs for the test's seed data.
|
||||
*
|
||||
* users.id is TEXT (any string) — we use a recognisable prefix for easy cleanup.
|
||||
* All other tables use UUID primary keys — must be valid UUID v4 format.
|
||||
* The 4th segment starts with '4' (version 4) and 5th starts with '8' (variant).
|
||||
*/
|
||||
const IDS = {
|
||||
// text PK — can be any string
|
||||
user1: 'fed-m1-08-user-1',
|
||||
user2: 'fed-m1-08-user-2',
|
||||
// UUID PKs — must be valid UUID format
|
||||
team1: 'f0000001-0000-4000-8000-000000000001',
|
||||
teamMember1: 'f0000002-0000-4000-8000-000000000001',
|
||||
teamMember2: 'f0000002-0000-4000-8000-000000000002',
|
||||
conv1: 'f0000003-0000-4000-8000-000000000001',
|
||||
conv2: 'f0000003-0000-4000-8000-000000000002',
|
||||
msg1: 'f0000004-0000-4000-8000-000000000001',
|
||||
msg2: 'f0000004-0000-4000-8000-000000000002',
|
||||
msg3: 'f0000004-0000-4000-8000-000000000003',
|
||||
msg4: 'f0000004-0000-4000-8000-000000000004',
|
||||
msg5: 'f0000004-0000-4000-8000-000000000005',
|
||||
} as const;
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Shared handles for afterAll cleanup */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
let targetSql: ReturnType<typeof postgres> | undefined;
|
||||
let pgliteDataDir: string | undefined;
|
||||
|
||||
afterAll(async () => {
|
||||
if (targetSql) {
|
||||
await cleanTarget(targetSql).catch(() => {});
|
||||
await targetSql.end({ timeout: 5 }).catch(() => {});
|
||||
}
|
||||
if (pgliteDataDir) {
|
||||
await fs.rm(pgliteDataDir, { recursive: true, force: true }).catch(() => {});
|
||||
}
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Delete all test-owned rows from target in safe FK order. */
|
||||
async function cleanTarget(sql: ReturnType<typeof postgres>): Promise<void> {
|
||||
// Reverse FK order: messages → conversations → team_members → teams → users
|
||||
await sql.unsafe(`DELETE FROM messages WHERE id = ANY($1)`, [
|
||||
[IDS.msg1, IDS.msg2, IDS.msg3, IDS.msg4, IDS.msg5],
|
||||
] as never[]);
|
||||
await sql.unsafe(`DELETE FROM conversations WHERE id = ANY($1)`, [
|
||||
[IDS.conv1, IDS.conv2],
|
||||
] as never[]);
|
||||
await sql.unsafe(`DELETE FROM team_members WHERE id = ANY($1)`, [
|
||||
[IDS.teamMember1, IDS.teamMember2],
|
||||
] as never[]);
|
||||
await sql.unsafe(`DELETE FROM teams WHERE id = $1`, [IDS.team1] as never[]);
|
||||
await sql.unsafe(`DELETE FROM users WHERE id = ANY($1)`, [[IDS.user1, IDS.user2]] as never[]);
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Test suite */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe.skipIf(!run)('migrate-tier — PGlite → federated PG', () => {
|
||||
it('seeds PGlite, runs migrate-tier, asserts row counts and sample rows on target', async () => {
|
||||
/* ---- 1. Create a temp PGlite db ---------------------------------- */
|
||||
|
||||
pgliteDataDir = await fs.mkdtemp(path.join(os.tmpdir(), 'fed-m1-08-'));
|
||||
const handle = createPgliteDbWithVector(pgliteDataDir);
|
||||
|
||||
// Run Drizzle migrations against PGlite.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await runPgliteMigrations(handle.db as any);
|
||||
|
||||
/* ---- 2. Seed representative data --------------------------------- */
|
||||
|
||||
const now = new Date();
|
||||
const db = handle.db;
|
||||
|
||||
// users (2 rows)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (db as any).insert(users).values([
|
||||
{
|
||||
id: IDS.user1,
|
||||
name: 'Fed Test User One',
|
||||
email: 'fed-m1-08-user1@test.invalid',
|
||||
emailVerified: false,
|
||||
role: 'member',
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.user2,
|
||||
name: 'Fed Test User Two',
|
||||
email: 'fed-m1-08-user2@test.invalid',
|
||||
emailVerified: false,
|
||||
role: 'member',
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
},
|
||||
]);
|
||||
|
||||
// teams (1 row)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (db as any).insert(teams).values([
|
||||
{
|
||||
id: IDS.team1,
|
||||
name: 'Fed M1-08 Team',
|
||||
slug: 'fed-m1-08-team',
|
||||
ownerId: IDS.user1,
|
||||
managerId: IDS.user1,
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
},
|
||||
]);
|
||||
|
||||
// team_members (2 rows linking both users to the team)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (db as any).insert(teamMembers).values([
|
||||
{
|
||||
id: IDS.teamMember1,
|
||||
teamId: IDS.team1,
|
||||
userId: IDS.user1,
|
||||
role: 'manager',
|
||||
joinedAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.teamMember2,
|
||||
teamId: IDS.team1,
|
||||
userId: IDS.user2,
|
||||
role: 'member',
|
||||
joinedAt: now,
|
||||
},
|
||||
]);
|
||||
|
||||
// conversations (2 rows)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (db as any).insert(conversations).values([
|
||||
{
|
||||
id: IDS.conv1,
|
||||
title: 'Fed M1-08 Conversation Alpha',
|
||||
userId: IDS.user1,
|
||||
archived: false,
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.conv2,
|
||||
title: 'Fed M1-08 Conversation Beta',
|
||||
userId: IDS.user2,
|
||||
archived: false,
|
||||
createdAt: now,
|
||||
updatedAt: now,
|
||||
},
|
||||
]);
|
||||
|
||||
// messages (5 rows across both conversations)
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await (db as any).insert(messages).values([
|
||||
{
|
||||
id: IDS.msg1,
|
||||
conversationId: IDS.conv1,
|
||||
role: 'user',
|
||||
content: 'Hello from conv1 msg1',
|
||||
createdAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.msg2,
|
||||
conversationId: IDS.conv1,
|
||||
role: 'assistant',
|
||||
content: 'Reply in conv1 msg2',
|
||||
createdAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.msg3,
|
||||
conversationId: IDS.conv1,
|
||||
role: 'user',
|
||||
content: 'Follow-up in conv1 msg3',
|
||||
createdAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.msg4,
|
||||
conversationId: IDS.conv2,
|
||||
role: 'user',
|
||||
content: 'Hello from conv2 msg4',
|
||||
createdAt: now,
|
||||
},
|
||||
{
|
||||
id: IDS.msg5,
|
||||
conversationId: IDS.conv2,
|
||||
role: 'assistant',
|
||||
content: 'Reply in conv2 msg5',
|
||||
createdAt: now,
|
||||
},
|
||||
]);
|
||||
|
||||
/* ---- 3. Pre-clean the target so the test is repeatable ----------- */
|
||||
|
||||
targetSql = postgres(FEDERATED_PG_URL, {
|
||||
max: 3,
|
||||
connect_timeout: 10,
|
||||
idle_timeout: 30,
|
||||
});
|
||||
|
||||
await cleanTarget(targetSql);
|
||||
|
||||
/* ---- 4. Build source / target adapters and run migration --------- */
|
||||
|
||||
const source = new DrizzleMigrationSource(db, /* sourceHasVector= */ false);
|
||||
const target = new PostgresMigrationTarget(FEDERATED_PG_URL);
|
||||
|
||||
try {
|
||||
await runMigrateTier(
|
||||
source,
|
||||
target,
|
||||
{
|
||||
targetUrl: FEDERATED_PG_URL,
|
||||
dryRun: false,
|
||||
allowNonEmpty: true,
|
||||
batchSize: 500,
|
||||
onProgress: (_msg) => {
|
||||
// Uncomment for debugging: console.log(_msg);
|
||||
},
|
||||
},
|
||||
/* sourceHasVector= */ false,
|
||||
);
|
||||
} finally {
|
||||
await target.close();
|
||||
}
|
||||
|
||||
/* ---- 5. Assert: row counts in target match seed ------------------ */
|
||||
|
||||
const countUsers = await targetSql.unsafe<Array<{ n: string }>>(
|
||||
`SELECT COUNT(*)::text AS n FROM users WHERE id = ANY($1)`,
|
||||
[[IDS.user1, IDS.user2]] as never[],
|
||||
);
|
||||
expect(Number(countUsers[0]?.n)).toBe(2);
|
||||
|
||||
const countTeams = await targetSql.unsafe<Array<{ n: string }>>(
|
||||
`SELECT COUNT(*)::text AS n FROM teams WHERE id = $1`,
|
||||
[IDS.team1] as never[],
|
||||
);
|
||||
expect(Number(countTeams[0]?.n)).toBe(1);
|
||||
|
||||
const countTeamMembers = await targetSql.unsafe<Array<{ n: string }>>(
|
||||
`SELECT COUNT(*)::text AS n FROM team_members WHERE id = ANY($1)`,
|
||||
[[IDS.teamMember1, IDS.teamMember2]] as never[],
|
||||
);
|
||||
expect(Number(countTeamMembers[0]?.n)).toBe(2);
|
||||
|
||||
const countConvs = await targetSql.unsafe<Array<{ n: string }>>(
|
||||
`SELECT COUNT(*)::text AS n FROM conversations WHERE id = ANY($1)`,
|
||||
[[IDS.conv1, IDS.conv2]] as never[],
|
||||
);
|
||||
expect(Number(countConvs[0]?.n)).toBe(2);
|
||||
|
||||
const countMsgs = await targetSql.unsafe<Array<{ n: string }>>(
|
||||
`SELECT COUNT(*)::text AS n FROM messages WHERE id = ANY($1)`,
|
||||
[[IDS.msg1, IDS.msg2, IDS.msg3, IDS.msg4, IDS.msg5]] as never[],
|
||||
);
|
||||
expect(Number(countMsgs[0]?.n)).toBe(5);
|
||||
|
||||
/* ---- 6. Assert: sample row field values --------------------------- */
|
||||
|
||||
// User 1: check email and name
|
||||
const userRows = await targetSql.unsafe<Array<{ id: string; email: string; name: string }>>(
|
||||
`SELECT id, email, name FROM users WHERE id = $1`,
|
||||
[IDS.user1] as never[],
|
||||
);
|
||||
expect(userRows[0]?.email).toBe('fed-m1-08-user1@test.invalid');
|
||||
expect(userRows[0]?.name).toBe('Fed Test User One');
|
||||
|
||||
// Conversation 1: check title and user_id
|
||||
const convRows = await targetSql.unsafe<Array<{ id: string; title: string; user_id: string }>>(
|
||||
`SELECT id, title, user_id FROM conversations WHERE id = $1`,
|
||||
[IDS.conv1] as never[],
|
||||
);
|
||||
expect(convRows[0]?.title).toBe('Fed M1-08 Conversation Alpha');
|
||||
expect(convRows[0]?.user_id).toBe(IDS.user1);
|
||||
|
||||
/* ---- 7. Cleanup: delete test rows from target -------------------- */
|
||||
|
||||
await cleanTarget(targetSql);
|
||||
|
||||
// Close PGlite
|
||||
await handle.close();
|
||||
}, 60_000);
|
||||
});
|
||||
495
packages/storage/src/migrate-tier.spec.ts
Normal file
495
packages/storage/src/migrate-tier.spec.ts
Normal file
@@ -0,0 +1,495 @@
|
||||
/**
|
||||
* migrate-tier.spec.ts — Unit tests for the migrate-tier core logic.
|
||||
*
|
||||
* These are pure unit tests — no real database connections.
|
||||
* FED-M1-08 will add integration tests against real services.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi } from 'vitest';
|
||||
import {
|
||||
getMigrationOrder,
|
||||
topoSort,
|
||||
runMigrateTier,
|
||||
checkTargetPreconditions,
|
||||
normaliseSourceRow,
|
||||
SKIP_TABLES,
|
||||
MigrationPreconditionError,
|
||||
type MigrationSource,
|
||||
type MigrationTarget,
|
||||
} from './migrate-tier.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Mock factories */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Build a mock MigrationSource backed by an in-memory table map.
|
||||
* Implements the DrizzleMigrationSource-shaped contract:
|
||||
* - readTable(tableName, opts?) returns paginated rows
|
||||
* - count(tableName) returns row count
|
||||
*
|
||||
* The `sourceHasVector` flag controls whether the mock simulates the
|
||||
* no-pgvector projection: when false and tableName is 'insights', rows
|
||||
* are returned WITHOUT the 'embedding' field (matching DrizzleMigrationSource
|
||||
* behaviour for local/PGlite sources).
|
||||
*/
|
||||
function makeMockSource(
|
||||
data: Record<string, Record<string, unknown>[]>,
|
||||
sourceHasVector = true,
|
||||
): MigrationSource & {
|
||||
readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }>;
|
||||
} {
|
||||
const readTableCalls: Array<{ table: string; opts?: { limit?: number; offset?: number } }> = [];
|
||||
return {
|
||||
readTableCalls,
|
||||
readTable: vi.fn(async (tableName: string, opts?: { limit?: number; offset?: number }) => {
|
||||
readTableCalls.push({ table: tableName, opts });
|
||||
let rows = data[tableName] ?? [];
|
||||
// Simulate no-vector projection: omit 'embedding' from insights rows
|
||||
// when sourceHasVector is false (matches DrizzleMigrationSource behaviour).
|
||||
if (tableName === 'insights' && !sourceHasVector) {
|
||||
rows = rows.map(({ embedding: _omit, ...rest }) => rest);
|
||||
}
|
||||
const offset = opts?.offset ?? 0;
|
||||
const limit = opts?.limit ?? rows.length;
|
||||
return rows.slice(offset, offset + limit);
|
||||
}),
|
||||
count: vi.fn(async (tableName: string) => (data[tableName] ?? []).length),
|
||||
close: vi.fn(async () => undefined),
|
||||
};
|
||||
}
|
||||
|
||||
function makeMockTarget(opts?: {
|
||||
hasPgvector?: boolean;
|
||||
nonEmptyTable?: string;
|
||||
}): MigrationTarget & { upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> } {
|
||||
const upsertCalls: Array<{ table: string; rows: Record<string, unknown>[] }> = [];
|
||||
const storedCounts: Record<string, number> = {};
|
||||
|
||||
return {
|
||||
upsertCalls,
|
||||
upsertBatch: vi.fn(async (table: string, rows: Record<string, unknown>[]) => {
|
||||
upsertCalls.push({ table, rows });
|
||||
storedCounts[table] = (storedCounts[table] ?? 0) + rows.length;
|
||||
}),
|
||||
count: vi.fn(async (table: string) => {
|
||||
if (opts?.nonEmptyTable === table) return 5;
|
||||
return storedCounts[table] ?? 0;
|
||||
}),
|
||||
hasPgvector: vi.fn(async () => opts?.hasPgvector ?? true),
|
||||
close: vi.fn(async () => undefined),
|
||||
};
|
||||
}
|
||||
|
||||
function noopProgress(): (msg: string) => void {
|
||||
return () => undefined;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 1. Topological ordering */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('topoSort', () => {
|
||||
it('returns empty array for empty input', () => {
|
||||
expect(topoSort(new Map())).toEqual([]);
|
||||
});
|
||||
|
||||
it('orders parents before children — linear chain', () => {
|
||||
// users -> teams -> messages
|
||||
const deps = new Map([
|
||||
['users', []],
|
||||
['teams', ['users']],
|
||||
['messages', ['teams']],
|
||||
]);
|
||||
const order = topoSort(deps);
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
|
||||
expect(order.indexOf('teams')).toBeLessThan(order.indexOf('messages'));
|
||||
});
|
||||
|
||||
it('orders parents before children — diamond graph', () => {
|
||||
// a -> (b, c) -> d
|
||||
const deps = new Map([
|
||||
['a', []],
|
||||
['b', ['a']],
|
||||
['c', ['a']],
|
||||
['d', ['b', 'c']],
|
||||
]);
|
||||
const order = topoSort(deps);
|
||||
expect(order.indexOf('a')).toBeLessThan(order.indexOf('b'));
|
||||
expect(order.indexOf('a')).toBeLessThan(order.indexOf('c'));
|
||||
expect(order.indexOf('b')).toBeLessThan(order.indexOf('d'));
|
||||
expect(order.indexOf('c')).toBeLessThan(order.indexOf('d'));
|
||||
});
|
||||
|
||||
it('throws on cyclic dependencies', () => {
|
||||
const deps = new Map([
|
||||
['a', ['b']],
|
||||
['b', ['a']],
|
||||
]);
|
||||
expect(() => topoSort(deps)).toThrow('Cycle detected');
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 2. getMigrationOrder — sessions / verifications excluded */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('getMigrationOrder', () => {
|
||||
it('does not include "sessions"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('sessions');
|
||||
});
|
||||
|
||||
it('does not include "verifications"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('verifications');
|
||||
});
|
||||
|
||||
it('does not include "admin_tokens"', () => {
|
||||
expect(getMigrationOrder()).not.toContain('admin_tokens');
|
||||
});
|
||||
|
||||
it('includes "users" before "teams"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('teams'));
|
||||
});
|
||||
|
||||
it('includes "users" before "conversations"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('users')).toBeLessThan(order.indexOf('conversations'));
|
||||
});
|
||||
|
||||
it('includes "conversations" before "messages"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('conversations')).toBeLessThan(order.indexOf('messages'));
|
||||
});
|
||||
|
||||
it('includes "projects" before "agents"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('projects')).toBeLessThan(order.indexOf('agents'));
|
||||
});
|
||||
|
||||
it('includes "agents" before "conversations"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('agents')).toBeLessThan(order.indexOf('conversations'));
|
||||
});
|
||||
|
||||
it('includes "missions" before "mission_tasks"', () => {
|
||||
const order = getMigrationOrder();
|
||||
expect(order.indexOf('missions')).toBeLessThan(order.indexOf('mission_tasks'));
|
||||
});
|
||||
|
||||
it('includes all expected tables', () => {
|
||||
const order = getMigrationOrder();
|
||||
const expected = [
|
||||
'users',
|
||||
'teams',
|
||||
'accounts',
|
||||
'projects',
|
||||
'agents',
|
||||
'conversations',
|
||||
'messages',
|
||||
'insights',
|
||||
];
|
||||
for (const t of expected) {
|
||||
expect(order).toContain(t);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 3. Dry-run makes no writes */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — dry-run', () => {
|
||||
it('makes no calls to upsertBatch', async () => {
|
||||
const source = makeMockSource({
|
||||
users: [{ id: 'u1', name: 'Alice', email: 'alice@example.com' }],
|
||||
});
|
||||
const target = makeMockTarget();
|
||||
|
||||
const result = await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: true,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
expect(target.upsertCalls).toHaveLength(0);
|
||||
expect(result.dryRun).toBe(true);
|
||||
expect(result.totalRows).toBe(0);
|
||||
});
|
||||
|
||||
it('does not call checkTargetPreconditions in dry-run', async () => {
|
||||
// Even if hasPgvector is false, dry-run should not throw.
|
||||
const source = makeMockSource({});
|
||||
const target = makeMockTarget({ hasPgvector: false });
|
||||
|
||||
await expect(
|
||||
runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: true,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
}),
|
||||
).resolves.not.toThrow();
|
||||
|
||||
// hasPgvector should NOT have been called during dry run.
|
||||
expect(target.hasPgvector).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 4. Idempotency */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — idempotency', () => {
|
||||
it('produces the same logical row count on second run (upsert semantics)', async () => {
|
||||
const userData = [
|
||||
{ id: 'u1', name: 'Alice', email: 'alice@example.com' },
|
||||
{ id: 'u2', name: 'Bob', email: 'bob@example.com' },
|
||||
];
|
||||
|
||||
const source = makeMockSource({ users: userData });
|
||||
|
||||
// First run target.
|
||||
const target1 = makeMockTarget();
|
||||
await runMigrateTier(source, target1, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const firstRunUpserts = target1.upsertCalls.filter((c) => c.table === 'users');
|
||||
const firstRunRows = firstRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
|
||||
|
||||
// Second run — allowNonEmpty because first run already wrote rows.
|
||||
const target2 = makeMockTarget();
|
||||
await runMigrateTier(source, target2, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: true,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const secondRunUpserts = target2.upsertCalls.filter((c) => c.table === 'users');
|
||||
const secondRunRows = secondRunUpserts.reduce((acc, c) => acc + c.rows.length, 0);
|
||||
|
||||
// Both runs write the same number of rows (upsert — second run updates in place).
|
||||
expect(firstRunRows).toBe(userData.length);
|
||||
expect(secondRunRows).toBe(userData.length);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 5. Empty-target precondition */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('checkTargetPreconditions', () => {
|
||||
it('throws when target table is non-empty and allowNonEmpty is false', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toThrow(
|
||||
MigrationPreconditionError,
|
||||
);
|
||||
});
|
||||
|
||||
it('includes remediation hint in thrown error', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
|
||||
name: 'MigrationPreconditionError',
|
||||
remediation: expect.stringContaining('--allow-non-empty'),
|
||||
});
|
||||
});
|
||||
|
||||
it('does NOT throw when allowNonEmpty is true', async () => {
|
||||
const target = makeMockTarget({ nonEmptyTable: 'users' });
|
||||
await expect(checkTargetPreconditions(target, true, ['users'])).resolves.not.toThrow();
|
||||
});
|
||||
|
||||
it('throws when pgvector extension is missing', async () => {
|
||||
const target = makeMockTarget({ hasPgvector: false });
|
||||
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).rejects.toMatchObject({
|
||||
name: 'MigrationPreconditionError',
|
||||
remediation: expect.stringContaining('pgvector'),
|
||||
});
|
||||
});
|
||||
|
||||
it('passes when target is empty and pgvector is present', async () => {
|
||||
const target = makeMockTarget({ hasPgvector: true });
|
||||
await expect(checkTargetPreconditions(target, false, ['users'])).resolves.not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 6. Skipped tables documented */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('SKIP_TABLES', () => {
|
||||
it('includes "sessions"', () => {
|
||||
expect(SKIP_TABLES.has('sessions')).toBe(true);
|
||||
});
|
||||
|
||||
it('includes "verifications"', () => {
|
||||
expect(SKIP_TABLES.has('verifications')).toBe(true);
|
||||
});
|
||||
|
||||
it('includes "admin_tokens"', () => {
|
||||
expect(SKIP_TABLES.has('admin_tokens')).toBe(true);
|
||||
});
|
||||
|
||||
it('migration result includes skipped table entries', async () => {
|
||||
const source = makeMockSource({});
|
||||
const target = makeMockTarget();
|
||||
|
||||
const result = await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const skippedNames = result.tables.filter((t) => t.skipped).map((t) => t.table);
|
||||
expect(skippedNames).toContain('sessions');
|
||||
expect(skippedNames).toContain('verifications');
|
||||
expect(skippedNames).toContain('admin_tokens');
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 7. Embedding NULL on migrate from non-pgvector source */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('normaliseSourceRow — embedding handling', () => {
|
||||
it('sets embedding to null when sourceHasVector is false and table is insights', () => {
|
||||
const row: Record<string, unknown> = {
|
||||
id: 'ins-1',
|
||||
content: 'Some insight',
|
||||
userId: 'u1',
|
||||
};
|
||||
const normalised = normaliseSourceRow('insights', row, false);
|
||||
expect(normalised['embedding']).toBeNull();
|
||||
});
|
||||
|
||||
it('preserves existing embedding when sourceHasVector is true', () => {
|
||||
const embedding = [0.1, 0.2, 0.3];
|
||||
const row: Record<string, unknown> = {
|
||||
id: 'ins-1',
|
||||
content: 'Some insight',
|
||||
userId: 'u1',
|
||||
embedding,
|
||||
};
|
||||
const normalised = normaliseSourceRow('insights', row, true);
|
||||
expect(normalised['embedding']).toBe(embedding);
|
||||
});
|
||||
|
||||
it('does not add embedding field to non-vector tables', () => {
|
||||
const row: Record<string, unknown> = { id: 'u1', name: 'Alice' };
|
||||
const normalised = normaliseSourceRow('users', row, false);
|
||||
expect('embedding' in normalised).toBe(false);
|
||||
});
|
||||
|
||||
it('passes through rows for non-vector tables unchanged', () => {
|
||||
const row: Record<string, unknown> = { id: 'u1', name: 'Alice', email: 'alice@test.com' };
|
||||
const normalised = normaliseSourceRow('users', row, false);
|
||||
expect(normalised).toEqual(row);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 8. End-to-end: correct order of upsert calls */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('runMigrateTier — migration order', () => {
|
||||
it('writes users before messages', async () => {
|
||||
const source = makeMockSource({
|
||||
users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
|
||||
messages: [{ id: 'm1', conversationId: 'c1', role: 'user', content: 'Hi' }],
|
||||
});
|
||||
const target = makeMockTarget();
|
||||
|
||||
await runMigrateTier(source, target, {
|
||||
targetUrl: 'postgresql://localhost/test',
|
||||
dryRun: false,
|
||||
allowNonEmpty: false,
|
||||
batchSize: 100,
|
||||
onProgress: noopProgress(),
|
||||
});
|
||||
|
||||
const tableOrder = target.upsertCalls.map((c) => c.table);
|
||||
const usersIdx = tableOrder.indexOf('users');
|
||||
const messagesIdx = tableOrder.indexOf('messages');
|
||||
// users must appear before messages in the upsert call sequence.
|
||||
expect(usersIdx).toBeGreaterThanOrEqual(0);
|
||||
expect(messagesIdx).toBeGreaterThanOrEqual(0);
|
||||
expect(usersIdx).toBeLessThan(messagesIdx);
|
||||
});
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* 9. Embedding-null projection: no-pgvector source */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('DrizzleMigrationSource embedding-null projection', () => {
  it(
    'when sourceHasVector is false, readTable for insights omits embedding column ' +
      'and normaliseSourceRow sets it to null for the target insert',
    async () => {
      // Source has insights data but no vector — embedding omitted at read time.
      const insightRowWithEmbedding = {
        id: 'ins-1',
        userId: 'u1',
        content: 'Test insight',
        embedding: [0.1, 0.2, 0.3], // present in raw data but omitted by source
        source: 'agent',
        category: 'general',
        relevanceScore: 1.0,
      };

      // makeMockSource with sourceHasVector=false simulates DrizzleMigrationSource
      // behaviour: the embedding field is stripped from the returned row.
      const source = makeMockSource(
        {
          users: [{ id: 'u1', name: 'Alice', email: 'alice@test.com' }],
          insights: [insightRowWithEmbedding],
        },
        /* sourceHasVector= */ false,
      );
      const target = makeMockTarget();

      // NOTE(review): runMigrateTier is called here with a 4th positional
      // argument (sourceHasVector) that is not part of MigrateTierOptions —
      // confirm the function signature actually accepts it.
      await runMigrateTier(
        source,
        target,
        {
          targetUrl: 'postgresql://localhost/test',
          dryRun: false,
          allowNonEmpty: false,
          batchSize: 100,
          onProgress: noopProgress(),
        },
        /* sourceHasVector= */ false,
      );

      // Assert: readTable was called for insights
      const insightsRead = source.readTableCalls.find((c) => c.table === 'insights');
      expect(insightsRead).toBeDefined();

      // Assert: the upsert to insights has embedding === null (not the original vector)
      const insightsUpsert = target.upsertCalls.find((c) => c.table === 'insights');
      expect(insightsUpsert).toBeDefined();
      const upsertedRow = insightsUpsert!.rows[0];
      expect(upsertedRow).toBeDefined();
      // embedding must be null — not the original [0.1, 0.2, 0.3]
      expect(upsertedRow!['embedding']).toBeNull();
      // Other fields must pass through unchanged
      expect(upsertedRow!['id']).toBe('ins-1');
      expect(upsertedRow!['content']).toBe('Test insight');
    },
  );
});
|
||||
799
packages/storage/src/migrate-tier.ts
Normal file
799
packages/storage/src/migrate-tier.ts
Normal file
@@ -0,0 +1,799 @@
|
||||
/**
|
||||
* migrate-tier.ts — Core logic for `mosaic storage migrate-tier`.
|
||||
*
|
||||
* Migrates data from `tier: local` (PGlite, normalized Drizzle schema) or
|
||||
* `tier: standalone` (Postgres without pgvector) → `tier: federated`
|
||||
* (Postgres + pgvector).
|
||||
*
|
||||
* Source: DrizzleMigrationSource — reads from the NORMALIZED Drizzle/relational
|
||||
* schema tables (not the flat `id TEXT + data JSONB` PgliteAdapter schema).
|
||||
* Both local (PGlite) and standalone (Postgres) sources use the same Drizzle
|
||||
* abstraction via createPgliteDb() or createDb() from @mosaicstack/db.
|
||||
* Target: PostgresMigrationTarget — upserts via raw SQL into the same schema.
|
||||
*
|
||||
* Key design decisions:
|
||||
* - Tables are migrated in topological (FK-dependency) order so that
|
||||
* parent rows exist before child rows are inserted.
|
||||
* - sessions + verifications are skipped — they are ephemeral / TTL'd.
|
||||
* - adminTokens is skipped — token hashes are environment-specific
|
||||
* and should be re-issued on the target.
|
||||
* - insights.embedding is omitted from source SELECT when the source lacks
|
||||
* pgvector (local/PGlite tier); target insert gets NULL for that column.
|
||||
* insights.embedding is nullable per schema (no .notNull() constraint).
|
||||
* - Each table's batch is wrapped in a transaction for atomicity.
|
||||
* - Upsert semantics (ON CONFLICT DO UPDATE) make re-runs idempotent.
|
||||
*
|
||||
* TODO (FED-M1-08): Add integration tests against real PGlite → real PG.
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import * as schema from '@mosaicstack/db';
|
||||
import { sql as drizzleSql } from '@mosaicstack/db';
|
||||
import { redactErrMsg } from './redact-error.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Read side of a tier migration: yields rows from the source database in the
 * normalized Drizzle schema shape. Implemented by DrizzleMigrationSource and
 * mocked in unit tests.
 */
export interface MigrationSource {
  /**
   * Return all rows from a table (normalized Drizzle schema rows).
   * When sourceHasVector is false and the table has a vector column,
   * the source MUST omit the vector column from the result and the
   * caller will set it to null (see normaliseSourceRow).
   *
   * @param tableName snake_case table name (see MIGRATION_ORDER)
   * @param opts optional pagination window (limit/offset)
   */
  readTable(
    tableName: string,
    opts?: { limit?: number; offset?: number },
  ): Promise<Record<string, unknown>[]>;

  /** Count rows in a table. */
  count(tableName: string): Promise<number>;

  /** Close the source connection. */
  close(): Promise<void>;
}
|
||||
|
||||
/**
 * Write side of a tier migration: upserts rows into the federated Postgres
 * database. Implemented by PostgresMigrationTarget and mocked in unit tests.
 */
export interface MigrationTarget {
  /**
   * Upsert a batch of rows into a table.
   * Must use ON CONFLICT (id) DO UPDATE semantics.
   */
  upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void>;

  /**
   * Count rows in a target table.
   */
  count(tableName: string): Promise<number>;

  /**
   * Check whether pgvector extension is installed.
   */
  hasPgvector(): Promise<boolean>;

  /** Close the target connection. */
  close(): Promise<void>;

  /**
   * Attempt to acquire a session-level Postgres advisory lock for migrate-tier.
   * Returns true if the lock was acquired, false if another process holds it.
   * Targets that do not support advisory locks (e.g. test mocks) may omit this
   * by not implementing the method — the caller skips locking gracefully.
   */
  tryAcquireAdvisoryLock?(): Promise<boolean>;

  /**
   * Release the session-level advisory lock acquired by tryAcquireAdvisoryLock.
   * Must be called in a finally block.
   */
  releaseAdvisoryLock?(): Promise<void>;
}
|
||||
|
||||
/** Caller-supplied options for a migrate-tier run. */
export interface MigrateTierOptions {
  /** Target postgres connection URL. */
  targetUrl: string;
  /** Whether to skip all writes (dry-run). */
  dryRun: boolean;
  /** Skip the non-empty target guard. */
  allowNonEmpty: boolean;
  /** Rows per transaction batch. */
  batchSize: number;
  /** Called with progress messages. */
  onProgress: (msg: string) => void;
}
|
||||
|
||||
/** Per-table outcome reported in MigrateTierResult.tables. */
export interface TableMigrationResult {
  /** snake_case table name. */
  table: string;
  /** Number of rows upserted (0 when skipped or dry-run). */
  rowsMigrated: number;
  /** True when the table was intentionally not migrated. */
  skipped: boolean;
  /** Human-readable reason, present only when skipped is true. */
  skipReason?: string;
}
|
||||
|
||||
/** Aggregate outcome of a migrate-tier run. */
export interface MigrateTierResult {
  /** One entry per table considered, including skipped tables. */
  tables: TableMigrationResult[];
  /** Total rows written across all tables (0 for dry-run). */
  totalRows: number;
  /** Echo of the dryRun option this run was executed with. */
  dryRun: boolean;
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Schema: FK-aware topological table order */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* SKIP_TABLES: ephemeral or environment-specific tables not worth migrating.
|
||||
*
|
||||
* WHY these tables are skipped:
|
||||
* - sessions: TTL'd auth sessions — they are invalid in the new environment
|
||||
* and would immediately expire or fail JWT verification anyway.
|
||||
* - verifications: one-time tokens (email verify, password-reset links, etc.)
|
||||
* — they have already expired or been consumed; re-sending is
|
||||
* the correct action on the new environment.
|
||||
* - admin_tokens: hashed tokens bound to the old environment's secret keys —
|
||||
* the hash is environment-specific and must be re-issued on
|
||||
* the target.
|
||||
*
|
||||
* WHY these tables are NOT skipped (intentionally migrated):
|
||||
* - accounts (OAuth tokens): durable credentials bound to the user's identity,
|
||||
* not to the deployment environment. OAuth tokens survive environment changes
|
||||
* and should follow the user to the federated tier.
|
||||
* - provider_credentials (AI provider keys): durable, user-owned API keys for
|
||||
* AI providers (e.g. OpenAI, Anthropic). These are bound to the user, not
|
||||
* the server, and must be preserved so AI features work immediately after
|
||||
* migration.
|
||||
*
|
||||
* OPERATOR NOTE: If migrating to a shared or multi-tenant federated tier, review
|
||||
* whether `accounts` and `provider_credentials` should be wiped post-migration
|
||||
* to prevent unintended cross-tenant credential exposure.
|
||||
*/
|
||||
export const SKIP_TABLES = new Set(['sessions', 'verifications', 'admin_tokens']);
|
||||
|
||||
/**
|
||||
* Topologically ordered table list (parents before children).
|
||||
*
|
||||
* Derived from FK references in packages/db/src/schema.ts:
|
||||
*
|
||||
* users (no FKs)
|
||||
* teams → users
|
||||
* team_members → teams, users
|
||||
* accounts → users
|
||||
* projects → users, teams
|
||||
* agents → projects, users
|
||||
* missions → projects, users
|
||||
* tasks → projects, missions
|
||||
* mission_tasks → missions, tasks, users
|
||||
* conversations → users, projects, agents
|
||||
* messages → conversations
|
||||
* preferences → users
|
||||
* insights → users [has embedding vector column]
|
||||
* agent_logs → users
|
||||
* skills → users (installedBy, nullable)
|
||||
* routing_rules → users (userId, nullable)
|
||||
* provider_credentials → users
|
||||
* appreciations (no FKs)
|
||||
* events (no FKs)
|
||||
* tickets (no FKs)
|
||||
* summarization_jobs (no FKs)
|
||||
*
|
||||
* Skipped (not in this list):
|
||||
* sessions → users (ephemeral)
|
||||
* verifications (no FKs, ephemeral)
|
||||
* admin_tokens → users (environment-specific)
|
||||
*/
|
||||
export const MIGRATION_ORDER: string[] = [
|
||||
'users',
|
||||
'teams',
|
||||
'team_members',
|
||||
'accounts',
|
||||
'projects',
|
||||
'agents',
|
||||
'missions',
|
||||
'tasks',
|
||||
'mission_tasks',
|
||||
'conversations',
|
||||
'messages',
|
||||
'preferences',
|
||||
'insights',
|
||||
'agent_logs',
|
||||
'skills',
|
||||
'routing_rules',
|
||||
'provider_credentials',
|
||||
'appreciations',
|
||||
'events',
|
||||
'tickets',
|
||||
'summarization_jobs',
|
||||
];
|
||||
|
||||
/** Tables that carry a vector embedding column on the target. */
|
||||
const VECTOR_TABLES = new Set(['insights']);
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Utility: derive topological order from an adjacency list */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Given an adjacency list (table → list of tables it depends on),
|
||||
* return a valid topological ordering (Kahn's algorithm).
|
||||
*
|
||||
* Exposed for unit testing.
|
||||
*/
|
||||
export function topoSort(deps: Map<string, string[]>): string[] {
|
||||
const nodes = [...deps.keys()];
|
||||
const inDegree = new Map<string, number>();
|
||||
const adjReverse = new Map<string, string[]>();
|
||||
|
||||
for (const node of nodes) {
|
||||
if (!inDegree.has(node)) inDegree.set(node, 0);
|
||||
if (!adjReverse.has(node)) adjReverse.set(node, []);
|
||||
for (const dep of deps.get(node) ?? []) {
|
||||
inDegree.set(node, (inDegree.get(node) ?? 0) + 1);
|
||||
if (!adjReverse.has(dep)) adjReverse.set(dep, []);
|
||||
adjReverse.get(dep)!.push(node);
|
||||
}
|
||||
}
|
||||
|
||||
// Start with nodes that have no dependencies.
|
||||
const queue: string[] = [];
|
||||
for (const [node, deg] of inDegree) {
|
||||
if (deg === 0) queue.push(node);
|
||||
}
|
||||
|
||||
const result: string[] = [];
|
||||
while (queue.length > 0) {
|
||||
const node = queue.shift()!;
|
||||
result.push(node);
|
||||
for (const dependent of adjReverse.get(node) ?? []) {
|
||||
const newDeg = (inDegree.get(dependent) ?? 0) - 1;
|
||||
inDegree.set(dependent, newDeg);
|
||||
if (newDeg === 0) queue.push(dependent);
|
||||
}
|
||||
}
|
||||
|
||||
if (result.length !== nodes.length) {
|
||||
throw new Error('Cycle detected in FK dependency graph');
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the migration table order, excluding SKIP_TABLES.
|
||||
* Uses the pre-computed MIGRATION_ORDER (verified against schema.ts).
|
||||
*/
|
||||
export function getMigrationOrder(): string[] {
|
||||
return MIGRATION_ORDER.filter((t) => !SKIP_TABLES.has(t));
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* TABLE_OBJECTS: migration table name → Drizzle table object */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Maps MIGRATION_ORDER table names to their corresponding Drizzle table
 * objects from the normalized schema. Used by DrizzleMigrationSource to
 * execute typed `db.select().from(table)` queries.
 *
 * Keyed by snake_case table name (matching MIGRATION_ORDER + SKIP_TABLES).
 * Typed `any` because the Drizzle table objects have heterogeneous generic
 * types that cannot share a single useful value type here.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const TABLE_OBJECTS: Record<string, any> = {
  users: schema.users,
  teams: schema.teams,
  team_members: schema.teamMembers,
  accounts: schema.accounts,
  projects: schema.projects,
  agents: schema.agents,
  missions: schema.missions,
  tasks: schema.tasks,
  mission_tasks: schema.missionTasks,
  conversations: schema.conversations,
  messages: schema.messages,
  preferences: schema.preferences,
  insights: schema.insights,
  agent_logs: schema.agentLogs,
  skills: schema.skills,
  routing_rules: schema.routingRules,
  provider_credentials: schema.providerCredentials,
  appreciations: schema.appreciations,
  events: schema.events,
  tickets: schema.tickets,
  summarization_jobs: schema.summarizationJobs,
  // Skipped tables — included so count() works for preflight but never passed
  // to upsertBatch.
  sessions: schema.sessions,
  verifications: schema.verifications,
  admin_tokens: schema.adminTokens,
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* DrizzleMigrationSource */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * MigrationSource backed by a Drizzle DB handle (works with both
 * PostgresJsDatabase and PgliteDatabase — they share the same Drizzle
 * query API for schema-defined tables).
 *
 * For the `insights` table (the only vector-column table), when the source
 * lacks pgvector (local/PGlite tier), the `embedding` column is excluded
 * from the SELECT projection via a raw `db.execute()` query that lists
 * only non-vector columns. This prevents a type-registration error from
 * PGlite, which does not know the `vector` type. The caller (runMigrateTier
 * via normaliseSourceRow) will set embedding to null on the resulting rows.
 *
 * Column projection is opt-in: pass `sourceHasVector: false` to activate it.
 */
export class DrizzleMigrationSource implements MigrationSource {
  constructor(
    // Drizzle DB handle (PgliteDatabase or PostgresJsDatabase); typed `any`
    // because the two driver types do not share a common named interface.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    private readonly db: any,
    // false → strip the vector column from insights reads (PGlite path).
    private readonly sourceHasVector: boolean = true,
  ) {}

  /**
   * Columns of the insights table that do NOT include the vector embedding.
   * Used for the no-pgvector projection path.
   * NOTE(review): must be kept in sync with the insights table definition in
   * packages/db/src/schema.ts — a new column added there will silently be
   * dropped by this projection until it is added here too.
   */
  private static readonly INSIGHTS_COLUMNS_NO_VECTOR = [
    'id',
    'user_id',
    'content',
    'source',
    'category',
    'relevance_score',
    'metadata',
    'created_at',
    'updated_at',
    'decayed_at',
  ] as const;

  /**
   * Read all rows (optionally paginated via limit/offset) from `tableName`.
   *
   * @param tableName snake_case key into TABLE_OBJECTS
   * @param opts optional pagination window
   * @returns rows as plain objects
   * @throws Error when `tableName` is not in TABLE_OBJECTS
   */
  async readTable(
    tableName: string,
    opts?: { limit?: number; offset?: number },
  ): Promise<Record<string, unknown>[]> {
    const table = TABLE_OBJECTS[tableName];
    if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);

    // For vector tables when source lacks pgvector: use column-allowlist raw query
    // to avoid type-registration errors.
    if (VECTOR_TABLES.has(tableName) && !this.sourceHasVector) {
      // Build `SELECT "id", "user_id", … FROM "insights"` with positional
      // $1/$2 params for LIMIT/OFFSET. Column and table names come from
      // trusted constants above — not from user input.
      const cols = DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.map((c) => `"${c}"`).join(
        ', ',
      );
      let sql = `SELECT ${cols} FROM "${tableName}"`;
      const params: unknown[] = [];
      if (opts?.limit !== undefined) {
        params.push(opts.limit);
        sql += ` LIMIT $${params.length.toString()}`;
      }
      if (opts?.offset !== undefined) {
        params.push(opts.offset);
        sql += ` OFFSET $${params.length.toString()}`;
      }
      // NOTE(review): the `{ sql, params, typings }` object form targets the
      // driver's raw-query interface — confirm both the PGlite and
      // postgres.js drizzle wrappers accept this shape for execute().
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const result = await (this.db as any).execute(
        // drizzle-orm/pglite and drizzle-orm/postgres-js both accept a raw
        // SQL template; use the tagged-template sql helper from drizzle-orm.
        // Since we need dynamic params, we use db.execute with a raw string
        // via the PGlite/postgres.js driver directly.
        { sql, params, typings: [] },
      );
      // drizzle execute returns { rows: unknown[][] } for PGlite driver,
      // or a RowList for postgres.js. Normalise both shapes.
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      const raw = result as any;
      if (Array.isArray(raw)) {
        // postgres.js shape: array of row objects
        return raw as Record<string, unknown>[];
      }
      if (raw?.rows && Array.isArray(raw.rows)) {
        // PGlite shape: { rows: unknown[][] } OR { rows: Record<string,unknown>[] }
        const rows = raw.rows as unknown[];
        if (rows.length === 0) return [];
        if (Array.isArray(rows[0])) {
          // Columnar: convert to objects using fields array if available.
          // Falls back to the static column allowlist, which matches the
          // SELECT projection built above (same list, same order).
          const fields: string[] =
            (raw.fields as Array<{ name: string }> | undefined)?.map((f) => f.name) ??
            DrizzleMigrationSource.INSIGHTS_COLUMNS_NO_VECTOR.slice();
          return (rows as unknown[][]).map((row) => {
            const obj: Record<string, unknown> = {};
            for (let i = 0; i < fields.length; i++) {
              obj[fields[i]!] = row[i];
            }
            return obj;
          });
        }
        return rows as Record<string, unknown>[];
      }
      // Unknown result shape — treat as no rows rather than crashing.
      return [];
    }

    // Standard Drizzle select for all other tables (and vector tables when
    // the source has pgvector registered).
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let query = (this.db as any).select().from(table);
    if (opts?.limit !== undefined) query = query.limit(opts.limit);
    if (opts?.offset !== undefined) query = query.offset(opts.offset);
    return (await query) as Record<string, unknown>[];
  }

  /**
   * Count rows in `tableName` via `SELECT COUNT(*)::int`.
   *
   * @throws Error when `tableName` is not in TABLE_OBJECTS
   */
  async count(tableName: string): Promise<number> {
    const table = TABLE_OBJECTS[tableName];
    if (!table) throw new Error(`DrizzleMigrationSource: unknown table "${tableName}"`);
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const [row] = await (this.db as any)
      .select({ n: drizzleSql<number>`COUNT(*)::int` })
      .from(table);
    // Empty result set (no row at all) counts as zero.
    return (row as { n: number } | undefined)?.n ?? 0;
  }

  /** No-op: see note below. */
  async close(): Promise<void> {
    // Lifecycle managed externally — caller closes the db handle.
  }
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Real postgres target adapter */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Live implementation of MigrationTarget backed by a real Postgres connection.
|
||||
* Used by the CLI; mocked in tests.
|
||||
*/
|
||||
export class PostgresMigrationTarget implements MigrationTarget {
|
||||
private sql: ReturnType<typeof postgres>;
|
||||
|
||||
constructor(url: string) {
|
||||
this.sql = postgres(url, {
|
||||
max: 5,
|
||||
connect_timeout: 10,
|
||||
idle_timeout: 30,
|
||||
});
|
||||
}
|
||||
|
||||
async upsertBatch(tableName: string, rows: Record<string, unknown>[]): Promise<void> {
|
||||
if (rows.length === 0) return;
|
||||
|
||||
// Collect all column names from the batch (union of all row keys).
|
||||
const colSet = new Set<string>();
|
||||
for (const row of rows) {
|
||||
for (const k of Object.keys(row)) colSet.add(k);
|
||||
}
|
||||
const cols = [...colSet];
|
||||
if (!cols.includes('id')) {
|
||||
throw new Error(`Table ${tableName}: rows missing 'id' column`);
|
||||
}
|
||||
|
||||
// Build VALUES list — use postgres tagged-template helpers for safety.
|
||||
// postgres.js supports bulk inserts via array of objects.
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
await this.sql.begin(async (tx: any) => {
|
||||
// Insert in chunks to avoid enormous single queries.
|
||||
for (let i = 0; i < rows.length; i += 500) {
|
||||
const chunk = rows.slice(i, i + 500);
|
||||
|
||||
// Normalise rows: fill missing columns with null.
|
||||
const normalised = chunk.map((row) => {
|
||||
const out: Record<string, unknown> = {};
|
||||
for (const col of cols) {
|
||||
out[col] = row[col] ?? null;
|
||||
}
|
||||
return out;
|
||||
});
|
||||
|
||||
const colList = cols.map((c) => `"${c}"`).join(', ');
|
||||
const updateList = cols
|
||||
.filter((c) => c !== 'id')
|
||||
.map((c) => `"${c}" = EXCLUDED."${c}"`)
|
||||
.join(', ');
|
||||
|
||||
// Build values placeholders
|
||||
const valuePlaceholders = normalised
|
||||
.map((_, ri) => `(${cols.map((_, ci) => `$${ri * cols.length + ci + 1}`).join(', ')})`)
|
||||
.join(', ');
|
||||
|
||||
const flatValues = normalised.flatMap((row) => cols.map((c) => row[c] ?? null));
|
||||
|
||||
const query = `
|
||||
INSERT INTO "${tableName}" (${colList})
|
||||
VALUES ${valuePlaceholders}
|
||||
ON CONFLICT (id) DO UPDATE SET ${updateList}
|
||||
`;
|
||||
|
||||
await tx.unsafe(query, flatValues as never[]);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async count(tableName: string): Promise<number> {
|
||||
const rows = await this.sql.unsafe(`SELECT COUNT(*)::int AS n FROM "${tableName}"`);
|
||||
const row = rows[0] as unknown as { n: number } | undefined;
|
||||
return row?.n ?? 0;
|
||||
}
|
||||
|
||||
async hasPgvector(): Promise<boolean> {
|
||||
const rows = await this.sql`
|
||||
SELECT 1 FROM pg_extension WHERE extname = 'vector'
|
||||
`;
|
||||
return rows.length > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to acquire a non-blocking session-level Postgres advisory lock
|
||||
* keyed by hashtext('mosaic-migrate-tier'). Returns true if acquired,
|
||||
* false if another session already holds the lock.
|
||||
*
|
||||
* The lock is session-scoped: it is automatically released when the
|
||||
* connection closes, and also explicitly released via releaseAdvisoryLock().
|
||||
*/
|
||||
async tryAcquireAdvisoryLock(): Promise<boolean> {
|
||||
const rows = await this.sql`
|
||||
SELECT pg_try_advisory_lock(hashtext('mosaic-migrate-tier')) AS acquired
|
||||
`;
|
||||
const row = rows[0] as { acquired: boolean } | undefined;
|
||||
return row?.acquired ?? false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Release the session-level advisory lock previously acquired by
|
||||
* tryAcquireAdvisoryLock(). Safe to call even if the lock was not held
|
||||
* (pg_advisory_unlock returns false but does not throw).
|
||||
*/
|
||||
async releaseAdvisoryLock(): Promise<void> {
|
||||
await this.sql`
|
||||
SELECT pg_advisory_unlock(hashtext('mosaic-migrate-tier'))
|
||||
`;
|
||||
}
|
||||
|
||||
async close(): Promise<void> {
|
||||
await this.sql.end();
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Source-row normalisation */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Convert a camelCase key to snake_case.
|
||||
* e.g. "userId" → "user_id", "emailVerified" → "email_verified".
|
||||
* Keys that are already snake_case (no uppercase letters) are returned as-is.
|
||||
*/
|
||||
function toSnakeCase(key: string): string {
|
||||
return key.replace(/[A-Z]/g, (c) => `_${c.toLowerCase()}`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Drizzle returns rows as camelCase TypeScript objects (e.g. `userId`, not
|
||||
* `user_id`). The PostgresMigrationTarget upserts via raw SQL and uses the
|
||||
* column names as given. We must convert camelCase keys → snake_case before
|
||||
* building the INSERT statement so column names match the PG schema.
|
||||
*
|
||||
* Exception: the `insights` no-vector path already returns snake_case keys
|
||||
* from its raw SQL projection — toSnakeCase() is idempotent for already-
|
||||
* snake_case keys so this conversion is safe in all paths.
|
||||
*
|
||||
* For vector tables (insights), if `embedding` is absent from the source row
|
||||
* (because DrizzleMigrationSource omitted it in the no-vector projection), we
|
||||
* explicitly set it to null so the target ON CONFLICT UPDATE doesn't error.
|
||||
*
|
||||
* NOTE: insights.embedding is defined as `vector('embedding', { dimensions:
|
||||
* 1536 })` with no `.notNull()` in schema.ts — it accepts NULL.
|
||||
*/
|
||||
export function normaliseSourceRow(
|
||||
tableName: string,
|
||||
row: Record<string, unknown>,
|
||||
sourceHasVector: boolean,
|
||||
): Record<string, unknown> {
|
||||
// Convert all camelCase keys to snake_case for raw-SQL target compatibility.
|
||||
const out: Record<string, unknown> = {};
|
||||
for (const [k, v] of Object.entries(row)) {
|
||||
out[toSnakeCase(k)] = v;
|
||||
}
|
||||
|
||||
if (VECTOR_TABLES.has(tableName) && !sourceHasVector) {
|
||||
// Source cannot have embeddings — explicitly null them so ON CONFLICT
|
||||
// UPDATE doesn't try to write undefined.
|
||||
out['embedding'] = null;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Precondition checks */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export class MigrationPreconditionError extends Error {
|
||||
constructor(
|
||||
message: string,
|
||||
public readonly remediation: string,
|
||||
) {
|
||||
super(message);
|
||||
this.name = 'MigrationPreconditionError';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify target preconditions before writing any data.
|
||||
*
|
||||
* Checks:
|
||||
* 1. pgvector extension installed.
|
||||
* 2. User-data tables are empty (unless --allow-non-empty).
|
||||
*/
|
||||
export async function checkTargetPreconditions(
|
||||
target: MigrationTarget,
|
||||
allowNonEmpty: boolean,
|
||||
tablesToMigrate: string[],
|
||||
): Promise<void> {
|
||||
const hasVector = await target.hasPgvector();
|
||||
if (!hasVector) {
|
||||
throw new MigrationPreconditionError(
|
||||
'Target Postgres does not have the pgvector extension installed.',
|
||||
'Run: CREATE EXTENSION IF NOT EXISTS vector; — or use the pgvector/pgvector:pg17 Docker image.',
|
||||
);
|
||||
}
|
||||
|
||||
if (!allowNonEmpty) {
|
||||
// Check the first non-empty user-data table.
|
||||
for (const table of tablesToMigrate) {
|
||||
const n = await target.count(table);
|
||||
if (n > 0) {
|
||||
throw new MigrationPreconditionError(
|
||||
`Target table "${table}" already contains ${n.toString()} rows.`,
|
||||
'Pass --allow-non-empty to overwrite existing data (upsert semantics), ' +
|
||||
'or point to an empty target database.',
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Core migration runner */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
 * Run the tier migration.
 *
 * Flow: preflight source row counts → dry-run short-circuit → non-blocking
 * advisory lock on the target → precondition checks → per-table batched copy
 * (read → normalise → upsert) → lock release in `finally`.
 *
 * @param source Adapter for reading source rows.
 * @param target Adapter for writing rows to target.
 * @param opts Migration options.
 * @param sourceHasVector True if the source tier supports vector columns.
 * @returns Per-table results plus total row count; `dryRun: true` when no
 *   writes were made.
 * @throws Error when the advisory lock is already held by another session, or
 *   when a table copy fails mid-stream (message carries table name, rows
 *   written so far, and the last successfully written id).
 * @throws MigrationPreconditionError propagated from checkTargetPreconditions().
 */
export async function runMigrateTier(
  source: MigrationSource,
  target: MigrationTarget,
  opts: MigrateTierOptions,
  sourceHasVector = false,
): Promise<MigrateTierResult> {
  const { dryRun, allowNonEmpty, batchSize, onProgress } = opts;

  const tablesToMigrate = getMigrationOrder();

  // Preflight: gather row counts from source.
  // NOTE(review): counts are taken once up front; rows inserted into the
  // source after this point are not picked up by the copy loop below.
  onProgress('[migrate-tier] Gathering source row counts...');
  const sourceCounts = new Map<string, number>();
  for (const table of tablesToMigrate) {
    const n = await source.count(table);
    sourceCounts.set(table, n);
  }

  // Log preflight summary.
  onProgress('[migrate-tier] Tables to migrate:');
  for (const table of tablesToMigrate) {
    const n = sourceCounts.get(table) ?? 0;
    onProgress(` ${table}: ${n.toString()} rows`);
  }
  for (const skipped of SKIP_TABLES) {
    onProgress(` ${skipped}: SKIPPED (ephemeral/environment-specific)`);
  }

  // Vector column notice.
  if (!sourceHasVector) {
    onProgress(
      '[migrate-tier] NOTE: Source tier has no pgvector support. ' +
        'insights.embedding will be NULL on all migrated rows.',
    );
  }

  // Dry run: report the plan (including skipped tables) without acquiring
  // the lock or touching the target.
  if (dryRun) {
    onProgress('[migrate-tier] DRY RUN — no writes will be made.');
    const tables: TableMigrationResult[] = tablesToMigrate.map((t) => ({
      table: t,
      rowsMigrated: 0,
      skipped: false,
    }));
    for (const skipped of SKIP_TABLES) {
      tables.push({ table: skipped, rowsMigrated: 0, skipped: true, skipReason: 'ephemeral' });
    }
    return { tables, totalRows: 0, dryRun: true };
  }

  // Acquire a Postgres advisory lock on the target BEFORE checking preconditions
  // so that two concurrent invocations cannot both pass the non-empty guard and
  // race each other. Use non-blocking pg_try_advisory_lock so we fail fast
  // instead of deadlocking.
  //
  // Targets that don't implement tryAcquireAdvisoryLock (e.g. test mocks) skip
  // this step — the optional chaining guard handles that case.
  const lockAcquired = target.tryAcquireAdvisoryLock ? await target.tryAcquireAdvisoryLock() : true; // mocks / test doubles — no locking needed

  if (!lockAcquired) {
    // Thrown before entering the try block, so the finally below does not
    // release a lock this process never held.
    throw new Error(
      'Another migrate-tier process is already running against this target. ' +
        'Wait for it to complete or check for stuck locks via ' +
        "SELECT * FROM pg_locks WHERE locktype='advisory'.",
    );
  }

  try {
    // Check preconditions before writing.
    await checkTargetPreconditions(target, allowNonEmpty, tablesToMigrate);

    const results: TableMigrationResult[] = [];
    let totalRows = 0;

    for (const table of tablesToMigrate) {
      const sourceCount = sourceCounts.get(table) ?? 0;

      if (sourceCount === 0) {
        onProgress(`[migrate-tier] ${table}: 0 rows — skipping.`);
        results.push({ table, rowsMigrated: 0, skipped: false });
        continue;
      }

      onProgress(`[migrate-tier] ${table}: migrating ${sourceCount.toString()} rows...`);

      let offset = 0;
      let tableTotal = 0;
      // Tracked so a mid-stream failure can report how far the copy got.
      let lastSuccessfulId: string | undefined;

      try {
        while (offset < sourceCount) {
          const rows = await source.readTable(table, { limit: batchSize, offset });
          if (rows.length === 0) break;

          // camelCase → snake_case keys; embedding nulled when the source
          // tier has no pgvector support.
          const normalised = rows.map((row) => normaliseSourceRow(table, row, sourceHasVector));

          await target.upsertBatch(table, normalised);

          lastSuccessfulId = rows[rows.length - 1]?.['id'] as string | undefined;
          tableTotal += rows.length;
          offset += rows.length;

          onProgress(
            `[migrate-tier] ${table}: ${tableTotal.toString()}/${sourceCount.toString()} rows written`,
          );
        }
      } catch (err) {
        // Scrub credentials before re-throwing — driver errors can embed the
        // full DSN (see redactErrMsg).
        const errMsg = redactErrMsg(err instanceof Error ? err.message : String(err));
        throw new Error(
          `[migrate-tier] Failed on table "${table}" after ${tableTotal.toString()} rows ` +
            `(last id: ${lastSuccessfulId ?? 'none'}). Error: ${errMsg}\n` +
            `Remediation: Re-run with --allow-non-empty to resume (upsert is idempotent).`,
        );
      }

      results.push({ table, rowsMigrated: tableTotal, skipped: false });
      totalRows += tableTotal;
      onProgress(`[migrate-tier] ${table}: done (${tableTotal.toString()} rows).`);
    }

    // Add skipped table records.
    for (const skipped of SKIP_TABLES) {
      results.push({
        table: skipped,
        rowsMigrated: 0,
        skipped: true,
        skipReason: 'ephemeral or environment-specific — re-issue on target',
      });
    }

    onProgress(`[migrate-tier] Complete. ${totalRows.toString()} total rows migrated.`);
    return { tables: results, totalRows, dryRun: false };
  } finally {
    // Release the advisory lock regardless of success or failure.
    if (target.releaseAdvisoryLock) {
      await target.releaseAdvisoryLock();
    }
  }
}
|
||||
62
packages/storage/src/redact-error.test.ts
Normal file
62
packages/storage/src/redact-error.test.ts
Normal file
@@ -0,0 +1,62 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { redactErrMsg } from './redact-error.js';
|
||||
|
||||
// Each case feeds a realistic driver error message through redactErrMsg and
// pins the exact scrubbed output: host, port, and database must survive,
// userinfo (user:password) must not.
describe('redactErrMsg', () => {
  it('redacts user:password from a postgres:// URL embedded in an error message', () => {
    const msg = 'connect ECONNREFUSED postgres://admin:s3cr3t@db.example.com:5432/mosaic';
    expect(redactErrMsg(msg)).toBe(
      'connect ECONNREFUSED postgres://***@db.example.com:5432/mosaic',
    );
  });

  it('redacts user:password from a postgresql:// URL', () => {
    const msg = 'connection failed: postgresql://myuser:mypass@localhost:5432/testdb';
    expect(redactErrMsg(msg)).toBe('connection failed: postgresql://***@localhost:5432/testdb');
  });

  it('handles URLs with no password (user only) — still redacts userinfo', () => {
    const msg = 'error postgres://justuser@host:5432/db';
    expect(redactErrMsg(msg)).toBe('error postgres://***@host:5432/db');
  });

  it('returns the original message unchanged when no connection URL is present', () => {
    const msg = 'connection timed out after 5 seconds';
    expect(redactErrMsg(msg)).toBe('connection timed out after 5 seconds');
  });

  it('is case-insensitive for the scheme (scheme is normalized to lowercase in output)', () => {
    // The regex replacement uses a lowercase literal, so the matched scheme is
    // replaced with the lowercase form regardless of the original casing.
    const msg = 'POSTGRES://admin:pass@host:5432/db';
    expect(redactErrMsg(msg)).toBe('postgres://***@host:5432/db');
  });

  it('redacts multiple URLs in a single message', () => {
    const msg = 'src postgres://u:p@host1/db1 dst postgresql://v:q@host2/db2';
    expect(redactErrMsg(msg)).toBe('src postgres://***@host1/db1 dst postgresql://***@host2/db2');
  });

  it('does not alter a message with a postgres URL that has no userinfo', () => {
    // No userinfo component — pattern does not match, message unchanged.
    expect(redactErrMsg('error at postgres://host:5432/db')).toBe(
      'error at postgres://host:5432/db',
    );
  });

  it('redacts user:password from a redis:// URL', () => {
    const msg = 'connect ECONNREFUSED redis://user:pass@host:6379';
    expect(redactErrMsg(msg)).toBe('connect ECONNREFUSED redis://***@host:6379');
  });

  it('redacts user:password from a rediss:// URL (TLS)', () => {
    const msg = 'connect ECONNREFUSED rediss://user:pass@host:6379';
    expect(redactErrMsg(msg)).toBe('connect ECONNREFUSED rediss://***@host:6379');
  });

  it('redacts both a postgres URL and a redis URL in the same message', () => {
    const msg =
      'primary postgres://admin:s3cr3t@db:5432/mosaic cache redis://cacheuser:cachepass@cache:6379';
    expect(redactErrMsg(msg)).toBe(
      'primary postgres://***@db:5432/mosaic cache redis://***@cache:6379',
    );
  });
});
|
||||
39
packages/storage/src/redact-error.ts
Normal file
39
packages/storage/src/redact-error.ts
Normal file
@@ -0,0 +1,39 @@
|
||||
/**
|
||||
* redact-error.ts — Internal credential-scrubbing helper.
|
||||
*
|
||||
* The `postgres` npm package can embed the full DSN (including the password)
|
||||
* in connection-failure error messages. This module provides a single helper
|
||||
* that strips the user:password portion from any such message before it is
|
||||
* re-thrown, logged, or surfaced in a structured health report.
|
||||
*
|
||||
* This file is intentionally NOT re-exported from the package index — it is
|
||||
* an internal utility for use within packages/storage/src only.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Redacts credentials from error messages that may include connection URLs.
|
||||
* The `postgres` npm package can embed the full DSN in connection-failure
|
||||
* messages, and ioredis can embed `redis://` / `rediss://` URLs similarly.
|
||||
* This helper strips the user:password portion before display.
|
||||
*
|
||||
* Handles `postgres://`, `postgresql://`, `redis://`, and `rediss://`
|
||||
* schemes (case-insensitive). Everything between `://` and `@` (the userinfo
|
||||
* component) is replaced with `***` so that the host, port, and database name
|
||||
* remain visible for diagnostics while the secret is never written to logs or
|
||||
* CI output.
|
||||
*
|
||||
* @example
|
||||
* redactErrMsg('connect ECONNREFUSED postgres://admin:s3cr3t@db:5432/mosaic')
|
||||
* // → 'connect ECONNREFUSED postgres://***@db:5432/mosaic'
|
||||
*
|
||||
* redactErrMsg('connect ECONNREFUSED redis://user:pass@cache:6379')
|
||||
* // → 'connect ECONNREFUSED redis://***@cache:6379'
|
||||
*/
|
||||
const CREDENTIAL_URL_RE = /(postgres(?:ql)?|rediss?):\/\/[^@\s]*@/gi;
|
||||
|
||||
export function redactErrMsg(msg: string): string {
|
||||
return msg.replace(
|
||||
CREDENTIAL_URL_RE,
|
||||
(_match, scheme: string) => `${scheme.toLowerCase()}://***@`,
|
||||
);
|
||||
}
|
||||
52
packages/storage/src/test-utils/pglite-with-vector.ts
Normal file
52
packages/storage/src/test-utils/pglite-with-vector.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
/**
|
||||
* Test-only helpers for creating a PGlite database with the pgvector extension
|
||||
* and running Drizzle migrations against it.
|
||||
*
|
||||
* These are intentionally NOT exported from @mosaicstack/db to avoid pulling
|
||||
* the WASM vector bundle into the public API surface.
|
||||
*/
|
||||
|
||||
import { createRequire } from 'node:module';
|
||||
import { dirname, resolve } from 'node:path';
|
||||
|
||||
import { PGlite } from '@electric-sql/pglite';
|
||||
import { vector } from '@electric-sql/pglite/vector';
|
||||
import { drizzle } from 'drizzle-orm/pglite';
|
||||
import { migrate as migratePglite } from 'drizzle-orm/pglite/migrator';
|
||||
import type { PgliteDatabase } from 'drizzle-orm/pglite';
|
||||
import * as schema from '@mosaicstack/db';
|
||||
import type { DbHandle } from '@mosaicstack/db';
|
||||
|
||||
/**
|
||||
* Create a PGlite DB handle with the pgvector extension loaded.
|
||||
* Required for running Drizzle migrations that include `CREATE EXTENSION vector`.
|
||||
*/
|
||||
export function createPgliteDbWithVector(dataDir: string): DbHandle {
|
||||
const client = new PGlite(dataDir, { extensions: { vector } });
|
||||
const db = drizzle(client, { schema });
|
||||
return {
|
||||
db: db as unknown as DbHandle['db'],
|
||||
close: async () => {
|
||||
await client.close();
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Run Drizzle migrations against an already-open PGlite database handle.
|
||||
* Resolves the migrations folder from @mosaicstack/db's installed location.
|
||||
*
|
||||
* @param db A PgliteDatabase instance (from drizzle-orm/pglite).
|
||||
*/
|
||||
export async function runPgliteMigrations(
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
db: PgliteDatabase<any>,
|
||||
): Promise<void> {
|
||||
// Resolve @mosaicstack/db package root to locate its drizzle migrations folder.
|
||||
const _require = createRequire(import.meta.url);
|
||||
const dbPkgMain = _require.resolve('@mosaicstack/db');
|
||||
// dbPkgMain → …/packages/db/dist/index.js → dirname = dist/
|
||||
// go up one level from dist/ to find the sibling drizzle/ folder
|
||||
const migrationsFolder = resolve(dirname(dbPkgMain), '../drizzle');
|
||||
await migratePglite(db, { migrationsFolder });
|
||||
}
|
||||
546
packages/storage/src/tier-detection.spec.ts
Normal file
546
packages/storage/src/tier-detection.spec.ts
Normal file
@@ -0,0 +1,546 @@
|
||||
/**
|
||||
* Unit tests for tier-detection.ts.
|
||||
*
|
||||
* All external I/O (postgres, ioredis) is mocked — no live services required.
|
||||
*
|
||||
* Note on hoisting: vi.mock() factories are hoisted above all imports by vitest.
|
||||
* Variables referenced inside factory callbacks must be declared via vi.hoisted()
|
||||
* so they are available at hoist time.
|
||||
*/
|
||||
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Hoist shared mock state so factories can reference it */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
const mocks = vi.hoisted(() => {
  // postgres: the package's default export is a factory returning a
  // tagged-template `sql` function that also carries an end() method.
  // mockSqlFn stands in for that function; the constructor mock attaches
  // mockEnd as its `end` property before handing it back.
  const mockSqlFn = vi.fn();
  const mockEnd = vi.fn().mockResolvedValue(undefined);
  const mockPostgresConstructor = vi.fn(() => {
    const sql = mockSqlFn as ReturnType<typeof mockSqlFn>;
    (sql as unknown as Record<string, unknown>)['end'] = mockEnd;
    return sql;
  });

  // ioredis: the probe code under test calls connect(), ping(), and
  // disconnect() — provide a double for each.
  const mockRedisConnect = vi.fn().mockResolvedValue(undefined);
  const mockRedisPing = vi.fn().mockResolvedValue('PONG');
  const mockRedisDisconnect = vi.fn();
  const MockRedis = vi.fn().mockImplementation(() => ({
    connect: mockRedisConnect,
    ping: mockRedisPing,
    disconnect: mockRedisDisconnect,
  }));

  // Everything is returned by name so both the vi.mock() factories and the
  // individual tests can reach the same shared mock instances.
  return {
    mockSqlFn,
    mockEnd,
    mockPostgresConstructor,
    mockRedisConnect,
    mockRedisPing,
    mockRedisDisconnect,
    MockRedis,
  };
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Module mocks (registered at hoist time) */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Swap the real drivers for the hoisted doubles. vitest hoists vi.mock()
// factories above all imports — which is why the shared state above must come
// from vi.hoisted() rather than plain module scope.
vi.mock('postgres', () => ({
  default: mocks.mockPostgresConstructor,
}));

vi.mock('ioredis', () => ({
  Redis: mocks.MockRedis,
}));
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Import SUT after mocks are registered */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
import { detectAndAssertTier, probeServiceHealth, TierDetectionError } from './tier-detection.js';
|
||||
import type { TierConfig } from './tier-detection.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Config fixtures */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Tier config fixtures — one per storage tier. Host names and ports are
// arbitrary: all network I/O is intercepted by the module mocks above.

// local: PGlite on disk, in-process queue — no external services probed.
const LOCAL_CONFIG: TierConfig = {
  tier: 'local',
  storage: { type: 'pglite', dataDir: '.mosaic/pglite' },
  queue: { type: 'local' },
};

// standalone: real Postgres + BullMQ-on-Valkey, no enableVector flag.
const STANDALONE_CONFIG: TierConfig = {
  tier: 'standalone',
  storage: { type: 'postgres', url: 'postgresql://mosaic:mosaic@db-host:5432/mosaic' },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};

// federated: like standalone but with enableVector: true, which the tests
// expect to trigger an additional pgvector probe.
const FEDERATED_CONFIG: TierConfig = {
  tier: 'federated',
  storage: {
    type: 'postgres',
    url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
    enableVector: true,
  },
  queue: { type: 'bullmq', url: 'redis://valkey-host:6380' },
};
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
describe('detectAndAssertTier', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks();
|
||||
|
||||
// Default: all probes succeed.
|
||||
mocks.mockSqlFn.mockResolvedValue([]);
|
||||
mocks.mockEnd.mockResolvedValue(undefined);
|
||||
mocks.mockRedisConnect.mockResolvedValue(undefined);
|
||||
mocks.mockRedisPing.mockResolvedValue('PONG');
|
||||
|
||||
// Re-wire constructor to return a fresh sql-like object each time.
|
||||
mocks.mockPostgresConstructor.mockImplementation(() => {
|
||||
const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
|
||||
(sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
|
||||
return sql;
|
||||
});
|
||||
mocks.MockRedis.mockImplementation(() => ({
|
||||
connect: mocks.mockRedisConnect,
|
||||
ping: mocks.mockRedisPing,
|
||||
disconnect: mocks.mockRedisDisconnect,
|
||||
}));
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 1. local — no-op */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('resolves immediately for tier=local without touching postgres or ioredis', async () => {
|
||||
await expect(detectAndAssertTier(LOCAL_CONFIG)).resolves.toBeUndefined();
|
||||
expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
|
||||
expect(mocks.MockRedis).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 2. standalone — happy path */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('resolves for tier=standalone when postgres and valkey are reachable', async () => {
|
||||
await expect(detectAndAssertTier(STANDALONE_CONFIG)).resolves.toBeUndefined();
|
||||
|
||||
// Postgres was probed (SELECT 1 only — no pgvector check).
|
||||
expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(1);
|
||||
expect(mocks.mockSqlFn).toHaveBeenCalledTimes(1);
|
||||
// Valkey was probed.
|
||||
expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
|
||||
expect(mocks.mockRedisPing).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 3. standalone — postgres unreachable */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('throws TierDetectionError with service=postgres when postgres query rejects', async () => {
|
||||
mocks.mockSqlFn.mockRejectedValueOnce(new Error('connection refused'));
|
||||
|
||||
const promise = detectAndAssertTier(STANDALONE_CONFIG);
|
||||
await expect(promise).rejects.toBeInstanceOf(TierDetectionError);
|
||||
|
||||
// Confirm no valkey probe happened (fail fast on first error).
|
||||
expect(mocks.MockRedis).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('sets service=postgres on the error when postgres fails', async () => {
|
||||
mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));
|
||||
|
||||
try {
|
||||
await detectAndAssertTier(STANDALONE_CONFIG);
|
||||
expect.fail('should have thrown');
|
||||
} catch (err) {
|
||||
expect(err).toBeInstanceOf(TierDetectionError);
|
||||
const typed = err as TierDetectionError;
|
||||
expect(typed.service).toBe('postgres');
|
||||
expect(typed.remediation).toContain('docker compose');
|
||||
}
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 4. standalone — valkey unreachable */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('throws TierDetectionError with service=valkey when ping fails', async () => {
|
||||
// Postgres probe succeeds; valkey connect fails.
|
||||
mocks.mockSqlFn.mockResolvedValue([]);
|
||||
mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));
|
||||
|
||||
try {
|
||||
await detectAndAssertTier(STANDALONE_CONFIG);
|
||||
expect.fail('should have thrown');
|
||||
} catch (err) {
|
||||
expect(err).toBeInstanceOf(TierDetectionError);
|
||||
const typed = err as TierDetectionError;
|
||||
expect(typed.service).toBe('valkey');
|
||||
expect(typed.message).toContain('valkey');
|
||||
expect(typed.remediation).toContain('valkey-federated');
|
||||
}
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 5. federated — happy path */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('resolves for tier=federated when all three checks pass', async () => {
|
||||
// SELECT 1 and CREATE EXTENSION both succeed.
|
||||
mocks.mockSqlFn.mockResolvedValue([]);
|
||||
|
||||
await expect(detectAndAssertTier(FEDERATED_CONFIG)).resolves.toBeUndefined();
|
||||
|
||||
// postgres probe (SELECT 1) + pgvector probe (CREATE EXTENSION) = 2 postgres constructors.
|
||||
expect(mocks.mockPostgresConstructor).toHaveBeenCalledTimes(2);
|
||||
expect(mocks.mockSqlFn).toHaveBeenCalledTimes(2);
|
||||
// Valkey probed once.
|
||||
expect(mocks.MockRedis).toHaveBeenCalledTimes(1);
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 6. federated — pgvector not installable */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('throws TierDetectionError with service=pgvector when CREATE EXTENSION fails', async () => {
|
||||
// SELECT 1 succeeds (first call), CREATE EXTENSION fails (second call).
|
||||
mocks.mockSqlFn
|
||||
.mockResolvedValueOnce([]) // SELECT 1
|
||||
.mockRejectedValueOnce(new Error('extension "vector" is not available'));
|
||||
|
||||
try {
|
||||
await detectAndAssertTier(FEDERATED_CONFIG);
|
||||
expect.fail('should have thrown');
|
||||
} catch (err) {
|
||||
expect(err).toBeInstanceOf(TierDetectionError);
|
||||
const typed = err as TierDetectionError;
|
||||
expect(typed.service).toBe('pgvector');
|
||||
expect(typed.message).toContain('pgvector');
|
||||
expect(typed.remediation).toContain('pgvector/pgvector');
|
||||
}
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 7. probeValkey honors connectTimeout and lazyConnect */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
it('constructs the ioredis Redis client with connectTimeout: 5000', async () => {
|
||||
await detectAndAssertTier(STANDALONE_CONFIG);
|
||||
|
||||
expect(mocks.MockRedis).toHaveBeenCalledOnce();
|
||||
expect(mocks.MockRedis).toHaveBeenCalledWith(
|
||||
expect.any(String),
|
||||
expect.objectContaining({ connectTimeout: 5000, lazyConnect: true }),
|
||||
);
|
||||
});
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 8. probePgvector — library-not-installed remediation */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
  it('includes pgvector/pgvector:pg17 in remediation when pgvector library is missing', async () => {
    // SELECT 1 succeeds; CREATE EXTENSION fails with the canonical library-missing message.
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
      .mockRejectedValueOnce(new Error('extension "vector" is not available')); // probePgvector

    try {
      await detectAndAssertTier(FEDERATED_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      // Library-missing failures get the "use the pgvector image" remediation.
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('pgvector');
      expect(typed.remediation).toContain('pgvector/pgvector:pg17');
    }
  });
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 9. probePgvector — permission / other error remediation */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
  it('mentions CREATE permission or superuser in remediation for a generic pgvector error', async () => {
    // SELECT 1 succeeds; CREATE EXTENSION fails with a permission error.
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // SELECT 1 (probePostgres)
      .mockRejectedValueOnce(new Error('permission denied to create extension'));

    try {
      await detectAndAssertTier(FEDERATED_CONFIG);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('pgvector');
      // Must NOT point to the image fix — that's only for the library-missing case.
      expect(typed.remediation).not.toContain('pgvector/pgvector:pg17');
      // Must mention permissions or superuser.
      expect(typed.remediation).toMatch(/CREATE|superuser/i);
    }
  });
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 10. federated tier rejects non-bullmq queue.type */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
  it('throws TierDetectionError with service=config for federated tier with queue.type !== bullmq', async () => {
    // Federated tier hard-requires bullmq; a 'local' queue is a config error.
    const badConfig: TierConfig = {
      tier: 'federated',
      storage: {
        type: 'postgres',
        url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
        enableVector: true,
      },
      queue: { type: 'local' },
    };

    try {
      await detectAndAssertTier(badConfig);
      expect.fail('should have thrown');
    } catch (err) {
      expect(err).toBeInstanceOf(TierDetectionError);
      const typed = err as TierDetectionError;
      expect(typed.service).toBe('config');
      expect(typed.remediation).toContain('bullmq');
    }

    // No network probes should have been attempted.
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });
|
||||
|
||||
/* ---------------------------------------------------------------- */
|
||||
/* 11. Error fields populated */
|
||||
/* ---------------------------------------------------------------- */
|
||||
|
||||
  it('populates host, port, and remediation on a thrown TierDetectionError', async () => {
    // Make the first Postgres query fail so probePostgres throws.
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));

    let caught: TierDetectionError | undefined;
    try {
      await detectAndAssertTier(STANDALONE_CONFIG);
    } catch (err) {
      caught = err as TierDetectionError;
    }

    expect(caught).toBeInstanceOf(TierDetectionError);
    expect(caught!.service).toBe('postgres');
    // Host and port are extracted from the Postgres URL in STANDALONE_CONFIG.
    expect(caught!.host).toBe('db-host');
    expect(caught!.port).toBe(5432);
    expect(caught!.remediation).toMatch(/docker compose/i);
    expect(caught!.message).toContain('db-host:5432');
  });
|
||||
});
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* probeServiceHealth tests */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Non-throwing health-report API: every required service is probed even when
// earlier ones fail, and the result is a structured TierHealthReport.
describe('probeServiceHealth', () => {
  beforeEach(() => {
    vi.clearAllMocks();

    // Default to the all-healthy state; individual tests override per service.
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockEnd.mockResolvedValue(undefined);
    mocks.mockRedisConnect.mockResolvedValue(undefined);
    mocks.mockRedisPing.mockResolvedValue('PONG');

    mocks.mockPostgresConstructor.mockImplementation(() => {
      const sql = mocks.mockSqlFn as ReturnType<typeof mocks.mockSqlFn>;
      // The production code calls sql.end() in its finally block; graft the
      // mock onto the tagged-template function so cleanup doesn't explode.
      (sql as unknown as Record<string, unknown>)['end'] = mocks.mockEnd;
      return sql;
    });
    mocks.MockRedis.mockImplementation(() => ({
      connect: mocks.mockRedisConnect,
      ping: mocks.mockRedisPing,
      disconnect: mocks.mockRedisDisconnect,
    }));
  });

  /* ---------------------------------------------------------------- */
  /* 12. local tier — all skipped, green                              */
  /* ---------------------------------------------------------------- */

  it('returns all services as skipped and overall green for local tier', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);

    expect(report.tier).toBe('local');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);
    for (const svc of report.services) {
      expect(svc.status).toBe('skipped');
    }
    // PGlite is in-process — no clients may be constructed at all.
    expect(mocks.mockPostgresConstructor).not.toHaveBeenCalled();
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 13. postgres fails, valkey ok → red                              */
  /* ---------------------------------------------------------------- */

  it('returns red overall with postgres fail and valkey ok for standalone when postgres fails', async () => {
    mocks.mockSqlFn.mockRejectedValue(new Error('connection refused'));

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    expect(report.overall).toBe('red');

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('fail');
    expect(pgCheck?.error).toBeDefined();
    expect(pgCheck?.error?.remediation).toContain('docker compose');

    // Valkey must still have been probed despite the earlier failure.
    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('ok');
  });

  /* ---------------------------------------------------------------- */
  /* 14. federated all green → 3 services ok                          */
  /* ---------------------------------------------------------------- */

  it('returns green overall with all 3 services ok for federated when all pass', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    expect(report.tier).toBe('federated');
    expect(report.overall).toBe('green');
    expect(report.services).toHaveLength(3);

    for (const svc of report.services) {
      expect(svc.status).toBe('ok');
    }
  });

  /* ---------------------------------------------------------------- */
  /* 15. durationMs is a non-negative number for every service check  */
  /* ---------------------------------------------------------------- */

  it('sets durationMs as a non-negative number for every service check', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });

  it('sets durationMs >= 0 even for skipped services (local tier)', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG);

    for (const svc of report.services) {
      expect(typeof svc.durationMs).toBe('number');
      expect(svc.durationMs).toBeGreaterThanOrEqual(0);
    }
  });

  /* ---------------------------------------------------------------- */
  /* 16. configPath is passed through to the report                   */
  /* ---------------------------------------------------------------- */

  it('includes configPath in the report when provided', async () => {
    const report = await probeServiceHealth(LOCAL_CONFIG, '/etc/mosaic/mosaic.config.json');
    expect(report.configPath).toBe('/etc/mosaic/mosaic.config.json');
  });

  /* ---------------------------------------------------------------- */
  /* 17. standalone — valkey fails, postgres ok → red                 */
  /* ---------------------------------------------------------------- */

  it('returns red with valkey fail and postgres ok for standalone when valkey fails', async () => {
    mocks.mockSqlFn.mockResolvedValue([]);
    mocks.mockRedisConnect.mockRejectedValue(new Error('ECONNREFUSED'));

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    expect(report.overall).toBe('red');

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck?.status).toBe('ok');

    const vkCheck = report.services.find((s) => s.name === 'valkey');
    expect(vkCheck?.status).toBe('fail');
    expect(vkCheck?.error).toBeDefined();
  });

  /* ---------------------------------------------------------------- */
  /* 18. federated — pgvector fails → red with remediation            */
  /* ---------------------------------------------------------------- */

  it('returns red with pgvector fail for federated when pgvector probe fails', async () => {
    mocks.mockSqlFn
      .mockResolvedValueOnce([]) // postgres SELECT 1
      .mockRejectedValueOnce(new Error('extension "vector" is not available')); // pgvector

    const report = await probeServiceHealth(FEDERATED_CONFIG);

    expect(report.overall).toBe('red');

    const pvCheck = report.services.find((s) => s.name === 'pgvector');
    expect(pvCheck?.status).toBe('fail');
    expect(pvCheck?.error?.remediation).toContain('pgvector/pgvector:pg17');
  });

  /* ---------------------------------------------------------------- */
  /* 19. federated — non-bullmq queue → red config error, no network  */
  /* ---------------------------------------------------------------- */

  it('returns red overall with config error when federated tier has non-bullmq queue (no network call)', async () => {
    const federatedBadQueueConfig: TierConfig = {
      tier: 'federated',
      storage: {
        type: 'postgres',
        url: 'postgresql://mosaic:mosaic@db-host:5433/mosaic',
        enableVector: true,
      },
      queue: { type: 'local' },
    };

    const report = await probeServiceHealth(federatedBadQueueConfig);

    expect(report.overall).toBe('red');

    const valkey = report.services.find((s) => s.name === 'valkey');
    expect(valkey?.status).toBe('fail');
    expect(valkey?.error?.remediation).toMatch(/bullmq/i);

    // Critically: no network call was made — MockRedis constructor must NOT have been called.
    expect(mocks.MockRedis).not.toHaveBeenCalled();
  });

  /* ---------------------------------------------------------------- */
  /* 20. durationMs actually measures real elapsed time               */
  /* ---------------------------------------------------------------- */

  it('measures real elapsed time for service probes', async () => {
    const DELAY_MS = 25;

    // Make the postgres mock introduce a real wall-clock delay.
    mocks.mockSqlFn.mockImplementation(
      () =>
        new Promise((resolve) =>
          setTimeout(() => {
            resolve([]);
          }, DELAY_MS),
        ),
    );

    const report = await probeServiceHealth(STANDALONE_CONFIG);

    const pgCheck = report.services.find((s) => s.name === 'postgres');
    expect(pgCheck).toBeDefined();
    // Must be >= 20ms (small slack for jitter). Would be 0 if timer were stubbed.
    expect(pgCheck!.durationMs).toBeGreaterThanOrEqual(20);
  });
});
|
||||
559
packages/storage/src/tier-detection.ts
Normal file
559
packages/storage/src/tier-detection.ts
Normal file
@@ -0,0 +1,559 @@
|
||||
/**
|
||||
* Tier Detection — pre-flight service reachability probes.
|
||||
*
|
||||
* Lifted from apps/gateway/src/bootstrap/tier-detector.ts so both the gateway
|
||||
* and the mosaic CLI can share the same probe logic without duplicating code or
|
||||
* creating circular workspace dependencies.
|
||||
*
|
||||
* Library choices:
|
||||
* - Postgres: `postgres` npm package (already a dep via @mosaicstack/db / drizzle-orm).
|
||||
* - Valkey: `ioredis` (compatible with Valkey; same URL convention used by bullmq).
|
||||
*/
|
||||
|
||||
import postgres from 'postgres';
|
||||
import { Redis } from 'ioredis';
|
||||
import { redactErrMsg } from './redact-error.js';
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Local structural type — avoids circular dependency */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Minimal structural shape required for tier detection.
|
||||
* Mirrors the relevant fields of MosaicConfig (from @mosaicstack/config) without
|
||||
* creating a dependency cycle (config depends on storage for StorageConfig).
|
||||
* Any object that satisfies MosaicConfig also satisfies this type.
|
||||
*/
|
||||
export interface TierConfig {
  // Deployment tier; decides which external services must be probed.
  tier: 'local' | 'standalone' | 'federated';
  // Storage backend. Only the 'postgres' variant carries a connection URL.
  storage:
    | { type: 'pglite'; dataDir?: string }
    | { type: 'postgres'; url: string; enableVector?: boolean }
    | { type: 'files'; dataDir: string; format?: 'json' | 'md' };
  // Queue backend; `url` is the Valkey/Redis URL when type === 'bullmq'.
  queue: { type: string; url?: string };
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Public types */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Result of probing a single external service. */
export interface ServiceCheck {
  name: 'postgres' | 'valkey' | 'pgvector';
  // 'skipped' means the service is not required for the current tier.
  status: 'ok' | 'fail' | 'skipped';
  host?: string;
  port?: number;
  // Wall-clock probe duration in milliseconds; 0 for skipped services.
  durationMs: number;
  // Present only when status === 'fail'.
  error?: { message: string; remediation: string };
}
|
||||
|
||||
/** Aggregate health report produced by `probeServiceHealth`. */
export interface TierHealthReport {
  tier: 'local' | 'standalone' | 'federated';
  // Echoed back when the caller supplies the config file path.
  configPath?: string;
  // 'yellow' is reserved for future optional probes; currently unused.
  overall: 'green' | 'yellow' | 'red';
  services: ServiceCheck[];
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Structured error type */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
export class TierDetectionError extends Error {
|
||||
public readonly service: 'postgres' | 'valkey' | 'pgvector' | 'config';
|
||||
public readonly host: string;
|
||||
public readonly port: number;
|
||||
public readonly remediation: string;
|
||||
|
||||
constructor(opts: {
|
||||
service: 'postgres' | 'valkey' | 'pgvector' | 'config';
|
||||
host: string;
|
||||
port: number;
|
||||
remediation: string;
|
||||
cause?: unknown;
|
||||
}) {
|
||||
const message =
|
||||
`[tier-detector] ${opts.service} unreachable or unusable at ` +
|
||||
`${opts.host}:${opts.port} — ${opts.remediation}`;
|
||||
super(message, { cause: opts.cause });
|
||||
this.name = 'TierDetectionError';
|
||||
this.service = opts.service;
|
||||
this.host = opts.host;
|
||||
this.port = opts.port;
|
||||
this.remediation = opts.remediation;
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* URL helpers */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Extract host and port from a URL string, returning safe fallbacks on parse failure. */
|
||||
function parseHostPort(url: string, defaultPort: number): { host: string; port: number } {
|
||||
try {
|
||||
const parsed = new URL(url);
|
||||
const host = parsed.hostname || 'unknown';
|
||||
const port = parsed.port ? parseInt(parsed.port, 10) : defaultPort;
|
||||
return { host, port };
|
||||
} catch {
|
||||
return { host: 'unknown', port: defaultPort };
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Internal probe results */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/** Internal non-throwing probe outcome used by the *Measured probe variants. */
interface ProbeResult {
  host: string;
  port: number;
  durationMs: number;
  // Absent on success; message is pre-redacted of credentials.
  error?: { message: string; remediation: string };
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Postgres probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
async function probePostgres(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
// Run a trivial query to confirm connectivity.
|
||||
await sql`SELECT 1`;
|
||||
} catch (cause) {
|
||||
throw new TierDetectionError({
|
||||
service: 'postgres',
|
||||
host,
|
||||
port,
|
||||
remediation:
|
||||
'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {
|
||||
// Ignore cleanup errors — we already have what we need.
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function probePostgresMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
const start = Date.now();
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
await sql`SELECT 1`;
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: {
|
||||
message: redactErrMsg(cause instanceof Error ? cause.message : String(cause)),
|
||||
remediation:
|
||||
'Start Postgres: `docker compose -f docker-compose.federated.yml --profile federated up -d postgres-federated`',
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* pgvector probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
async function probePgvector(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
// This succeeds whether the extension is already installed or freshly created.
|
||||
// It errors only if the pgvector shared library is missing from the Postgres binary.
|
||||
await sql`CREATE EXTENSION IF NOT EXISTS vector`;
|
||||
} catch (cause) {
|
||||
const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
|
||||
const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
|
||||
const remediation = isLibraryMissing
|
||||
? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
|
||||
: 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
|
||||
throw new TierDetectionError({
|
||||
service: 'pgvector',
|
||||
host,
|
||||
port,
|
||||
remediation,
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {
|
||||
// Ignore cleanup errors.
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function probePgvectorMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 5432);
|
||||
const start = Date.now();
|
||||
let sql: ReturnType<typeof postgres> | undefined;
|
||||
try {
|
||||
sql = postgres(url, {
|
||||
max: 1,
|
||||
connect_timeout: 5,
|
||||
idle_timeout: 5,
|
||||
});
|
||||
await sql`CREATE EXTENSION IF NOT EXISTS vector`;
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
const causeMsg = cause instanceof Error ? cause.message.toLowerCase() : '';
|
||||
const isLibraryMissing = causeMsg.includes('extension "vector" is not available');
|
||||
const remediation = isLibraryMissing
|
||||
? 'Use the `pgvector/pgvector:pg17` image, not the stock `postgres:17` image. See `docker-compose.federated.yml`.'
|
||||
: 'The database role lacks permission to CREATE EXTENSION. Grant `CREATE` on the database, or run as a superuser.';
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: {
|
||||
message: redactErrMsg(cause instanceof Error ? cause.message : String(cause)),
|
||||
remediation,
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
if (sql) {
|
||||
await sql.end({ timeout: 2 }).catch(() => {});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Valkey probe */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
// Fallback Valkey URL when queue.url is not configured. Note port 6380 —
// the project's compose-mapped port, not the Redis default 6379.
const DEFAULT_VALKEY_URL = 'redis://localhost:6380';
|
||||
|
||||
async function probeValkey(url: string): Promise<void> {
|
||||
const { host, port } = parseHostPort(url, 6380);
|
||||
const client = new Redis(url, {
|
||||
enableReadyCheck: false,
|
||||
maxRetriesPerRequest: 0,
|
||||
retryStrategy: () => null, // no retries — fail fast
|
||||
lazyConnect: true,
|
||||
connectTimeout: 5000, // fail-fast: 5-second hard cap on connection attempt
|
||||
});
|
||||
|
||||
try {
|
||||
await client.connect();
|
||||
const pong = await client.ping();
|
||||
if (pong !== 'PONG') {
|
||||
throw new Error(`Unexpected PING response: ${pong}`);
|
||||
}
|
||||
} catch (cause) {
|
||||
throw new TierDetectionError({
|
||||
service: 'valkey',
|
||||
host,
|
||||
port,
|
||||
remediation:
|
||||
'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
|
||||
cause,
|
||||
});
|
||||
} finally {
|
||||
client.disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
async function probeValkeyMeasured(url: string): Promise<ProbeResult> {
|
||||
const { host, port } = parseHostPort(url, 6380);
|
||||
const start = Date.now();
|
||||
const client = new Redis(url, {
|
||||
enableReadyCheck: false,
|
||||
maxRetriesPerRequest: 0,
|
||||
retryStrategy: () => null,
|
||||
lazyConnect: true,
|
||||
connectTimeout: 5000,
|
||||
});
|
||||
try {
|
||||
await client.connect();
|
||||
const pong = await client.ping();
|
||||
if (pong !== 'PONG') {
|
||||
throw new Error(`Unexpected PING response: ${pong}`);
|
||||
}
|
||||
return { host, port, durationMs: Date.now() - start };
|
||||
} catch (cause) {
|
||||
return {
|
||||
host,
|
||||
port,
|
||||
durationMs: Date.now() - start,
|
||||
error: {
|
||||
message: redactErrMsg(cause instanceof Error ? cause.message : String(cause)),
|
||||
remediation:
|
||||
'Start Valkey: `docker compose -f docker-compose.federated.yml --profile federated up -d valkey-federated`',
|
||||
},
|
||||
};
|
||||
} finally {
|
||||
client.disconnect();
|
||||
}
|
||||
}
|
||||
|
||||
/* ------------------------------------------------------------------ */
|
||||
/* Public entry points */
|
||||
/* ------------------------------------------------------------------ */
|
||||
|
||||
/**
|
||||
* Assert that all services required by `config.tier` are reachable.
|
||||
*
|
||||
* - `local` — no-op (PGlite is in-process; no external services).
|
||||
* - `standalone` — assert Postgres + Valkey (if queue.type === 'bullmq').
|
||||
* - `federated` — assert Postgres + Valkey + pgvector installability.
|
||||
*
|
||||
* Throws `TierDetectionError` on the first failure with host:port and
|
||||
* a remediation hint.
|
||||
*/
|
||||
export async function detectAndAssertTier(config: TierConfig): Promise<void> {
|
||||
if (config.tier === 'local') {
|
||||
// PGlite runs in-process — nothing to probe.
|
||||
return;
|
||||
}
|
||||
|
||||
const pgUrl =
|
||||
config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
|
||||
|
||||
const valkeyUrl =
|
||||
config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
|
||||
|
||||
if (config.tier === 'standalone') {
|
||||
await probePostgres(pgUrl);
|
||||
if (valkeyUrl) {
|
||||
await probeValkey(valkeyUrl);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// tier === 'federated'
|
||||
// Reject misconfigured queue upfront — federated requires bullmq + a Valkey URL.
|
||||
if (config.queue.type !== 'bullmq') {
|
||||
throw new TierDetectionError({
|
||||
service: 'config',
|
||||
host: 'localhost',
|
||||
port: 0,
|
||||
remediation:
|
||||
"Federated tier requires queue.type === 'bullmq'. " +
|
||||
"Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
|
||||
});
|
||||
}
|
||||
const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
|
||||
await probePostgres(pgUrl);
|
||||
await probeValkey(federatedValkeyUrl);
|
||||
await probePgvector(pgUrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* Non-throwing variant for `mosaic gateway doctor`.
|
||||
*
|
||||
* Probes ALL required services even if some fail, returning a structured report.
|
||||
* Services not required for the current tier are reported as `skipped`.
|
||||
*
|
||||
* Overall status:
|
||||
* - `green` — all required services OK
|
||||
* - `yellow` — all required services OK, but a non-critical check failed
|
||||
* (currently unused — reserved for future optional probes)
|
||||
* - `red` — at least one required service failed
|
||||
*/
|
||||
export async function probeServiceHealth(
|
||||
config: TierConfig,
|
||||
configPath?: string,
|
||||
): Promise<TierHealthReport> {
|
||||
const tier = config.tier;
|
||||
|
||||
// local tier: PGlite is in-process, no external services needed.
|
||||
if (tier === 'local') {
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: 'green',
|
||||
services: [
|
||||
{ name: 'postgres', status: 'skipped', durationMs: 0 },
|
||||
{ name: 'valkey', status: 'skipped', durationMs: 0 },
|
||||
{ name: 'pgvector', status: 'skipped', durationMs: 0 },
|
||||
],
|
||||
};
|
||||
}
|
||||
|
||||
const pgUrl =
|
||||
config.storage.type === 'postgres' ? config.storage.url : 'postgresql://localhost:5432/mosaic';
|
||||
|
||||
const valkeyUrl =
|
||||
config.queue.type === 'bullmq' ? (config.queue.url ?? DEFAULT_VALKEY_URL) : null;
|
||||
|
||||
const services: ServiceCheck[] = [];
|
||||
let hasFailure = false;
|
||||
|
||||
if (tier === 'standalone') {
|
||||
// Postgres — required
|
||||
const pgResult = await probePostgresMeasured(pgUrl);
|
||||
if (pgResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'fail',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
error: pgResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'ok',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
// Valkey — required if bullmq
|
||||
if (valkeyUrl) {
|
||||
const vkResult = await probeValkeyMeasured(valkeyUrl);
|
||||
if (vkResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
error: vkResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'ok',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
});
|
||||
}
|
||||
} else {
|
||||
services.push({ name: 'valkey', status: 'skipped', durationMs: 0 });
|
||||
}
|
||||
|
||||
// pgvector — not required for standalone
|
||||
services.push({ name: 'pgvector', status: 'skipped', durationMs: 0 });
|
||||
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: hasFailure ? 'red' : 'green',
|
||||
services,
|
||||
};
|
||||
}
|
||||
|
||||
// tier === 'federated'
|
||||
// Postgres — required
|
||||
const pgResult = await probePostgresMeasured(pgUrl);
|
||||
if (pgResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'fail',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
error: pgResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'postgres',
|
||||
status: 'ok',
|
||||
host: pgResult.host,
|
||||
port: pgResult.port,
|
||||
durationMs: pgResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
// Valkey — required for federated (queue.type must be bullmq)
|
||||
if (config.queue.type !== 'bullmq') {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: 'localhost',
|
||||
port: 0,
|
||||
durationMs: 0,
|
||||
error: {
|
||||
message: "Federated tier requires queue.type === 'bullmq'",
|
||||
remediation:
|
||||
"Set queue: { type: 'bullmq', url: 'redis://...' } in your mosaic.config.json.",
|
||||
},
|
||||
});
|
||||
} else {
|
||||
const federatedValkeyUrl = config.queue.url ?? DEFAULT_VALKEY_URL;
|
||||
const vkResult = await probeValkeyMeasured(federatedValkeyUrl);
|
||||
if (vkResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'fail',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
error: vkResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'valkey',
|
||||
status: 'ok',
|
||||
host: vkResult.host,
|
||||
port: vkResult.port,
|
||||
durationMs: vkResult.durationMs,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// pgvector — required for federated
|
||||
const pvResult = await probePgvectorMeasured(pgUrl);
|
||||
if (pvResult.error) {
|
||||
hasFailure = true;
|
||||
services.push({
|
||||
name: 'pgvector',
|
||||
status: 'fail',
|
||||
host: pvResult.host,
|
||||
port: pvResult.port,
|
||||
durationMs: pvResult.durationMs,
|
||||
error: pvResult.error,
|
||||
});
|
||||
} else {
|
||||
services.push({
|
||||
name: 'pgvector',
|
||||
status: 'ok',
|
||||
host: pvResult.host,
|
||||
port: pvResult.port,
|
||||
durationMs: pvResult.durationMs,
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
tier,
|
||||
configPath,
|
||||
overall: hasFailure ? 'red' : 'green',
|
||||
services,
|
||||
};
|
||||
}
|
||||
@@ -38,6 +38,6 @@ export interface StorageAdapter {
|
||||
}
|
||||
|
||||
export type StorageConfig =
|
||||
| { type: 'postgres'; url: string }
|
||||
| { type: 'postgres'; url: string; enableVector?: boolean }
|
||||
| { type: 'pglite'; dataDir?: string }
|
||||
| { type: 'files'; dataDir: string; format?: 'json' | 'md' };
|
||||
|
||||
8
packages/storage/vitest.config.ts
Normal file
8
packages/storage/vitest.config.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { defineConfig } from 'vitest/config';
|
||||
|
||||
export default defineConfig({
|
||||
test: {
|
||||
globals: true,
|
||||
environment: 'node',
|
||||
},
|
||||
});
|
||||
231
pnpm-lock.yaml
generated
231
pnpm-lock.yaml
generated
@@ -131,6 +131,9 @@ importers:
|
||||
'@opentelemetry/semantic-conventions':
|
||||
specifier: ^1.40.0
|
||||
version: 1.40.0
|
||||
'@peculiar/x509':
|
||||
specifier: ^2.0.0
|
||||
version: 2.0.0
|
||||
'@sinclair/typebox':
|
||||
specifier: ^0.34.48
|
||||
version: 0.34.48
|
||||
@@ -152,12 +155,21 @@ importers:
|
||||
fastify:
|
||||
specifier: ^5.0.0
|
||||
version: 5.8.2
|
||||
ioredis:
|
||||
specifier: ^5.10.0
|
||||
version: 5.10.0
|
||||
jose:
|
||||
specifier: ^6.2.2
|
||||
version: 6.2.2
|
||||
node-cron:
|
||||
specifier: ^4.2.1
|
||||
version: 4.2.1
|
||||
openai:
|
||||
specifier: ^6.32.0
|
||||
version: 6.32.0(ws@8.20.0)(zod@4.3.6)
|
||||
postgres:
|
||||
specifier: ^3.4.8
|
||||
version: 3.4.8
|
||||
reflect-metadata:
|
||||
specifier: ^0.2.0
|
||||
version: 0.2.2
|
||||
@@ -642,7 +654,16 @@ importers:
|
||||
commander:
|
||||
specifier: ^13.0.0
|
||||
version: 13.1.0
|
||||
ioredis:
|
||||
specifier: ^5.10.0
|
||||
version: 5.10.0
|
||||
postgres:
|
||||
specifier: ^3.4.8
|
||||
version: 3.4.8
|
||||
devDependencies:
|
||||
drizzle-orm:
|
||||
specifier: ^0.45.1
|
||||
version: 0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8)
|
||||
typescript:
|
||||
specifier: ^5.8.0
|
||||
version: 5.9.3
|
||||
@@ -3045,6 +3066,40 @@ packages:
|
||||
'@paralleldrive/cuid2@2.3.1':
|
||||
resolution: {integrity: sha512-XO7cAxhnTZl0Yggq6jOgjiOHhbgcO4NqFqwSmQpjK3b6TEE6Uj/jfSk6wzYyemh3+I0sHirKSetjQwn5cZktFw==}
|
||||
|
||||
'@peculiar/asn1-cms@2.6.1':
|
||||
resolution: {integrity: sha512-vdG4fBF6Lkirkcl53q6eOdn3XYKt+kJTG59edgRZORlg/3atWWEReRCx5rYE1ZzTTX6vLK5zDMjHh7vbrcXGtw==}
|
||||
|
||||
'@peculiar/asn1-csr@2.6.1':
|
||||
resolution: {integrity: sha512-WRWnKfIocHyzFYQTka8O/tXCiBquAPSrRjXbOkHbO4qdmS6loffCEGs+rby6WxxGdJCuunnhS2duHURhjyio6w==}
|
||||
|
||||
'@peculiar/asn1-ecc@2.6.1':
|
||||
resolution: {integrity: sha512-+Vqw8WFxrtDIN5ehUdvlN2m73exS2JVG0UAyfVB31gIfor3zWEAQPD+K9ydCxaj3MLen9k0JhKpu9LqviuCE1g==}
|
||||
|
||||
'@peculiar/asn1-pfx@2.6.1':
|
||||
resolution: {integrity: sha512-nB5jVQy3MAAWvq0KY0R2JUZG8bO/bTLpnwyOzXyEh/e54ynGTatAR+csOnXkkVD9AFZ2uL8Z7EV918+qB1qDvw==}
|
||||
|
||||
'@peculiar/asn1-pkcs8@2.6.1':
|
||||
resolution: {integrity: sha512-JB5iQ9Izn5yGMw3ZG4Nw3Xn/hb/G38GYF3lf7WmJb8JZUydhVGEjK/ZlFSWhnlB7K/4oqEs8HnfFIKklhR58Tw==}
|
||||
|
||||
'@peculiar/asn1-pkcs9@2.6.1':
|
||||
resolution: {integrity: sha512-5EV8nZoMSxeWmcxWmmcolg22ojZRgJg+Y9MX2fnE2bGRo5KQLqV5IL9kdSQDZxlHz95tHvIq9F//bvL1OeNILw==}
|
||||
|
||||
'@peculiar/asn1-rsa@2.6.1':
|
||||
resolution: {integrity: sha512-1nVMEh46SElUt5CB3RUTV4EG/z7iYc7EoaDY5ECwganibQPkZ/Y2eMsTKB/LeyrUJ+W/tKoD9WUqIy8vB+CEdA==}
|
||||
|
||||
'@peculiar/asn1-schema@2.6.0':
|
||||
resolution: {integrity: sha512-xNLYLBFTBKkCzEZIw842BxytQQATQv+lDTCEMZ8C196iJcJJMBUZxrhSTxLaohMyKK8QlzRNTRkUmanucnDSqg==}
|
||||
|
||||
'@peculiar/asn1-x509-attr@2.6.1':
|
||||
resolution: {integrity: sha512-tlW6cxoHwgcQghnJwv3YS+9OO1737zgPogZ+CgWRUK4roEwIPzRH4JEiG770xe5HX2ATfCpmX60gurfWIF9dcQ==}
|
||||
|
||||
'@peculiar/asn1-x509@2.6.1':
|
||||
resolution: {integrity: sha512-O9jT5F1A2+t3r7C4VT7LYGXqkGLK7Kj1xFpz7U0isPrubwU5PbDoyYtx6MiGst29yq7pXN5vZbQFKRCP+lLZlA==}
|
||||
|
||||
'@peculiar/x509@2.0.0':
|
||||
resolution: {integrity: sha512-r10lkuy6BNfRmyYdRAfgu6dq0HOmyIV2OLhXWE3gDEPBdX1b8miztJVyX/UxWhLwemNyDP3CLZHpDxDwSY0xaA==}
|
||||
engines: {node: '>=20.0.0'}
|
||||
|
||||
'@pinojs/redact@0.4.0':
|
||||
resolution: {integrity: sha512-k2ENnmBugE/rzQfEcdWHcCY+/FM3VLzH9cYEsbdsoqrvzAKRhUZeRNhAZvB8OitQJ1TBed3yqWtdjzS6wJKBwg==}
|
||||
|
||||
@@ -3940,6 +3995,10 @@ packages:
|
||||
asap@2.0.6:
|
||||
resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==}
|
||||
|
||||
asn1js@3.0.10:
|
||||
resolution: {integrity: sha512-S2s3aOytiKdFRdulw2qPE51MzjzVOisppcVv7jVFR+Kw0kxwvFrDcYA0h7Ndqbmj0HkMIXYWaoj7fli8kgx1eg==}
|
||||
engines: {node: '>=12.0.0'}
|
||||
|
||||
assertion-error@2.0.1:
|
||||
resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==}
|
||||
engines: {node: '>=12'}
|
||||
@@ -5311,6 +5370,9 @@ packages:
|
||||
jose@6.2.1:
|
||||
resolution: {integrity: sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==}
|
||||
|
||||
jose@6.2.2:
|
||||
resolution: {integrity: sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ==}
|
||||
|
||||
js-tokens@4.0.0:
|
||||
resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==}
|
||||
|
||||
@@ -6249,6 +6311,13 @@ packages:
|
||||
resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==}
|
||||
engines: {node: '>=6'}
|
||||
|
||||
pvtsutils@1.3.6:
|
||||
resolution: {integrity: sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg==}
|
||||
|
||||
pvutils@1.1.5:
|
||||
resolution: {integrity: sha512-KTqnxsgGiQ6ZAzZCVlJH5eOjSnvlyEgx1m8bkRJfOhmGRqfo5KLvmAlACQkrjEtOQ4B7wF9TdSLIs9O90MX9xA==}
|
||||
engines: {node: '>=16.0.0'}
|
||||
|
||||
qrcode-terminal@0.12.0:
|
||||
resolution: {integrity: sha512-EXtzRZmC+YGmGlDFbXKxQiMZNwCLEO6BANKXG4iCtSIM0yqc/pappSx3RIKr4r0uh5JsBckOXeKrB3Iz7mdQpQ==}
|
||||
hasBin: true
|
||||
@@ -6817,6 +6886,9 @@ packages:
|
||||
ts-mixer@6.0.4:
|
||||
resolution: {integrity: sha512-ufKpbmrugz5Aou4wcr5Wc1UUFWOLhq+Fm6qa6P0w0K5Qw2yhaUoiWszhCVuNQyNwrlGiscHOmqYoAox1PtvgjA==}
|
||||
|
||||
tslib@1.14.1:
|
||||
resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==}
|
||||
|
||||
tslib@2.8.1:
|
||||
resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==}
|
||||
|
||||
@@ -6829,6 +6901,10 @@ packages:
|
||||
engines: {node: '>=18.0.0'}
|
||||
hasBin: true
|
||||
|
||||
tsyringe@4.10.0:
|
||||
resolution: {integrity: sha512-axr3IdNuVIxnaK5XGEUFTu3YmAQ6lllgrvqfEoR16g/HGnYY/6We4oWENtAnzK6/LpJ2ur9PAb80RBt7/U4ugw==}
|
||||
engines: {node: '>= 6.0.0'}
|
||||
|
||||
tunnel-agent@0.6.0:
|
||||
resolution: {integrity: sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==}
|
||||
|
||||
@@ -7744,49 +7820,49 @@ snapshots:
|
||||
|
||||
'@bcoe/v8-coverage@0.2.3': {}
|
||||
|
||||
'@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)':
|
||||
'@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)':
|
||||
dependencies:
|
||||
'@better-auth/utils': 0.3.1
|
||||
'@better-fetch/fetch': 1.1.21
|
||||
'@standard-schema/spec': 1.1.0
|
||||
better-call: 1.3.2(zod@4.3.6)
|
||||
jose: 6.2.1
|
||||
jose: 6.2.2
|
||||
kysely: 0.28.11
|
||||
nanostores: 1.1.1
|
||||
zod: 4.3.6
|
||||
|
||||
'@better-auth/drizzle-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(drizzle-orm@0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8))':
|
||||
'@better-auth/drizzle-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(drizzle-orm@0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8))':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
optionalDependencies:
|
||||
drizzle-orm: 0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8)
|
||||
|
||||
'@better-auth/kysely-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(kysely@0.28.11)':
|
||||
'@better-auth/kysely-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(kysely@0.28.11)':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
kysely: 0.28.11
|
||||
|
||||
'@better-auth/memory-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)':
|
||||
'@better-auth/memory-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
|
||||
'@better-auth/mongo-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(mongodb@7.1.0(socks@2.8.7))':
|
||||
'@better-auth/mongo-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(mongodb@7.1.0(socks@2.8.7))':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
mongodb: 7.1.0(socks@2.8.7)
|
||||
|
||||
'@better-auth/prisma-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)':
|
||||
'@better-auth/prisma-adapter@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
|
||||
'@better-auth/telemetry@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))':
|
||||
'@better-auth/telemetry@1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))':
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/utils': 0.3.1
|
||||
'@better-fetch/fetch': 1.1.21
|
||||
|
||||
@@ -8805,7 +8881,7 @@ snapshots:
|
||||
express: 5.2.1
|
||||
express-rate-limit: 8.3.1(express@5.2.1)
|
||||
hono: 4.12.8
|
||||
jose: 6.2.1
|
||||
jose: 6.2.2
|
||||
json-schema-typed: 8.0.2
|
||||
pkce-challenge: 5.0.1
|
||||
raw-body: 3.0.2
|
||||
@@ -9935,6 +10011,95 @@ snapshots:
|
||||
dependencies:
|
||||
'@noble/hashes': 1.8.0
|
||||
|
||||
'@peculiar/asn1-cms@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
'@peculiar/asn1-x509-attr': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-csr@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-ecc@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-pfx@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-cms': 2.6.1
|
||||
'@peculiar/asn1-pkcs8': 2.6.1
|
||||
'@peculiar/asn1-rsa': 2.6.1
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-pkcs8@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-pkcs9@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-cms': 2.6.1
|
||||
'@peculiar/asn1-pfx': 2.6.1
|
||||
'@peculiar/asn1-pkcs8': 2.6.1
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
'@peculiar/asn1-x509-attr': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-rsa@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-schema@2.6.0':
|
||||
dependencies:
|
||||
asn1js: 3.0.10
|
||||
pvtsutils: 1.3.6
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-x509-attr@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
asn1js: 3.0.10
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/asn1-x509@2.6.1':
|
||||
dependencies:
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
asn1js: 3.0.10
|
||||
pvtsutils: 1.3.6
|
||||
tslib: 2.8.1
|
||||
|
||||
'@peculiar/x509@2.0.0':
|
||||
dependencies:
|
||||
'@peculiar/asn1-cms': 2.6.1
|
||||
'@peculiar/asn1-csr': 2.6.1
|
||||
'@peculiar/asn1-ecc': 2.6.1
|
||||
'@peculiar/asn1-pkcs9': 2.6.1
|
||||
'@peculiar/asn1-rsa': 2.6.1
|
||||
'@peculiar/asn1-schema': 2.6.0
|
||||
'@peculiar/asn1-x509': 2.6.1
|
||||
pvtsutils: 1.3.6
|
||||
tslib: 2.8.1
|
||||
tsyringe: 4.10.0
|
||||
|
||||
'@pinojs/redact@0.4.0': {}
|
||||
|
||||
'@pkgjs/parseargs@0.11.0':
|
||||
@@ -10891,6 +11056,12 @@ snapshots:
|
||||
|
||||
asap@2.0.6: {}
|
||||
|
||||
asn1js@3.0.10:
|
||||
dependencies:
|
||||
pvtsutils: 1.3.6
|
||||
pvutils: 1.1.5
|
||||
tslib: 2.8.1
|
||||
|
||||
assertion-error@2.0.1: {}
|
||||
|
||||
ast-types@0.13.4:
|
||||
@@ -10935,20 +11106,20 @@ snapshots:
|
||||
|
||||
better-auth@1.5.5(better-sqlite3@12.8.0)(drizzle-kit@0.31.9)(drizzle-orm@0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8))(mongodb@7.1.0(socks@2.8.7))(next@16.1.6(@opentelemetry/api@1.9.0)(@playwright/test@1.58.2)(react-dom@19.2.4(react@19.2.4))(react@19.2.4))(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@2.1.9(@types/node@22.19.15)(jsdom@29.0.0(@noble/hashes@2.0.1))(lightningcss@1.31.1)):
|
||||
dependencies:
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/drizzle-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(drizzle-orm@0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8))
|
||||
'@better-auth/kysely-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(kysely@0.28.11)
|
||||
'@better-auth/memory-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)
|
||||
'@better-auth/mongo-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(mongodb@7.1.0(socks@2.8.7))
|
||||
'@better-auth/prisma-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)
|
||||
'@better-auth/telemetry': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.1)(kysely@0.28.11)(nanostores@1.1.1))
|
||||
'@better-auth/core': 1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1)
|
||||
'@better-auth/drizzle-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(drizzle-orm@0.45.1(@electric-sql/pglite@0.2.17)(@opentelemetry/api@1.9.0)(@types/better-sqlite3@7.6.13)(@types/pg@8.15.6)(better-sqlite3@12.8.0)(kysely@0.28.11)(postgres@3.4.8))
|
||||
'@better-auth/kysely-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(kysely@0.28.11)
|
||||
'@better-auth/memory-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)
|
||||
'@better-auth/mongo-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)(mongodb@7.1.0(socks@2.8.7))
|
||||
'@better-auth/prisma-adapter': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))(@better-auth/utils@0.3.1)
|
||||
'@better-auth/telemetry': 1.5.5(@better-auth/core@1.5.5(@better-auth/utils@0.3.1)(@better-fetch/fetch@1.1.21)(better-call@1.3.2(zod@4.3.6))(jose@6.2.2)(kysely@0.28.11)(nanostores@1.1.1))
|
||||
'@better-auth/utils': 0.3.1
|
||||
'@better-fetch/fetch': 1.1.21
|
||||
'@noble/ciphers': 2.1.1
|
||||
'@noble/hashes': 2.0.1
|
||||
better-call: 1.3.2(zod@4.3.6)
|
||||
defu: 6.1.4
|
||||
jose: 6.2.1
|
||||
jose: 6.2.2
|
||||
kysely: 0.28.11
|
||||
nanostores: 1.1.1
|
||||
zod: 4.3.6
|
||||
@@ -12391,6 +12562,8 @@ snapshots:
|
||||
|
||||
jose@6.2.1: {}
|
||||
|
||||
jose@6.2.2: {}
|
||||
|
||||
js-tokens@4.0.0: {}
|
||||
|
||||
js-yaml@4.1.1:
|
||||
@@ -13486,6 +13659,12 @@ snapshots:
|
||||
|
||||
punycode@2.3.1: {}
|
||||
|
||||
pvtsutils@1.3.6:
|
||||
dependencies:
|
||||
tslib: 2.8.1
|
||||
|
||||
pvutils@1.1.5: {}
|
||||
|
||||
qrcode-terminal@0.12.0: {}
|
||||
|
||||
qs@6.15.0:
|
||||
@@ -14169,6 +14348,8 @@ snapshots:
|
||||
|
||||
ts-mixer@6.0.4: {}
|
||||
|
||||
tslib@1.14.1: {}
|
||||
|
||||
tslib@2.8.1: {}
|
||||
|
||||
tslog@4.10.2: {}
|
||||
@@ -14180,6 +14361,10 @@ snapshots:
|
||||
optionalDependencies:
|
||||
fsevents: 2.3.3
|
||||
|
||||
tsyringe@4.10.0:
|
||||
dependencies:
|
||||
tslib: 1.14.1
|
||||
|
||||
tunnel-agent@0.6.0:
|
||||
dependencies:
|
||||
safe-buffer: 5.2.1
|
||||
|
||||
Reference in New Issue
Block a user