Files
telemetry-client-js/tests/prediction-cache.test.ts
Jason Woltje 177720e523 feat: TypeScript telemetry client SDK v0.1.0
Standalone npm package (@mosaicstack/telemetry-client) for reporting
task-completion telemetry and querying predictions from the Mosaic
Stack Telemetry server.

- TelemetryClient with setInterval-based background flush
- EventQueue (bounded FIFO array)
- BatchSubmitter with native fetch, exponential backoff, Retry-After
- PredictionCache (Map + TTL)
- EventBuilder with auto-generated event_id/timestamp
- Zero runtime dependencies (Node 18+ native APIs)
- 43 tests, 86% branch coverage

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-07 23:25:31 -06:00

127 lines
3.6 KiB
TypeScript

import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { PredictionCache } from '../src/prediction-cache.js';
import { PredictionQuery, PredictionResponse } from '../src/types/predictions.js';
import { TaskType, Complexity, Provider } from '../src/types/events.js';
/**
 * Builds a PredictionQuery fixture with sensible defaults.
 * Any subset of fields can be replaced via `overrides`.
 */
function makeQuery(overrides: Partial<PredictionQuery> = {}): PredictionQuery {
  const base: PredictionQuery = {
    task_type: TaskType.IMPLEMENTATION,
    model: 'claude-3-opus',
    provider: Provider.ANTHROPIC,
    complexity: Complexity.MEDIUM,
  };
  // Later spread wins, so overrides take precedence over the defaults.
  return { ...base, ...overrides };
}
/**
 * Builds a fully-populated PredictionResponse fixture.
 *
 * @param sampleSize - value for `metadata.sample_size`; lets tests tell
 *   two otherwise-identical responses apart. Defaults to 100.
 */
function makeResponse(sampleSize = 100): PredictionResponse {
  const response: PredictionResponse = {
    // Percentile distributions and quality stats are fixed dummy values;
    // tests only care about object identity/equality, not the numbers.
    prediction: {
      input_tokens: { p10: 500, p25: 750, median: 1000, p75: 1500, p90: 2000 },
      output_tokens: { p10: 200, p25: 350, median: 500, p75: 750, p90: 1000 },
      cost_usd_micros: { median: 50000 },
      duration_ms: { median: 30000 },
      correction_factors: { input: 1.1, output: 1.05 },
      quality: { gate_pass_rate: 0.85, success_rate: 0.9 },
    },
    metadata: {
      sample_size: sampleSize,
      fallback_level: 0,
      confidence: 'high',
      last_updated: new Date().toISOString(),
      cache_hit: false,
    },
  };
  return response;
}
describe('PredictionCache', () => {
  // TTL shared by every cache instance in this suite (60 seconds).
  const TTL_MS = 60_000;
  // Advancing just past the TTL guarantees an entry is stale.
  const PAST_TTL_MS = 61_000;

  // Fake timers make TTL expiry deterministic and instant.
  beforeEach(() => {
    vi.useFakeTimers();
  });

  afterEach(() => {
    vi.useRealTimers();
  });

  it('should return null for cache miss', () => {
    const store = new PredictionCache(TTL_MS);
    expect(store.get(makeQuery())).toBeNull();
  });

  it('should return cached prediction on hit', () => {
    const store = new PredictionCache(TTL_MS);
    const q = makeQuery();
    const resp = makeResponse();
    store.set(q, resp);
    expect(store.get(q)).toEqual(resp);
  });

  it('should return null when entry has expired', () => {
    const store = new PredictionCache(TTL_MS);
    const q = makeQuery();
    const resp = makeResponse();
    store.set(q, resp);
    // Fresh entry is served while within the TTL window...
    expect(store.get(q)).toEqual(resp);
    vi.advanceTimersByTime(PAST_TTL_MS);
    // ...and dropped once the clock moves past it.
    expect(store.get(q)).toBeNull();
  });

  it('should differentiate queries by all fields', () => {
    const store = new PredictionCache(TTL_MS);
    // Identical except for task_type; distinct sample sizes tag each response.
    const implQuery = makeQuery({ task_type: TaskType.IMPLEMENTATION });
    const debugQuery = makeQuery({ task_type: TaskType.DEBUGGING });
    store.set(implQuery, makeResponse(100));
    store.set(debugQuery, makeResponse(200));
    expect(store.get(implQuery)?.metadata.sample_size).toBe(100);
    expect(store.get(debugQuery)?.metadata.sample_size).toBe(200);
  });

  it('should clear all entries', () => {
    const store = new PredictionCache(TTL_MS);
    store.set(makeQuery(), makeResponse());
    store.set(makeQuery({ task_type: TaskType.TESTING }), makeResponse());
    expect(store.size).toBe(2);
    store.clear();
    expect(store.size).toBe(0);
    expect(store.get(makeQuery())).toBeNull();
  });

  it('should overwrite existing entry with same query', () => {
    const store = new PredictionCache(TTL_MS);
    const q = makeQuery();
    store.set(q, makeResponse(100));
    store.set(q, makeResponse(200));
    // Second set replaces the first rather than adding a duplicate.
    expect(store.size).toBe(1);
    expect(store.get(q)?.metadata.sample_size).toBe(200);
  });

  it('should clean expired entry on get', () => {
    const store = new PredictionCache(TTL_MS);
    const q = makeQuery();
    store.set(q, makeResponse());
    expect(store.size).toBe(1);
    vi.advanceTimersByTime(PAST_TTL_MS);
    // A lookup of a stale entry should evict it, not just skip it.
    store.get(q);
    expect(store.size).toBe(0);
  });
});