feat: TypeScript telemetry client SDK v0.1.0

Standalone npm package (@mosaicstack/telemetry-client) for reporting
task-completion telemetry and querying predictions from the Mosaic
Stack Telemetry server.

- TelemetryClient with setInterval-based background flush
- EventQueue (bounded FIFO array)
- BatchSubmitter with native fetch, exponential backoff, Retry-After
- PredictionCache (Map + TTL)
- EventBuilder with auto-generated event_id/timestamp
- Zero runtime dependencies (Node 18+ native APIs)
- 43 tests, 86% branch coverage

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-02-07 23:25:31 -06:00
commit 177720e523
26 changed files with 5643 additions and 0 deletions

158
src/client.ts Normal file
View File

@@ -0,0 +1,158 @@
import { TelemetryConfig, ResolvedConfig, resolveConfig } from './config.js';
import { EventQueue } from './queue.js';
import { BatchSubmitter } from './submitter.js';
import { PredictionCache } from './prediction-cache.js';
import { EventBuilder } from './event-builder.js';
import { TaskCompletionEvent } from './types/events.js';
import { PredictionQuery, PredictionResponse } from './types/predictions.js';
import { BatchPredictionResponse } from './types/common.js';
/**
 * Main telemetry client. Queues task-completion events for background
 * batch submission and provides access to crowd-sourced predictions.
 *
 * Lifecycle: construct → start() → track()/getPrediction() → stop().
 * All public methods are designed never to throw; errors are routed to
 * the configured onError callback.
 */
export class TelemetryClient {
  private readonly config: ResolvedConfig;
  private readonly queue: EventQueue;
  private readonly submitter: BatchSubmitter;
  private readonly predictionCache: PredictionCache;
  private readonly _eventBuilder: EventBuilder;
  private intervalId: ReturnType<typeof setInterval> | null = null;
  private _isRunning = false;
  // In-flight flush, shared so a timer tick and stop() (or two timer
  // ticks when submission outlasts submitIntervalMs) never drain the
  // queue concurrently, which could double-submit or reorder batches.
  private flushPromise: Promise<void> | null = null;

  constructor(config: TelemetryConfig) {
    this.config = resolveConfig(config);
    this.queue = new EventQueue(this.config.maxQueueSize);
    this.submitter = new BatchSubmitter(this.config);
    this.predictionCache = new PredictionCache(this.config.predictionCacheTtlMs);
    this._eventBuilder = new EventBuilder(this.config);
  }

  /** Get the event builder for constructing events. */
  get eventBuilder(): EventBuilder {
    return this._eventBuilder;
  }

  /** Start background submission via setInterval. Idempotent. */
  start(): void {
    if (this._isRunning) {
      return;
    }
    this._isRunning = true;
    this.intervalId = setInterval(() => {
      void this.flush();
    }, this.config.submitIntervalMs);
  }

  /** Stop background submission, flush remaining events. Idempotent. */
  async stop(): Promise<void> {
    if (!this._isRunning) {
      return;
    }
    this._isRunning = false;
    if (this.intervalId !== null) {
      clearInterval(this.intervalId);
      this.intervalId = null;
    }
    await this.flush();
  }

  /** Queue an event for batch submission. Never throws. */
  track(event: TaskCompletionEvent): void {
    try {
      if (!this.config.enabled) {
        return;
      }
      this.queue.enqueue(event);
    } catch (error) {
      this.handleError(error);
    }
  }

  /** Get a cached prediction. Returns null if not cached/expired. */
  getPrediction(query: PredictionQuery): PredictionResponse | null {
    return this.predictionCache.get(query);
  }

  /**
   * Force-refresh predictions from server.
   * Results are stored in the prediction cache, positionally matched to
   * the given queries. Errors are reported via onError, never thrown.
   */
  async refreshPredictions(queries: PredictionQuery[]): Promise<void> {
    try {
      const url = `${this.config.serverUrl}/v1/predictions/batch`;
      const controller = new AbortController();
      const timeout = setTimeout(
        () => controller.abort(),
        this.config.requestTimeoutMs,
      );
      try {
        const response = await fetch(url, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
            // Authenticate like BatchSubmitter does; without this the
            // server would reject the request.
            Authorization: `Bearer ${this.config.apiKey}`,
          },
          body: JSON.stringify({ queries }),
          signal: controller.signal,
        });
        if (!response.ok) {
          throw new Error(`HTTP ${response.status}: ${response.statusText}`);
        }
        const body = (await response.json()) as BatchPredictionResponse;
        for (let i = 0; i < queries.length; i++) {
          if (body.results[i]) {
            this.predictionCache.set(queries[i], body.results[i]);
          }
        }
      } finally {
        clearTimeout(timeout);
      }
    } catch (error) {
      this.handleError(error);
    }
  }

  /** Number of events currently queued. */
  get queueSize(): number {
    return this.queue.size;
  }

  /** Whether the client is currently running. */
  get isRunning(): boolean {
    return this._isRunning;
  }

  /**
   * Flush the queue by draining and submitting batches.
   * Concurrent callers share the same in-flight run (no re-entrancy).
   */
  private flush(): Promise<void> {
    if (this.flushPromise === null) {
      this.flushPromise = this.doFlush().finally(() => {
        this.flushPromise = null;
      });
    }
    return this.flushPromise;
  }

  /** Drain and submit batches until the queue is empty or a batch fails. */
  private async doFlush(): Promise<void> {
    while (!this.queue.isEmpty) {
      const batch = this.queue.drain(this.config.batchSize);
      if (batch.length === 0) break;
      try {
        const result = await this.submitter.submit(batch);
        if (!result.success) {
          // Re-enqueue events that failed to submit
          this.queue.prepend(batch);
          if (result.error) {
            this.handleError(result.error);
          }
          break; // Stop flushing on failure to avoid loops
        }
      } catch (error) {
        this.queue.prepend(batch);
        this.handleError(error);
        break;
      }
    }
  }

  /** Normalize any thrown value to Error and hand it to onError safely. */
  private handleError(error: unknown): void {
    const err = error instanceof Error ? error : new Error(String(error));
    try {
      this.config.onError(err);
    } catch {
      // Prevent error handler from throwing
    }
  }
}

62
src/config.ts Normal file
View File

@@ -0,0 +1,62 @@
/**
 * User-supplied client configuration. Only serverUrl, apiKey, and
 * instanceId are required; every other field has a default applied by
 * resolveConfig().
 */
export interface TelemetryConfig {
  /** Base URL of the telemetry server (e.g., "https://tel.mosaicstack.dev") */
  serverUrl: string;
  /** API key for authentication (64-char hex string) */
  apiKey: string;
  /** Instance UUID for this client */
  instanceId: string;
  /** Whether telemetry collection is enabled. Default: true */
  enabled?: boolean;
  /** Interval between automatic batch submissions in ms. Default: 300_000 (5 min) */
  submitIntervalMs?: number;
  /** Maximum number of events held in queue. Default: 1000 */
  maxQueueSize?: number;
  /** Maximum events per batch submission. Default: 100 */
  batchSize?: number;
  /** HTTP request timeout in ms. Default: 10_000 */
  requestTimeoutMs?: number;
  /** TTL for cached predictions in ms. Default: 21_600_000 (6 hours) */
  predictionCacheTtlMs?: number;
  /** If true, log events instead of sending them. Default: false */
  dryRun?: boolean;
  /** Maximum number of retries on failure. Default: 3 */
  maxRetries?: number;
  /** Optional callback invoked on errors */
  onError?: (error: Error) => void;
}
/**
 * TelemetryConfig with every optional field resolved to a concrete value
 * (see resolveConfig for the defaults). serverUrl has trailing slashes
 * stripped. This is the shape the rest of the SDK consumes.
 */
export interface ResolvedConfig {
  serverUrl: string;
  apiKey: string;
  instanceId: string;
  enabled: boolean;
  submitIntervalMs: number;
  maxQueueSize: number;
  batchSize: number;
  requestTimeoutMs: number;
  predictionCacheTtlMs: number;
  dryRun: boolean;
  maxRetries: number;
  onError: (error: Error) => void;
}
/** No-op handler used when the caller installs no onError callback. */
const DEFAULT_ON_ERROR: (error: Error) => void = () => {
  // Errors are silently dropped by default.
};

/** Default values for every optional TelemetryConfig field. */
const DEFAULTS = {
  enabled: true,
  submitIntervalMs: 300_000,
  maxQueueSize: 1000,
  batchSize: 100,
  requestTimeoutMs: 10_000,
  predictionCacheTtlMs: 21_600_000,
  dryRun: false,
  maxRetries: 3,
} as const;

/**
 * Resolve a user-supplied TelemetryConfig into a fully-populated
 * ResolvedConfig: trailing slashes are stripped from serverUrl and every
 * omitted optional field receives its documented default.
 */
export function resolveConfig(config: TelemetryConfig): ResolvedConfig {
  return {
    serverUrl: config.serverUrl.replace(/\/+$/, ''),
    apiKey: config.apiKey,
    instanceId: config.instanceId,
    enabled: config.enabled ?? DEFAULTS.enabled,
    submitIntervalMs: config.submitIntervalMs ?? DEFAULTS.submitIntervalMs,
    maxQueueSize: config.maxQueueSize ?? DEFAULTS.maxQueueSize,
    batchSize: config.batchSize ?? DEFAULTS.batchSize,
    requestTimeoutMs: config.requestTimeoutMs ?? DEFAULTS.requestTimeoutMs,
    predictionCacheTtlMs:
      config.predictionCacheTtlMs ?? DEFAULTS.predictionCacheTtlMs,
    dryRun: config.dryRun ?? DEFAULTS.dryRun,
    maxRetries: config.maxRetries ?? DEFAULTS.maxRetries,
    onError: config.onError ?? DEFAULT_ON_ERROR,
  };
}

62
src/event-builder.ts Normal file
View File

@@ -0,0 +1,62 @@
import { ResolvedConfig } from './config.js';
import {
Complexity,
Harness,
Outcome,
Provider,
QualityGate,
RepoSizeCategory,
TaskCompletionEvent,
TaskType,
} from './types/events.js';
/**
 * Caller-supplied portion of a TaskCompletionEvent. EventBuilder.build()
 * merges these fields with the auto-generated envelope (event_id,
 * timestamp, instance_id, schema_version). Field semantics mirror
 * TaskCompletionEvent in types/events.ts.
 */
export interface EventBuilderParams {
  task_duration_ms: number;
  task_type: TaskType;
  complexity: Complexity;
  harness: Harness;
  model: string;
  provider: Provider;
  // Estimated vs actual token/cost pairs allow the server to derive
  // correction factors (see CorrectionFactors in types/predictions.ts).
  estimated_input_tokens: number;
  estimated_output_tokens: number;
  actual_input_tokens: number;
  actual_output_tokens: number;
  estimated_cost_usd_micros: number;
  actual_cost_usd_micros: number;
  quality_gate_passed: boolean;
  quality_gates_run: QualityGate[];
  quality_gates_failed: QualityGate[];
  context_compactions: number;
  context_rotations: number;
  context_utilization_final: number;
  outcome: Outcome;
  retry_count: number;
  // Optional dimensions; null/omitted when unknown.
  language?: string | null;
  repo_size_category?: RepoSizeCategory | null;
}
/**
 * Convenience builder for TaskCompletionEvent objects.
 * Auto-generates event_id, timestamp, instance_id, and schema_version.
 */
export class EventBuilder {
  constructor(private readonly config: ResolvedConfig) {}

  /**
   * Build a complete TaskCompletionEvent from the given parameters.
   * Automatically fills in event_id, timestamp, instance_id, and schema_version.
   */
  build(params: EventBuilderParams): TaskCompletionEvent {
    // Envelope fields the SDK owns; the caller supplies everything else.
    const envelope = {
      instance_id: this.config.instanceId,
      event_id: crypto.randomUUID(),
      schema_version: '1.0',
      timestamp: new Date().toISOString(),
    };
    return { ...envelope, ...params };
  }
}

36
src/index.ts Normal file
View File

@@ -0,0 +1,36 @@
// Public entry point of @mosaicstack/telemetry-client.
// Runtime exports: client, builder, and the individual building blocks
// (exposed so advanced users can compose/test them directly).
export { TelemetryClient } from './client.js';
export { EventBuilder } from './event-builder.js';
export { EventQueue } from './queue.js';
export { BatchSubmitter } from './submitter.js';
export { PredictionCache } from './prediction-cache.js';
export { resolveConfig } from './config.js';
// Type-only exports (erased at compile time).
export type { TelemetryConfig, ResolvedConfig } from './config.js';
export type { EventBuilderParams } from './event-builder.js';
export type { SubmitResult } from './submitter.js';
// Re-export all types
// Enums are runtime values, so they are exported as values, not types.
export {
  TaskType,
  Complexity,
  Harness,
  Provider,
  QualityGate,
  Outcome,
  RepoSizeCategory,
} from './types/index.js';
export type {
  TaskCompletionEvent,
  TokenDistribution,
  CorrectionFactors,
  QualityPrediction,
  PredictionData,
  PredictionMetadata,
  PredictionResponse,
  PredictionQuery,
  BatchEventRequest,
  BatchEventResult,
  BatchEventResponse,
  BatchPredictionRequest,
  BatchPredictionResponse,
} from './types/index.js';

59
src/prediction-cache.ts Normal file
View File

@@ -0,0 +1,59 @@
import { PredictionQuery, PredictionResponse } from './types/predictions.js';
interface CacheEntry {
  response: PredictionResponse;
  // Epoch-ms instant after which the entry counts as stale.
  expiresAt: number;
}
/**
 * In-memory cache for prediction responses with TTL-based expiry.
 * Expired entries are evicted lazily, on lookup.
 */
export class PredictionCache {
  private readonly cache = new Map<string, CacheEntry>();

  constructor(private readonly ttlMs: number) {}

  /** Build a deterministic cache key from a prediction query. */
  private buildKey(query: PredictionQuery): string {
    const { task_type, model, provider, complexity } = query;
    return [task_type, model, provider, complexity].join(':');
  }

  /** Get a cached prediction. Returns null if not cached or expired. */
  get(query: PredictionQuery): PredictionResponse | null {
    const entry = this.cache.get(this.buildKey(query));
    if (entry === undefined) {
      return null;
    }
    const stale = entry.expiresAt < Date.now();
    if (stale) {
      this.cache.delete(this.buildKey(query));
      return null;
    }
    return entry.response;
  }

  /** Store a prediction response with TTL. */
  set(query: PredictionQuery, response: PredictionResponse): void {
    const expiresAt = Date.now() + this.ttlMs;
    this.cache.set(this.buildKey(query), { response, expiresAt });
  }

  /** Clear all cached predictions. */
  clear(): void {
    this.cache.clear();
  }

  /** Number of entries currently in cache (including potentially expired). */
  get size(): number {
    return this.cache.size;
  }
}

49
src/queue.ts Normal file
View File

@@ -0,0 +1,49 @@
import { TaskCompletionEvent } from './types/events.js';
/**
 * Bounded FIFO event queue. When the queue is full, the oldest events
 * are evicted to make room for new ones. A capacity of 0 (or less)
 * stores nothing.
 */
export class EventQueue {
  private readonly items: TaskCompletionEvent[] = [];
  private readonly maxSize: number;

  /** @param maxSize Maximum number of events retained; non-positive means "store nothing". */
  constructor(maxSize: number) {
    this.maxSize = maxSize;
  }

  /** Add an event to the queue. Evicts the oldest event if at capacity. */
  enqueue(event: TaskCompletionEvent): void {
    // Guard: with maxSize <= 0 the original shift-then-push still stored
    // one event (shift on an empty array is a no-op), letting the queue
    // exceed its capacity. Store nothing instead.
    if (this.maxSize <= 0) {
      return;
    }
    if (this.items.length >= this.maxSize) {
      this.items.shift();
    }
    this.items.push(event);
  }

  /**
   * Remove and return up to `maxItems` events from the front of the queue.
   * Returns an empty array if the queue is empty.
   */
  drain(maxItems: number): TaskCompletionEvent[] {
    const count = Math.min(maxItems, this.items.length);
    return this.items.splice(0, count);
  }

  /** Prepend events back to the front of the queue (for re-enqueue on failure). */
  prepend(events: TaskCompletionEvent[]): void {
    // If prepending would exceed max, only keep as many as will fit.
    // Clamp at 0: a negative count would make slice(0, n) keep everything
    // but the last |n| events — the opposite of the intent.
    const available = Math.max(0, this.maxSize - this.items.length);
    const toAdd = events.slice(0, available);
    this.items.unshift(...toAdd);
  }

  /** Current number of events in the queue. */
  get size(): number {
    return this.items.length;
  }

  /** Whether the queue is empty. */
  get isEmpty(): boolean {
    return this.items.length === 0;
  }
}

138
src/submitter.ts Normal file
View File

@@ -0,0 +1,138 @@
import { ResolvedConfig } from './config.js';
import { TaskCompletionEvent } from './types/events.js';
import { BatchEventResponse } from './types/common.js';
const SDK_VERSION = '0.1.0';
const USER_AGENT = `mosaic-telemetry-client-js/${SDK_VERSION}`;

/** Outcome of a batch submission attempt (or of the whole retry loop). */
export interface SubmitResult {
  success: boolean;
  /** Parsed server response, present on success. */
  response?: BatchEventResponse;
  /** Set when the server answered 429; how long to wait before retrying. */
  retryAfterMs?: number;
  /** Terminal error, present on failure. */
  error?: Error;
}
/**
 * Handles HTTP submission of event batches to the telemetry server.
 * Supports exponential backoff with jitter and Retry-After header handling.
 * Non-retryable client errors (4xx other than 429/408) fail immediately.
 */
export class BatchSubmitter {
  private readonly config: ResolvedConfig;

  constructor(config: ResolvedConfig) {
    this.config = config;
  }

  /**
   * Submit a batch of events to the server.
   * Retries with exponential backoff on transient failures (network
   * errors, 5xx, 408); honors Retry-After on 429; gives up immediately
   * on other 4xx responses. In dryRun mode, no request is made and every
   * event is reported as accepted.
   */
  async submit(events: TaskCompletionEvent[]): Promise<SubmitResult> {
    if (this.config.dryRun) {
      return {
        success: true,
        response: {
          accepted: events.length,
          rejected: 0,
          results: events.map((e) => ({
            event_id: e.event_id,
            status: 'accepted' as const,
          })),
        },
      };
    }
    let lastError: Error | undefined;
    for (let attempt = 0; attempt <= this.config.maxRetries; attempt++) {
      try {
        const result = await this.attemptSubmit(events);
        if (result.retryAfterMs !== undefined) {
          // 429: wait exactly what the server asked for — no additional
          // exponential backoff on top of it.
          lastError = new Error('Rate limited (HTTP 429)');
          if (attempt < this.config.maxRetries) {
            await this.sleep(result.retryAfterMs);
          }
          continue;
        }
        // Either success, or a terminal failure (e.g. 403/4xx) that a
        // retry cannot fix — return it as-is.
        return result;
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error));
        // Transient failure: back off before the next attempt (skip the
        // sleep after the final attempt).
        if (attempt < this.config.maxRetries) {
          await this.sleep(this.backoffDelay(attempt + 1));
        }
      }
    }
    return {
      success: false,
      error: lastError ?? new Error('Max retries exceeded'),
    };
  }

  /** Single HTTP POST of the batch; throws only on retryable failures. */
  private async attemptSubmit(
    events: TaskCompletionEvent[],
  ): Promise<SubmitResult> {
    const url = `${this.config.serverUrl}/v1/events/batch`;
    const controller = new AbortController();
    const timeout = setTimeout(
      () => controller.abort(),
      this.config.requestTimeoutMs,
    );
    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          Authorization: `Bearer ${this.config.apiKey}`,
          'User-Agent': USER_AGENT,
        },
        body: JSON.stringify({ events }),
        signal: controller.signal,
      });
      if (response.status === 429) {
        // Retry-After may be delta-seconds or an HTTP-date; parseInt on a
        // date yields NaN, which previously produced sleep(NaN) ≈ no wait.
        const header = response.headers.get('Retry-After');
        const seconds = header === null ? NaN : parseInt(header, 10);
        const retryAfterMs = Number.isFinite(seconds) ? seconds * 1000 : 5000;
        return { success: false, retryAfterMs };
      }
      if (response.status === 403) {
        return {
          success: false,
          error: new Error(
            `Forbidden: API key does not match instance_id (HTTP 403)`,
          ),
        };
      }
      if (!response.ok) {
        const err = new Error(`HTTP ${response.status}: ${response.statusText}`);
        // Other 4xx client errors (400, 401, 404, 422, …) are permanent;
        // retrying them just wastes the budget. 408 Request Timeout is
        // the one transient 4xx worth retrying.
        if (
          response.status >= 400 &&
          response.status < 500 &&
          response.status !== 408
        ) {
          return { success: false, error: err };
        }
        throw err; // 5xx / 408: transient, let submit() retry
      }
      const body = (await response.json()) as BatchEventResponse;
      return { success: true, response: body };
    } finally {
      clearTimeout(timeout);
    }
  }

  /**
   * Exponential backoff with jitter.
   * Base = 1s, max = 60s; up to +50% random jitter.
   */
  private backoffDelay(attempt: number): number {
    const baseMs = 1000;
    const maxMs = 60_000;
    const exponential = Math.min(maxMs, baseMs * Math.pow(2, attempt - 1));
    const jitter = Math.random() * exponential * 0.5;
    return exponential + jitter;
  }

  /** Promise-based delay helper. */
  private sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
}

26
src/types/common.ts Normal file
View File

@@ -0,0 +1,26 @@
import { TaskCompletionEvent } from './events.js';
import { PredictionQuery, PredictionResponse } from './predictions.js';
/** Request body for POST /v1/events/batch. */
export interface BatchEventRequest {
  events: TaskCompletionEvent[];
}
/** Per-event outcome within a batch submission response. */
export interface BatchEventResult {
  event_id: string;
  status: 'accepted' | 'rejected';
  /** Server-provided reason when status is 'rejected'. */
  error?: string | null;
}
/** Response body for POST /v1/events/batch. */
export interface BatchEventResponse {
  accepted: number;
  rejected: number;
  results: BatchEventResult[];
}
/** Request body for POST /v1/predictions/batch. */
export interface BatchPredictionRequest {
  queries: PredictionQuery[];
}
/** Response body for POST /v1/predictions/batch; results align positionally with the queries. */
export interface BatchPredictionResponse {
  results: PredictionResponse[];
}

94
src/types/events.ts Normal file
View File

@@ -0,0 +1,94 @@
/** Category of work performed for the reported task. */
export enum TaskType {
  PLANNING = 'planning',
  IMPLEMENTATION = 'implementation',
  CODE_REVIEW = 'code_review',
  TESTING = 'testing',
  DEBUGGING = 'debugging',
  REFACTORING = 'refactoring',
  DOCUMENTATION = 'documentation',
  CONFIGURATION = 'configuration',
  SECURITY_AUDIT = 'security_audit',
  UNKNOWN = 'unknown',
}
/** Subjective difficulty of the task. */
export enum Complexity {
  LOW = 'low',
  MEDIUM = 'medium',
  HIGH = 'high',
  CRITICAL = 'critical',
}
/** Tool/harness that drove the model for this task. */
export enum Harness {
  CLAUDE_CODE = 'claude_code',
  OPENCODE = 'opencode',
  KILO_CODE = 'kilo_code',
  AIDER = 'aider',
  API_DIRECT = 'api_direct',
  OLLAMA_LOCAL = 'ollama_local',
  CUSTOM = 'custom',
  UNKNOWN = 'unknown',
}
/** Model provider used for this task. */
export enum Provider {
  ANTHROPIC = 'anthropic',
  OPENAI = 'openai',
  OPENROUTER = 'openrouter',
  OLLAMA = 'ollama',
  GOOGLE = 'google',
  MISTRAL = 'mistral',
  CUSTOM = 'custom',
  UNKNOWN = 'unknown',
}
/** Automated quality checks that can run on a task's output. */
export enum QualityGate {
  BUILD = 'build',
  LINT = 'lint',
  TEST = 'test',
  COVERAGE = 'coverage',
  TYPECHECK = 'typecheck',
  SECURITY = 'security',
}
/** Final disposition of the task. */
export enum Outcome {
  SUCCESS = 'success',
  FAILURE = 'failure',
  PARTIAL = 'partial',
  TIMEOUT = 'timeout',
}
/** Coarse repository-size bucket (thresholds defined server-side — not visible here). */
export enum RepoSizeCategory {
  TINY = 'tiny',
  SMALL = 'small',
  MEDIUM = 'medium',
  LARGE = 'large',
  HUGE = 'huge',
}
/**
 * A single completed-task report. The envelope fields (instance_id,
 * event_id, schema_version, timestamp) are filled in by EventBuilder;
 * everything else is supplied by the caller via EventBuilderParams.
 */
export interface TaskCompletionEvent {
  /** UUID of the reporting client (TelemetryConfig.instanceId). */
  instance_id: string;
  /** Unique event ID — a UUID generated by EventBuilder. */
  event_id: string;
  /** Event schema version; EventBuilder emits '1.0'. */
  schema_version: string;
  /** ISO-8601 creation instant, set by EventBuilder. */
  timestamp: string;
  task_duration_ms: number;
  task_type: TaskType;
  complexity: Complexity;
  harness: Harness;
  model: string;
  provider: Provider;
  // Estimated vs actual pairs; presumably used server-side to compute
  // correction factors (see CorrectionFactors) — confirm against server schema.
  estimated_input_tokens: number;
  estimated_output_tokens: number;
  actual_input_tokens: number;
  actual_output_tokens: number;
  /** Cost in millionths of a USD (per the _usd_micros naming). */
  estimated_cost_usd_micros: number;
  actual_cost_usd_micros: number;
  quality_gate_passed: boolean;
  quality_gates_run: QualityGate[];
  quality_gates_failed: QualityGate[];
  context_compactions: number;
  context_rotations: number;
  // NOTE(review): assumed to be a 0..1 fraction of context used — confirm.
  context_utilization_final: number;
  outcome: Outcome;
  retry_count: number;
  /** Primary language of the task, or null/omitted when unknown. */
  language?: string | null;
  repo_size_category?: RepoSizeCategory | null;
}

28
src/types/index.ts Normal file
View File

@@ -0,0 +1,28 @@
// Barrel module for src/types: re-exports every shared type and enum.
// Enums are runtime values and must be re-exported as values; interfaces
// use `type` re-exports so they are fully erased at compile time.
export {
  TaskType,
  Complexity,
  Harness,
  Provider,
  QualityGate,
  Outcome,
  RepoSizeCategory,
  type TaskCompletionEvent,
} from './events.js';
export {
  type TokenDistribution,
  type CorrectionFactors,
  type QualityPrediction,
  type PredictionData,
  type PredictionMetadata,
  type PredictionResponse,
  type PredictionQuery,
} from './predictions.js';
export {
  type BatchEventRequest,
  type BatchEventResult,
  type BatchEventResponse,
  type BatchPredictionRequest,
  type BatchPredictionResponse,
} from './common.js';

50
src/types/predictions.ts Normal file
View File

@@ -0,0 +1,50 @@
import { Complexity, Provider, TaskType } from './events.js';
/** Percentile summary of a token-count distribution. */
export interface TokenDistribution {
  p10: number;
  p25: number;
  median: number;
  p75: number;
  p90: number;
}
/** Multipliers to correct client-side estimates toward observed actuals. */
export interface CorrectionFactors {
  input: number;
  output: number;
}
/** Crowd-sourced quality statistics for a query's cohort. */
export interface QualityPrediction {
  /** Fraction of tasks whose quality gates passed (assumed 0..1 — confirm server contract). */
  gate_pass_rate: number;
  /** Fraction of tasks with a successful outcome (assumed 0..1 — confirm server contract). */
  success_rate: number;
}
/** The prediction payload returned for a single query. */
export interface PredictionData {
  input_tokens: TokenDistribution;
  output_tokens: TokenDistribution;
  // Keyed summaries; presumably percentile name -> value, mirroring
  // TokenDistribution — confirm against server schema.
  cost_usd_micros: Record<string, number>;
  duration_ms: Record<string, number>;
  correction_factors: CorrectionFactors;
  quality: QualityPrediction;
}
/** Provenance/confidence information accompanying a prediction. */
export interface PredictionMetadata {
  sample_size: number;
  /** How far the server had to relax the query dimensions to find data. */
  fallback_level: number;
  confidence: 'none' | 'low' | 'medium' | 'high';
  last_updated: string | null;
  dimensions_matched?: Record<string, string | null> | null;
  fallback_note?: string | null;
  cache_hit: boolean;
}
/** One query's result; prediction is null when the server has no data. */
export interface PredictionResponse {
  prediction: PredictionData | null;
  metadata: PredictionMetadata;
}
/** Dimensions identifying the cohort to predict for (also the cache key in PredictionCache). */
export interface PredictionQuery {
  task_type: TaskType;
  model: string;
  provider: Provider;
  complexity: Complexity;
}