Compare commits
9 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| bf001813b8 | |||
| 20f56edb49 | |||
| 0b0fb9ab7b | |||
| 3b5c47af84 | |||
| ea00334ded | |||
| 493bc72601 | |||
| 9df760cab2 | |||
| 07bf9dd9b4 | |||
| 231a799a46 |
142
.woodpecker.yml
Normal file
142
.woodpecker.yml
Normal file
@@ -0,0 +1,142 @@
|
||||
when:
|
||||
- event: [push, pull_request, manual]
|
||||
|
||||
variables:
|
||||
- &node_image "node:22-alpine"
|
||||
|
||||
steps:
|
||||
install:
|
||||
image: *node_image
|
||||
commands:
|
||||
- corepack enable
|
||||
- npm ci
|
||||
|
||||
lint:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm run lint
|
||||
depends_on:
|
||||
- install
|
||||
|
||||
typecheck:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm run typecheck
|
||||
depends_on:
|
||||
- install
|
||||
|
||||
format-check:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm run format:check
|
||||
depends_on:
|
||||
- install
|
||||
|
||||
security-audit:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm audit --audit-level=high
|
||||
depends_on:
|
||||
- install
|
||||
|
||||
test:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm run test:coverage
|
||||
depends_on:
|
||||
- install
|
||||
|
||||
build:
|
||||
image: *node_image
|
||||
commands:
|
||||
- npm run build
|
||||
depends_on:
|
||||
- lint
|
||||
- typecheck
|
||||
- format-check
|
||||
- security-audit
|
||||
- test
|
||||
|
||||
publish-release:
|
||||
image: *node_image
|
||||
environment:
|
||||
GITEA_TOKEN:
|
||||
from_secret: gitea_token
|
||||
commands:
|
||||
- npm run build
|
||||
- |
|
||||
echo "//git.mosaicstack.dev/api/packages/mosaic/npm/:_authToken=$$GITEA_TOKEN" > .npmrc
|
||||
echo "@mosaicstack:registry=https://git.mosaicstack.dev/api/packages/mosaic/npm/" >> .npmrc
|
||||
- |
|
||||
CURRENT=$$(node -p "require('./package.json').version")
|
||||
PUBLISHED=$$(npm view @mosaicstack/telemetry-client version 2>/dev/null || echo "0.0.0")
|
||||
if [ "$$CURRENT" = "$$PUBLISHED" ]; then
|
||||
echo "Version $$CURRENT already published, skipping"
|
||||
exit 0
|
||||
fi
|
||||
echo "Publishing $$CURRENT (was $$PUBLISHED)"
|
||||
npm publish --access public
|
||||
when:
|
||||
- branch: main
|
||||
event: [push, manual, tag]
|
||||
depends_on:
|
||||
- build
|
||||
|
||||
publish-dev:
|
||||
image: *node_image
|
||||
environment:
|
||||
GITEA_TOKEN:
|
||||
from_secret: gitea_token
|
||||
CI_COMMIT_SHA: ${CI_COMMIT_SHA}
|
||||
commands:
|
||||
- npm run build
|
||||
- |
|
||||
echo "//git.mosaicstack.dev/api/packages/mosaic/npm/:_authToken=$$GITEA_TOKEN" > .npmrc
|
||||
echo "@mosaicstack:registry=https://git.mosaicstack.dev/api/packages/mosaic/npm/" >> .npmrc
|
||||
- |
|
||||
BASE=$$(node -p "require('./package.json').version")
|
||||
TIMESTAMP=$$(date -u +%Y%m%d%H%M%S)
|
||||
DEV_VERSION="$${BASE}-dev.$${TIMESTAMP}"
|
||||
echo "Publishing dev version $$DEV_VERSION"
|
||||
node -e "const p=require('./package.json'); p.version='$$DEV_VERSION'; require('fs').writeFileSync('package.json', JSON.stringify(p, null, 2)+'\n')"
|
||||
npm publish --access public --tag dev
|
||||
when:
|
||||
- branch: develop
|
||||
event: [push, manual]
|
||||
depends_on:
|
||||
- build
|
||||
|
||||
link-package:
|
||||
image: alpine:3
|
||||
environment:
|
||||
GITEA_TOKEN:
|
||||
from_secret: gitea_token
|
||||
commands:
|
||||
- apk add --no-cache curl
|
||||
- |
|
||||
set -e
|
||||
PKG="%40mosaicstack%2Ftelemetry-client"
|
||||
for attempt in 1 2 3; do
|
||||
STATUS=$$(curl -s -o /tmp/link-response.txt -w "%{http_code}" -X POST \
|
||||
-H "Authorization: token $$GITEA_TOKEN" \
|
||||
"https://git.mosaicstack.dev/api/v1/packages/mosaic/npm/$$PKG/-/link/telemetry-client-js")
|
||||
if [ "$$STATUS" = "201" ] || [ "$$STATUS" = "204" ]; then
|
||||
echo "Package linked to repository"
|
||||
exit 0
|
||||
elif [ "$$STATUS" = "400" ]; then
|
||||
echo "Package already linked (OK)"
|
||||
exit 0
|
||||
elif [ $$attempt -lt 3 ]; then
|
||||
echo "Status $$STATUS, retrying in 5s (attempt $$attempt/3)..."
|
||||
sleep 5
|
||||
else
|
||||
echo "FAILED to link package (status $$STATUS)"
|
||||
cat /tmp/link-response.txt
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
when:
|
||||
- branch: [main, develop]
|
||||
event: [push, manual, tag]
|
||||
depends_on:
|
||||
- build
|
||||
72
AGENTS.md
Normal file
72
AGENTS.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# mosaic-telemetry-client-js — Agent Context
|
||||
|
||||
> Patterns, gotchas, and orchestrator integration for AI agents working on this project.
|
||||
> **Update this file** when you discover reusable patterns or non-obvious requirements.
|
||||
|
||||
## Codebase Patterns
|
||||
|
||||
<!-- Add project-specific patterns as you discover them -->
|
||||
<!-- Examples: -->
|
||||
<!-- - Use `httpx.AsyncClient` for external HTTP calls -->
|
||||
<!-- - All routes require authentication via `Depends(get_current_user)` -->
|
||||
<!-- - Config is loaded from environment variables via `settings.py` -->
|
||||
|
||||
## Common Gotchas
|
||||
|
||||
<!-- Add things that trip up agents -->
|
||||
<!-- Examples: -->
|
||||
<!-- - Remember to run migrations after schema changes -->
|
||||
<!-- - Frontend env vars need NEXT_PUBLIC_ prefix -->
|
||||
<!-- - Tests require a running PostgreSQL instance -->
|
||||
|
||||
## Quality Gates
|
||||
|
||||
**All must pass before any commit:**
|
||||
|
||||
```bash
|
||||
npm run lint && npm run typecheck && npm test
|
||||
```
|
||||
|
||||
## Orchestrator Integration
|
||||
|
||||
### Task Prefix
|
||||
Use `MOSAIC-TELEMETRY-CLIENT-JS` as the prefix for orchestrated tasks (e.g., `MOSAIC-TELEMETRY-CLIENT-JS-SEC-001`).
|
||||
|
||||
### Package/Directory Names
|
||||
<!-- List key directories the orchestrator needs to know about -->
|
||||
|
||||
| Directory | Purpose |
|
||||
|-----------|---------|
|
||||
| `src/` | Main source code |
|
||||
| `tests/` | Test files |
|
||||
| `docs/scratchpads/` | Working documents |
|
||||
|
||||
### Worker Checklist
|
||||
When completing an orchestrated task:
|
||||
1. Read the finding details from the report
|
||||
2. Implement the fix following existing code patterns
|
||||
3. Run quality gates (ALL must pass)
|
||||
4. Commit with: `git commit -m "fix({finding_id}): brief description"`
|
||||
5. Report result as JSON to orchestrator
|
||||
|
||||
### Post-Coding Review
|
||||
After implementing changes, the orchestrator will run:
|
||||
1. **Codex code review** — `~/.claude/scripts/codex/codex-code-review.sh --uncommitted`
|
||||
2. **Codex security review** — `~/.claude/scripts/codex/codex-security-review.sh --uncommitted`
|
||||
3. If blockers/critical findings: remediation task created
|
||||
4. If clean: task marked done
|
||||
|
||||
## Directory-Specific Context
|
||||
|
||||
<!-- Add sub-AGENTS.md files in subdirectories if needed -->
|
||||
<!-- Example: -->
|
||||
<!-- - `src/api/AGENTS.md` — API-specific patterns -->
|
||||
<!-- - `src/components/AGENTS.md` — Component conventions -->
|
||||
|
||||
## Testing Approaches
|
||||
|
||||
<!-- Document how tests should be written for this project -->
|
||||
<!-- Examples: -->
|
||||
<!-- - Unit tests use pytest with fixtures in conftest.py -->
|
||||
<!-- - Integration tests require DATABASE_URL env var -->
|
||||
<!-- - E2E tests use Playwright -->
|
||||
52
CLAUDE.md
52
CLAUDE.md
@@ -28,3 +28,55 @@ npm run build # Build to dist/
|
||||
- `track()` never throws — catches everything, routes to `onError` callback
|
||||
- Zero runtime deps: uses native `fetch` (Node 18+), `crypto.randomUUID()`, `setInterval`
|
||||
- All types are standalone — no dependency on the telemetry server package
|
||||
|
||||
## Conditional Documentation Loading
|
||||
|
||||
**Read the relevant guide before starting work:**
|
||||
|
||||
| Task Type | Guide |
|
||||
|-----------|-------|
|
||||
| Bootstrapping a new project | `~/.claude/agent-guides/bootstrap.md` |
|
||||
| Orchestrating autonomous tasks | `~/.claude/agent-guides/orchestrator.md` |
|
||||
| Ralph autonomous development | `~/.claude/agent-guides/ralph-autonomous.md` |
|
||||
| Frontend development | `~/.claude/agent-guides/frontend.md` |
|
||||
| Backend/API development | `~/.claude/agent-guides/backend.md` |
|
||||
| TypeScript strict typing | `~/.claude/agent-guides/typescript.md` |
|
||||
| Code review | `~/.claude/agent-guides/code-review.md` |
|
||||
| Authentication/Authorization | `~/.claude/agent-guides/authentication.md` |
|
||||
| Infrastructure/DevOps | `~/.claude/agent-guides/infrastructure.md` |
|
||||
| QA/Testing | `~/.claude/agent-guides/qa-testing.md` |
|
||||
| Secrets management (Vault) | `~/.claude/agent-guides/vault-secrets.md` |
|
||||
|
||||
|
||||
## Commits
|
||||
|
||||
```
|
||||
<type>(#issue): Brief description
|
||||
|
||||
Detailed explanation if needed.
|
||||
|
||||
Fixes #123
|
||||
```
|
||||
|
||||
Types: `feat`, `fix`, `docs`, `test`, `refactor`, `chore`
|
||||
|
||||
|
||||
## Secrets Management
|
||||
|
||||
**NEVER hardcode secrets.** Use `.env` files (gitignored) or a secrets manager.
|
||||
|
||||
```bash
|
||||
# .env.example is committed (with placeholders)
|
||||
# .env is NOT committed (contains real values)
|
||||
```
|
||||
|
||||
Ensure `.gitignore` includes `.env*` (except `.env.example`).
|
||||
|
||||
|
||||
## Multi-Agent Coordination
|
||||
|
||||
When multiple agents work on this project:
|
||||
1. `git pull --rebase` before editing
|
||||
2. `git pull --rebase` before pushing
|
||||
3. If conflicts, **alert the user** — don't auto-resolve data conflicts
|
||||
|
||||
|
||||
152
README.md
152
README.md
@@ -2,28 +2,58 @@
|
||||
|
||||
TypeScript client SDK for [Mosaic Stack Telemetry](https://tel.mosaicstack.dev). Reports task-completion metrics from AI coding harnesses and queries crowd-sourced predictions.
|
||||
|
||||
**Zero runtime dependencies** — uses native `fetch`, `crypto.randomUUID()`, and `setInterval`.
|
||||
**Zero runtime dependencies** — uses native `fetch`, `crypto.randomUUID()`, and `setInterval`. Requires Node.js 18+.
|
||||
|
||||
**Targets Mosaic Telemetry API v1** (`/v1/` endpoints, event schema version `1.0`).
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Latest stable release (from main)
|
||||
npm install @mosaicstack/telemetry-client
|
||||
|
||||
# Latest dev build (from develop)
|
||||
npm install @mosaicstack/telemetry-client@dev
|
||||
```
|
||||
|
||||
The Gitea npm registry must be configured in `.npmrc`:
|
||||
|
||||
```ini
|
||||
@mosaicstack:registry=https://git.mosaicstack.dev/api/packages/mosaic/npm/
|
||||
```
|
||||
|
||||
### Versioning
|
||||
|
||||
| Branch | Dist-tag | Version format | Example |
|
||||
|--------|----------|----------------|---------|
|
||||
| `main` | `latest` | `{version}` | `0.1.0` |
|
||||
| `develop` | `dev` | `{version}-dev.{YYYYMMDDHHmmss}` | `0.1.0-dev.20260215050000` |
|
||||
|
||||
Every push to `develop` publishes a new prerelease. Stable releases publish from `main` only when the version in `package.json` changes.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```typescript
|
||||
import { TelemetryClient, TaskType, Complexity, Harness, Provider, Outcome } from '@mosaicstack/telemetry-client';
|
||||
import {
|
||||
TelemetryClient,
|
||||
TaskType,
|
||||
Complexity,
|
||||
Harness,
|
||||
Provider,
|
||||
Outcome,
|
||||
QualityGate,
|
||||
} from '@mosaicstack/telemetry-client';
|
||||
|
||||
// 1. Create and start the client
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: 'https://tel.mosaicstack.dev',
|
||||
apiKey: 'your-64-char-hex-api-key',
|
||||
instanceId: 'your-instance-uuid',
|
||||
serverUrl: 'https://tel-api.mosaicstack.dev',
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
});
|
||||
|
||||
client.start();
|
||||
client.start(); // begins background batch submission every 5 minutes
|
||||
|
||||
// Build and track an event
|
||||
// 2. Build and track an event
|
||||
const event = client.eventBuilder.build({
|
||||
task_duration_ms: 45000,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
@@ -31,83 +61,101 @@ const event = client.eventBuilder.build({
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 5000,
|
||||
estimated_output_tokens: 2000,
|
||||
actual_input_tokens: 5500,
|
||||
actual_output_tokens: 2200,
|
||||
estimated_cost_usd_micros: 30000,
|
||||
actual_cost_usd_micros: 33000,
|
||||
estimated_input_tokens: 105000,
|
||||
estimated_output_tokens: 45000,
|
||||
actual_input_tokens: 112340,
|
||||
actual_output_tokens: 38760,
|
||||
estimated_cost_usd_micros: 630000,
|
||||
actual_cost_usd_micros: 919200,
|
||||
quality_gate_passed: true,
|
||||
quality_gates_run: [],
|
||||
quality_gates_run: [QualityGate.BUILD, QualityGate.LINT, QualityGate.TEST],
|
||||
quality_gates_failed: [],
|
||||
context_compactions: 0,
|
||||
context_compactions: 2,
|
||||
context_rotations: 0,
|
||||
context_utilization_final: 0.4,
|
||||
context_utilization_final: 0.72,
|
||||
outcome: Outcome.SUCCESS,
|
||||
retry_count: 0,
|
||||
language: 'typescript',
|
||||
repo_size_category: 'medium',
|
||||
});
|
||||
|
||||
client.track(event);
|
||||
client.track(event); // queues the event (never throws)
|
||||
|
||||
// When shutting down
|
||||
await client.stop();
|
||||
```
|
||||
|
||||
## Querying Predictions
|
||||
|
||||
```typescript
|
||||
const query = {
|
||||
// 3. Query predictions
|
||||
const prediction = client.getPrediction({
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
provider: Provider.ANTHROPIC,
|
||||
complexity: Complexity.MEDIUM,
|
||||
};
|
||||
});
|
||||
|
||||
// Fetch from server and cache locally
|
||||
await client.refreshPredictions([query]);
|
||||
|
||||
// Get cached prediction (returns null if not cached)
|
||||
const prediction = client.getPrediction(query);
|
||||
if (prediction?.prediction) {
|
||||
console.log('Median input tokens:', prediction.prediction.input_tokens.median);
|
||||
console.log('Median cost (microdollars):', prediction.prediction.cost_usd_micros.median);
|
||||
}
|
||||
// 4. Shut down gracefully (flushes remaining events)
|
||||
await client.stop();
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: 'https://tel.mosaicstack.dev', // Required
|
||||
apiKey: 'your-api-key', // Required (64-char hex)
|
||||
instanceId: 'your-uuid', // Required
|
||||
| Option | Type | Default | Description |
|
||||
|--------|------|---------|-------------|
|
||||
| `serverUrl` | `string` | **required** | Telemetry API base URL |
|
||||
| `apiKey` | `string` | **required** | Bearer token for authentication |
|
||||
| `instanceId` | `string` | **required** | UUID identifying this instance |
|
||||
| `enabled` | `boolean` | `true` | Set `false` to disable — `track()` becomes a no-op |
|
||||
| `submitIntervalMs` | `number` | `300_000` | Background flush interval (5 min) |
|
||||
| `maxQueueSize` | `number` | `1000` | Max queued events before FIFO eviction |
|
||||
| `batchSize` | `number` | `100` | Events per batch submission (server max: 100) |
|
||||
| `requestTimeoutMs` | `number` | `10_000` | HTTP request timeout |
|
||||
| `predictionCacheTtlMs` | `number` | `21_600_000` | Prediction cache TTL (6 hours) |
|
||||
| `dryRun` | `boolean` | `false` | Log events instead of sending them |
|
||||
| `maxRetries` | `number` | `3` | Retry attempts with exponential backoff |
|
||||
| `onError` | `(error: Error) => void` | silent | Error callback |
|
||||
|
||||
// Optional
|
||||
enabled: true, // Set false to disable (track() becomes no-op)
|
||||
submitIntervalMs: 300_000, // Background flush interval (default: 5 min)
|
||||
maxQueueSize: 1000, // Max queued events (default: 1000, FIFO eviction)
|
||||
batchSize: 100, // Events per batch (default/max: 100)
|
||||
requestTimeoutMs: 10_000, // HTTP timeout (default: 10s)
|
||||
predictionCacheTtlMs: 21_600_000, // Prediction cache TTL (default: 6 hours)
|
||||
dryRun: false, // Log events instead of sending
|
||||
maxRetries: 3, // Retry attempts on failure
|
||||
onError: (err) => console.error(err), // Error callback
|
||||
## Querying Predictions
|
||||
|
||||
Predictions are crowd-sourced token/cost/duration estimates from the telemetry API. The SDK caches them locally with a configurable TTL.
|
||||
|
||||
```typescript
|
||||
// Fetch predictions from the server and cache locally
|
||||
await client.refreshPredictions([
|
||||
{ task_type: TaskType.IMPLEMENTATION, model: 'claude-sonnet-4-5-20250929', provider: Provider.ANTHROPIC, complexity: Complexity.MEDIUM },
|
||||
{ task_type: TaskType.TESTING, model: 'claude-haiku-4-5-20251001', provider: Provider.ANTHROPIC, complexity: Complexity.LOW },
|
||||
]);
|
||||
|
||||
// Read from cache (returns null if not cached or expired)
|
||||
const prediction = client.getPrediction({
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
provider: Provider.ANTHROPIC,
|
||||
complexity: Complexity.MEDIUM,
|
||||
});
|
||||
|
||||
if (prediction?.prediction) {
|
||||
console.log('Median input tokens:', prediction.prediction.input_tokens.median);
|
||||
console.log('Median cost ($):', prediction.prediction.cost_usd_micros.median / 1_000_000);
|
||||
console.log('Confidence:', prediction.metadata.confidence);
|
||||
}
|
||||
```
|
||||
|
||||
## Dry-Run Mode
|
||||
|
||||
For testing without sending data:
|
||||
For development and testing without sending data to the server:
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: 'https://tel.mosaicstack.dev',
|
||||
serverUrl: 'https://tel-api.mosaicstack.dev',
|
||||
apiKey: 'test-key',
|
||||
instanceId: 'test-uuid',
|
||||
dryRun: true,
|
||||
});
|
||||
```
|
||||
|
||||
In dry-run mode, `track()` still queues events and `flush()` still runs, but the `BatchSubmitter` returns synthetic `accepted` responses without making HTTP calls.
|
||||
|
||||
## Documentation
|
||||
|
||||
- **[Integration Guide](docs/integration-guide.md)** — Next.js and Node.js examples, environment-specific configuration, error handling patterns
|
||||
- **[API Reference](docs/api-reference.md)** — Full reference for all exported classes, methods, types, and enums
|
||||
|
||||
## License
|
||||
|
||||
MPL-2.0
|
||||
|
||||
602
docs/api-reference.md
Normal file
602
docs/api-reference.md
Normal file
@@ -0,0 +1,602 @@
|
||||
# API Reference
|
||||
|
||||
Complete reference for all classes, methods, types, and enums exported by `@mosaicstack/telemetry-client`.
|
||||
|
||||
**SDK version:** 0.1.0
|
||||
**Targets:** Mosaic Telemetry API v1, event schema version `1.0`
|
||||
|
||||
---
|
||||
|
||||
## TelemetryClient
|
||||
|
||||
Main entry point. Queues task-completion events for background batch submission and provides access to cached predictions.
|
||||
|
||||
```typescript
|
||||
import { TelemetryClient } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
### Constructor
|
||||
|
||||
```typescript
|
||||
new TelemetryClient(config: TelemetryConfig)
|
||||
```
|
||||
|
||||
Creates a new client instance. Does **not** start background submission — call `start()` to begin.
|
||||
|
||||
### Properties
|
||||
|
||||
| Property | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `eventBuilder` | `EventBuilder` | Builder for constructing `TaskCompletionEvent` objects |
|
||||
| `queueSize` | `number` | Number of events currently in the queue |
|
||||
| `isRunning` | `boolean` | Whether background submission is active |
|
||||
|
||||
### Methods
|
||||
|
||||
#### `start(): void`
|
||||
|
||||
Start background batch submission via `setInterval`. Idempotent — calling `start()` multiple times has no effect.
|
||||
|
||||
#### `stop(): Promise<void>`
|
||||
|
||||
Stop background submission and flush all remaining events. Idempotent. Returns a promise that resolves when the final flush completes.
|
||||
|
||||
#### `track(event: TaskCompletionEvent): void`
|
||||
|
||||
Queue an event for batch submission. **Never throws** — all errors are caught and routed to the `onError` callback.
|
||||
|
||||
When `enabled` is `false`, this method returns immediately without queuing.
|
||||
|
||||
When the queue is at capacity (`maxQueueSize`), the oldest event is evicted to make room.
|
||||
|
||||
#### `getPrediction(query: PredictionQuery): PredictionResponse | null`
|
||||
|
||||
Get a cached prediction for the given query dimensions. Returns `null` if no prediction is cached or the cache entry has expired.
|
||||
|
||||
#### `refreshPredictions(queries: PredictionQuery[]): Promise<void>`
|
||||
|
||||
Fetch predictions from the server via `POST /v1/predictions/batch` and store them in the local cache. The predictions endpoint is public — no authentication required.
|
||||
|
||||
Accepts up to 50 queries per call (server limit).
|
||||
|
||||
---
|
||||
|
||||
## EventBuilder
|
||||
|
||||
Convenience builder that auto-fills `event_id`, `timestamp`, `instance_id`, and `schema_version`.
|
||||
|
||||
```typescript
|
||||
import { EventBuilder } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
Access via `client.eventBuilder` — you don't normally construct this directly.
|
||||
|
||||
### Methods
|
||||
|
||||
#### `build(params: EventBuilderParams): TaskCompletionEvent`
|
||||
|
||||
Build a complete `TaskCompletionEvent` from the given parameters.
|
||||
|
||||
Auto-generated fields:
|
||||
- `event_id` — `crypto.randomUUID()`
|
||||
- `timestamp` — `new Date().toISOString()`
|
||||
- `instance_id` — from client config
|
||||
- `schema_version` — `"1.0"`
|
||||
|
||||
---
|
||||
|
||||
## EventQueue
|
||||
|
||||
Bounded FIFO queue for telemetry events. Used internally by `TelemetryClient`.
|
||||
|
||||
```typescript
|
||||
import { EventQueue } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
### Constructor
|
||||
|
||||
```typescript
|
||||
new EventQueue(maxSize: number)
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Property | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `size` | `number` | Current number of events in the queue |
|
||||
| `isEmpty` | `boolean` | Whether the queue is empty |
|
||||
|
||||
### Methods
|
||||
|
||||
#### `enqueue(event: TaskCompletionEvent): void`
|
||||
|
||||
Add an event. Evicts the oldest event if at capacity.
|
||||
|
||||
#### `drain(maxItems: number): TaskCompletionEvent[]`
|
||||
|
||||
Remove and return up to `maxItems` events from the front.
|
||||
|
||||
#### `prepend(events: TaskCompletionEvent[]): void`
|
||||
|
||||
Prepend events back to the front (used for re-enqueue on submission failure). Respects `maxSize` — excess events are dropped.
|
||||
|
||||
---
|
||||
|
||||
## BatchSubmitter
|
||||
|
||||
Handles HTTP submission of event batches with retry logic.
|
||||
|
||||
```typescript
|
||||
import { BatchSubmitter } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
### Methods
|
||||
|
||||
#### `submit(events: TaskCompletionEvent[]): Promise<SubmitResult>`
|
||||
|
||||
Submit a batch to `POST /v1/events/batch`. Retries with exponential backoff (1s base, 60s max, with jitter) on transient failures. Respects the server's `Retry-After` header on HTTP 429.
|
||||
|
||||
In dry-run mode, returns a synthetic success response without making HTTP calls.
|
||||
|
||||
---
|
||||
|
||||
## PredictionCache
|
||||
|
||||
In-memory TTL cache for prediction responses.
|
||||
|
||||
```typescript
|
||||
import { PredictionCache } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
### Constructor
|
||||
|
||||
```typescript
|
||||
new PredictionCache(ttlMs: number)
|
||||
```
|
||||
|
||||
### Properties
|
||||
|
||||
| Property | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `size` | `number` | Number of entries in cache (may include expired entries) |
|
||||
|
||||
### Methods
|
||||
|
||||
#### `get(query: PredictionQuery): PredictionResponse | null`
|
||||
|
||||
Retrieve a cached prediction. Returns `null` if not cached or expired (expired entries are lazily deleted).
|
||||
|
||||
#### `set(query: PredictionQuery, response: PredictionResponse): void`
|
||||
|
||||
Store a prediction with TTL.
|
||||
|
||||
#### `clear(): void`
|
||||
|
||||
Clear all cached predictions.
|
||||
|
||||
---
|
||||
|
||||
## Configuration Types
|
||||
|
||||
### TelemetryConfig
|
||||
|
||||
User-facing configuration passed to the `TelemetryClient` constructor.
|
||||
|
||||
```typescript
|
||||
import type { TelemetryConfig } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Required | Default | Description |
|
||||
|-------|------|----------|---------|-------------|
|
||||
| `serverUrl` | `string` | Yes | — | Telemetry API base URL (e.g., `"https://tel-api.mosaicstack.dev"`) |
|
||||
| `apiKey` | `string` | Yes | — | Bearer token for `POST /v1/events/batch` authentication |
|
||||
| `instanceId` | `string` | Yes | — | UUID identifying this Mosaic Stack instance |
|
||||
| `enabled` | `boolean` | No | `true` | When `false`, `track()` is a no-op |
|
||||
| `submitIntervalMs` | `number` | No | `300_000` | Background flush interval in ms (5 min) |
|
||||
| `maxQueueSize` | `number` | No | `1000` | Maximum events held in queue before FIFO eviction |
|
||||
| `batchSize` | `number` | No | `100` | Events per batch (server max: 100) |
|
||||
| `requestTimeoutMs` | `number` | No | `10_000` | HTTP request timeout in ms |
|
||||
| `predictionCacheTtlMs` | `number` | No | `21_600_000` | Prediction cache TTL in ms (6 hours) |
|
||||
| `dryRun` | `boolean` | No | `false` | Simulate submissions without HTTP calls |
|
||||
| `maxRetries` | `number` | No | `3` | Retry attempts on transient failure |
|
||||
| `onError` | `(error: Error) => void` | No | silent | Callback invoked on errors |
|
||||
|
||||
### ResolvedConfig
|
||||
|
||||
Internal configuration with all defaults applied. All fields are required (non-optional).
|
||||
|
||||
```typescript
|
||||
import type { ResolvedConfig } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
### resolveConfig
|
||||
|
||||
```typescript
|
||||
import { resolveConfig } from '@mosaicstack/telemetry-client';
|
||||
|
||||
function resolveConfig(config: TelemetryConfig): ResolvedConfig
|
||||
```
|
||||
|
||||
Apply defaults to a `TelemetryConfig`, producing a `ResolvedConfig`. Strips trailing slashes from `serverUrl`.
|
||||
|
||||
---
|
||||
|
||||
## Event Types
|
||||
|
||||
### EventBuilderParams
|
||||
|
||||
Parameters accepted by `EventBuilder.build()`. Excludes auto-generated fields (`event_id`, `timestamp`, `instance_id`, `schema_version`).
|
||||
|
||||
```typescript
|
||||
import type { EventBuilderParams } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
|-------|------|----------|-------------|
|
||||
| `task_duration_ms` | `number` | Yes | Wall-clock time in ms (0–86,400,000) |
|
||||
| `task_type` | `TaskType` | Yes | Category of work performed |
|
||||
| `complexity` | `Complexity` | Yes | Task complexity level |
|
||||
| `harness` | `Harness` | Yes | Coding tool / execution environment |
|
||||
| `model` | `string` | Yes | Model identifier (1–100 chars) |
|
||||
| `provider` | `Provider` | Yes | LLM provider |
|
||||
| `estimated_input_tokens` | `number` | Yes | Pre-task input token estimate (0–10,000,000) |
|
||||
| `estimated_output_tokens` | `number` | Yes | Pre-task output token estimate (0–10,000,000) |
|
||||
| `actual_input_tokens` | `number` | Yes | Actual input tokens consumed (0–10,000,000) |
|
||||
| `actual_output_tokens` | `number` | Yes | Actual output tokens generated (0–10,000,000) |
|
||||
| `estimated_cost_usd_micros` | `number` | Yes | Estimated cost in microdollars (0–100,000,000) |
|
||||
| `actual_cost_usd_micros` | `number` | Yes | Actual cost in microdollars (0–100,000,000) |
|
||||
| `quality_gate_passed` | `boolean` | Yes | Whether all quality gates passed |
|
||||
| `quality_gates_run` | `QualityGate[]` | Yes | Gates that were executed |
|
||||
| `quality_gates_failed` | `QualityGate[]` | Yes | Gates that failed |
|
||||
| `context_compactions` | `number` | Yes | Context compaction count (0–100) |
|
||||
| `context_rotations` | `number` | Yes | Context rotation count (0–50) |
|
||||
| `context_utilization_final` | `number` | Yes | Final context utilization ratio (0.0–1.0) |
|
||||
| `outcome` | `Outcome` | Yes | Task result |
|
||||
| `retry_count` | `number` | Yes | Number of retries (0–20) |
|
||||
| `language` | `string \| null` | No | Primary programming language (max 30 chars) |
|
||||
| `repo_size_category` | `RepoSizeCategory \| null` | No | Repository size bucket |
|
||||
|
||||
### TaskCompletionEvent
|
||||
|
||||
Full event object submitted to the server. Extends `EventBuilderParams` with auto-generated identity fields.
|
||||
|
||||
```typescript
|
||||
import type { TaskCompletionEvent } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
Additional fields (auto-generated by `EventBuilder`):
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `instance_id` | `string` | UUID identifying the submitting instance |
|
||||
| `event_id` | `string` | Unique UUID for deduplication |
|
||||
| `schema_version` | `string` | Always `"1.0"` |
|
||||
| `timestamp` | `string` | ISO 8601 datetime |
|
||||
|
||||
---
|
||||
|
||||
## Prediction Types
|
||||
|
||||
### PredictionQuery
|
||||
|
||||
Query parameters for fetching a prediction.
|
||||
|
||||
```typescript
|
||||
import type { PredictionQuery } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `task_type` | `TaskType` | Task type to predict for |
|
||||
| `model` | `string` | Model identifier |
|
||||
| `provider` | `Provider` | LLM provider |
|
||||
| `complexity` | `Complexity` | Complexity level |
|
||||
|
||||
### PredictionResponse
|
||||
|
||||
Response from the predictions endpoint.
|
||||
|
||||
```typescript
|
||||
import type { PredictionResponse } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `prediction` | `PredictionData \| null` | Prediction data, or `null` if no data available |
|
||||
| `metadata` | `PredictionMetadata` | Sample size, confidence, fallback info |
|
||||
|
||||
### PredictionData
|
||||
|
||||
Statistical prediction for a dimension combination.
|
||||
|
||||
```typescript
|
||||
import type { PredictionData } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `input_tokens` | `TokenDistribution` | Input token distribution (p10/p25/median/p75/p90) |
|
||||
| `output_tokens` | `TokenDistribution` | Output token distribution |
|
||||
| `cost_usd_micros` | `Record<string, number>` | Cost stats — `{ median: number }` |
|
||||
| `duration_ms` | `Record<string, number>` | Duration stats — `{ median: number }` |
|
||||
| `correction_factors` | `CorrectionFactors` | Actual-to-estimated token ratios |
|
||||
| `quality` | `QualityPrediction` | Quality gate pass rate and success rate |
|
||||
|
||||
### TokenDistribution
|
||||
|
||||
Percentile distribution of token counts.
|
||||
|
||||
```typescript
|
||||
import type { TokenDistribution } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `p10` | `number` | 10th percentile |
|
||||
| `p25` | `number` | 25th percentile |
|
||||
| `median` | `number` | 50th percentile (median) |
|
||||
| `p75` | `number` | 75th percentile |
|
||||
| `p90` | `number` | 90th percentile |
|
||||
|
||||
### CorrectionFactors
|
||||
|
||||
Ratio of actual to estimated tokens. Values >1.0 mean estimates tend to be too low.
|
||||
|
||||
```typescript
|
||||
import type { CorrectionFactors } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `input` | `number` | Actual / estimated input tokens |
|
||||
| `output` | `number` | Actual / estimated output tokens |
|
||||
|
||||
### QualityPrediction
|
||||
|
||||
Predicted quality gate and success rates.
|
||||
|
||||
```typescript
|
||||
import type { QualityPrediction } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `gate_pass_rate` | `number` | Fraction of events where all quality gates pass (0.0–1.0) |
|
||||
| `success_rate` | `number` | Fraction of events with `outcome: "success"` (0.0–1.0) |
|
||||
|
||||
### PredictionMetadata
|
||||
|
||||
Metadata about a prediction response.
|
||||
|
||||
```typescript
|
||||
import type { PredictionMetadata } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `sample_size` | `number` | Number of events used to compute this prediction |
|
||||
| `fallback_level` | `number` | 0 = exact match, 1+ = dimensions dropped, -1 = no data |
|
||||
| `confidence` | `'none' \| 'low' \| 'medium' \| 'high'` | Confidence level |
|
||||
| `last_updated` | `string \| null` | ISO 8601 timestamp of last computation |
|
||||
| `dimensions_matched` | `Record<string, string \| null> \| null` | Matched dimensions (`null` values indicate fallback) |
|
||||
| `fallback_note` | `string \| null` | Human-readable fallback explanation |
|
||||
| `cache_hit` | `boolean` | Whether served from server-side cache |
|
||||
|
||||
**Confidence level criteria:**
|
||||
|
||||
| Level | Criteria |
|
||||
|-------|----------|
|
||||
| `none` | No data available. `prediction` is `null`. |
|
||||
| `low` | Sample size < 30 or fallback was applied |
|
||||
| `medium` | Sample size 30–99, exact match |
|
||||
| `high` | Sample size >= 100, exact match |
|
||||
|
||||
---
|
||||
|
||||
## Batch Types
|
||||
|
||||
### BatchEventRequest
|
||||
|
||||
Request body for `POST /v1/events/batch`.
|
||||
|
||||
```typescript
|
||||
import type { BatchEventRequest } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `events` | `TaskCompletionEvent[]` | 1–100 events to submit |
|
||||
|
||||
### BatchEventResponse
|
||||
|
||||
Response from `POST /v1/events/batch`.
|
||||
|
||||
```typescript
|
||||
import type { BatchEventResponse } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `accepted` | `number` | Count of accepted events |
|
||||
| `rejected` | `number` | Count of rejected events |
|
||||
| `results` | `BatchEventResult[]` | Per-event result details |
|
||||
|
||||
### BatchEventResult
|
||||
|
||||
Per-event result within a batch response.
|
||||
|
||||
```typescript
|
||||
import type { BatchEventResult } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `event_id` | `string` | The event's UUID |
|
||||
| `status` | `'accepted' \| 'rejected'` | Whether the event was accepted |
|
||||
| `error` | `string \| null` | Error message if rejected |
|
||||
|
||||
### SubmitResult
|
||||
|
||||
Internal result type from `BatchSubmitter.submit()`.
|
||||
|
||||
```typescript
|
||||
import type { SubmitResult } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `success` | `boolean` | Whether the submission succeeded |
|
||||
| `response` | `BatchEventResponse \| undefined` | Server response (on success) |
|
||||
| `retryAfterMs` | `number \| undefined` | Retry delay from 429 response |
|
||||
| `error` | `Error \| undefined` | Error details (on failure) |
|
||||
|
||||
### BatchPredictionRequest
|
||||
|
||||
Request body for `POST /v1/predictions/batch`.
|
||||
|
||||
```typescript
|
||||
import type { BatchPredictionRequest } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `queries` | `PredictionQuery[]` | 1–50 prediction queries |
|
||||
|
||||
### BatchPredictionResponse
|
||||
|
||||
Response from `POST /v1/predictions/batch`.
|
||||
|
||||
```typescript
|
||||
import type { BatchPredictionResponse } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `results` | `PredictionResponse[]` | One response per query, in request order |
|
||||
|
||||
---
|
||||
|
||||
## Enums
|
||||
|
||||
All enums use string values matching the server's API contract.
|
||||
|
||||
### TaskType
|
||||
|
||||
```typescript
|
||||
import { TaskType } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description |
|
||||
|--------|-------|-------------|
|
||||
| `PLANNING` | `"planning"` | Architecture design, task breakdown |
|
||||
| `IMPLEMENTATION` | `"implementation"` | Writing new code |
|
||||
| `CODE_REVIEW` | `"code_review"` | Reviewing existing code |
|
||||
| `TESTING` | `"testing"` | Writing or running tests |
|
||||
| `DEBUGGING` | `"debugging"` | Investigating and fixing bugs |
|
||||
| `REFACTORING` | `"refactoring"` | Restructuring existing code |
|
||||
| `DOCUMENTATION` | `"documentation"` | Writing docs, comments, READMEs |
|
||||
| `CONFIGURATION` | `"configuration"` | Config files, CI/CD, infrastructure |
|
||||
| `SECURITY_AUDIT` | `"security_audit"` | Security review, vulnerability analysis |
|
||||
| `UNKNOWN` | `"unknown"` | Unclassified task type (fallback) |
|
||||
|
||||
### Complexity
|
||||
|
||||
```typescript
|
||||
import { Complexity } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description | Typical Token Budget |
|
||||
|--------|-------|-------------|---------------------|
|
||||
| `LOW` | `"low"` | Simple fixes, typos, config changes | 50,000 |
|
||||
| `MEDIUM` | `"medium"` | Standard features, moderate logic | 150,000 |
|
||||
| `HIGH` | `"high"` | Complex features, multi-file changes | 350,000 |
|
||||
| `CRITICAL` | `"critical"` | Major refactoring, architectural changes | 750,000 |
|
||||
|
||||
### Harness
|
||||
|
||||
```typescript
|
||||
import { Harness } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description |
|
||||
|--------|-------|-------------|
|
||||
| `CLAUDE_CODE` | `"claude_code"` | Anthropic Claude Code CLI |
|
||||
| `OPENCODE` | `"opencode"` | OpenCode CLI |
|
||||
| `KILO_CODE` | `"kilo_code"` | Kilo Code VS Code extension |
|
||||
| `AIDER` | `"aider"` | Aider AI pair programming |
|
||||
| `API_DIRECT` | `"api_direct"` | Direct API calls (no harness) |
|
||||
| `OLLAMA_LOCAL` | `"ollama_local"` | Ollama local inference |
|
||||
| `CUSTOM` | `"custom"` | Custom or unrecognized harness |
|
||||
| `UNKNOWN` | `"unknown"` | Harness not reported |
|
||||
|
||||
### Provider
|
||||
|
||||
```typescript
|
||||
import { Provider } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description |
|
||||
|--------|-------|-------------|
|
||||
| `ANTHROPIC` | `"anthropic"` | Anthropic (Claude models) |
|
||||
| `OPENAI` | `"openai"` | OpenAI (GPT models) |
|
||||
| `OPENROUTER` | `"openrouter"` | OpenRouter (multi-provider routing) |
|
||||
| `OLLAMA` | `"ollama"` | Ollama (local/self-hosted) |
|
||||
| `GOOGLE` | `"google"` | Google (Gemini models) |
|
||||
| `MISTRAL` | `"mistral"` | Mistral AI |
|
||||
| `CUSTOM` | `"custom"` | Custom or unrecognized provider |
|
||||
| `UNKNOWN` | `"unknown"` | Provider not reported |
|
||||
|
||||
### QualityGate
|
||||
|
||||
```typescript
|
||||
import { QualityGate } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description |
|
||||
|--------|-------|-------------|
|
||||
| `BUILD` | `"build"` | Code compiles/builds successfully |
|
||||
| `LINT` | `"lint"` | Linter passes with no errors |
|
||||
| `TEST` | `"test"` | Unit/integration tests pass |
|
||||
| `COVERAGE` | `"coverage"` | Code coverage meets threshold (85%) |
|
||||
| `TYPECHECK` | `"typecheck"` | Type checker passes |
|
||||
| `SECURITY` | `"security"` | Security scan passes |
|
||||
|
||||
### Outcome
|
||||
|
||||
```typescript
|
||||
import { Outcome } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Description |
|
||||
|--------|-------|-------------|
|
||||
| `SUCCESS` | `"success"` | Task completed, all quality gates passed |
|
||||
| `FAILURE` | `"failure"` | Task failed after all retries |
|
||||
| `PARTIAL` | `"partial"` | Task partially completed (some gates passed) |
|
||||
| `TIMEOUT` | `"timeout"` | Task exceeded time or token budget |
|
||||
|
||||
### RepoSizeCategory
|
||||
|
||||
```typescript
|
||||
import { RepoSizeCategory } from '@mosaicstack/telemetry-client';
|
||||
```
|
||||
|
||||
| Member | Value | Approximate LOC | Description |
|
||||
|--------|-------|-----------------|-------------|
|
||||
| `TINY` | `"tiny"` | < 1,000 | Scripts, single-file projects |
|
||||
| `SMALL` | `"small"` | 1,000–10,000 | Small libraries, tools |
|
||||
| `MEDIUM` | `"medium"` | 10,000–100,000 | Standard applications |
|
||||
| `LARGE` | `"large"` | 100,000–1,000,000 | Large applications, monorepos |
|
||||
| `HUGE` | `"huge"` | > 1,000,000 | Enterprise codebases |
|
||||
|
||||
---
|
||||
|
||||
## Server API Endpoints Used
|
||||
|
||||
The SDK communicates with these Mosaic Telemetry API v1 endpoints:
|
||||
|
||||
| SDK Method | HTTP Endpoint | Auth Required |
|
||||
|------------|---------------|---------------|
|
||||
| `flush()` (internal) | `POST /v1/events/batch` | Yes (Bearer token) |
|
||||
| `refreshPredictions()` | `POST /v1/predictions/batch` | No (public) |
|
||||
|
||||
For the full server API specification, see the [Mosaic Telemetry API Reference](https://tel-api.mosaicstack.dev/v1/docs).
|
||||
422	docs/integration-guide.md	Normal file
@@ -0,0 +1,422 @@
|
||||
# Integration Guide
|
||||
|
||||
This guide covers how to integrate `@mosaicstack/telemetry-client` into your applications. The SDK targets **Mosaic Telemetry API v1** (event schema version `1.0`).
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Node.js >= 18 (for native `fetch` and `crypto.randomUUID()`)
|
||||
- A Mosaic Telemetry API key and instance ID (issued by an administrator via the admin API)
|
||||
|
||||
## Installation
|
||||
|
||||
Configure the Gitea npm registry in your project's `.npmrc`:
|
||||
|
||||
```ini
|
||||
@mosaicstack:registry=https://git.mosaicstack.dev/api/packages/mosaic/npm/
|
||||
```
|
||||
|
||||
Then install:
|
||||
|
||||
```bash
|
||||
# Latest stable release (from main)
|
||||
npm install @mosaicstack/telemetry-client
|
||||
|
||||
# Latest dev build (from develop)
|
||||
npm install @mosaicstack/telemetry-client@dev
|
||||
```
|
||||
|
||||
| Branch | Dist-tag | Version format | Example |
|
||||
|--------|----------|----------------|---------|
|
||||
| `main` | `latest` | `{version}` | `0.1.0` |
|
||||
| `develop` | `dev` | `{version}-dev.{YYYYMMDDHHmmss}` | `0.1.0-dev.20260215050000` |
|
||||
|
||||
The package ships both ESM and CommonJS builds (via the `exports` map: `import` → `dist/esm`, `require` → `dist/cjs`) with TypeScript declarations. Zero runtime dependencies.
|
||||
|
||||
## Environment Setup
|
||||
|
||||
Store your credentials in environment variables — never hardcode them.
|
||||
|
||||
```bash
|
||||
# .env (not committed — add to .gitignore)
|
||||
TELEMETRY_API_URL=https://tel-api.mosaicstack.dev
|
||||
TELEMETRY_API_KEY=msk_your_api_key_here
|
||||
TELEMETRY_INSTANCE_ID=a1b2c3d4-e5f6-4a7b-8c9d-0e1f2a3b4c5d
|
||||
```
|
||||
|
||||
```bash
|
||||
# .env.example (committed — documents required variables)
|
||||
TELEMETRY_API_URL=https://tel-api.mosaicstack.dev
|
||||
TELEMETRY_API_KEY=your-api-key
|
||||
TELEMETRY_INSTANCE_ID=your-instance-uuid
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Instrumenting a Next.js App
|
||||
|
||||
Next.js server actions and API routes run on Node.js, so the SDK works directly. Create a shared singleton and track events from your server-side code.
|
||||
|
||||
### 1. Create a telemetry singleton
|
||||
|
||||
```typescript
|
||||
// lib/telemetry.ts
|
||||
import {
|
||||
TelemetryClient,
|
||||
TaskType,
|
||||
Complexity,
|
||||
Harness,
|
||||
Provider,
|
||||
Outcome,
|
||||
QualityGate,
|
||||
} from '@mosaicstack/telemetry-client';
|
||||
|
||||
let client: TelemetryClient | null = null;
|
||||
|
||||
export function getTelemetryClient(): TelemetryClient {
|
||||
if (!client) {
|
||||
client = new TelemetryClient({
|
||||
serverUrl: process.env.TELEMETRY_API_URL!,
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
enabled: process.env.NODE_ENV === 'production',
|
||||
onError: (err) => console.error('[telemetry]', err.message),
|
||||
});
|
||||
client.start();
|
||||
}
|
||||
return client;
|
||||
}
|
||||
|
||||
// Re-export enums for convenience
|
||||
export { TaskType, Complexity, Harness, Provider, Outcome, QualityGate };
|
||||
```
|
||||
|
||||
### 2. Track events from an API route
|
||||
|
||||
```typescript
|
||||
// app/api/task-complete/route.ts
|
||||
import { NextResponse } from 'next/server';
|
||||
import { getTelemetryClient, TaskType, Complexity, Harness, Provider, Outcome } from '@/lib/telemetry';
|
||||
|
||||
export async function POST(request: Request) {
|
||||
const body = await request.json();
|
||||
|
||||
const client = getTelemetryClient();
|
||||
const event = client.eventBuilder.build({
|
||||
task_duration_ms: body.durationMs,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.MEDIUM,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: body.model,
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: body.estimatedInputTokens,
|
||||
estimated_output_tokens: body.estimatedOutputTokens,
|
||||
actual_input_tokens: body.actualInputTokens,
|
||||
actual_output_tokens: body.actualOutputTokens,
|
||||
estimated_cost_usd_micros: body.estimatedCostMicros,
|
||||
actual_cost_usd_micros: body.actualCostMicros,
|
||||
quality_gate_passed: body.qualityGatePassed,
|
||||
quality_gates_run: body.qualityGatesRun,
|
||||
quality_gates_failed: body.qualityGatesFailed,
|
||||
context_compactions: body.contextCompactions,
|
||||
context_rotations: body.contextRotations,
|
||||
context_utilization_final: body.contextUtilization,
|
||||
outcome: Outcome.SUCCESS,
|
||||
retry_count: 0,
|
||||
language: 'typescript',
|
||||
});
|
||||
|
||||
client.track(event);
|
||||
|
||||
return NextResponse.json({ status: 'queued' });
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Graceful shutdown
|
||||
|
||||
Next.js doesn't provide a built-in shutdown hook, but you can handle `SIGTERM`:
|
||||
|
||||
```typescript
|
||||
// instrumentation.ts (Next.js instrumentation file)
|
||||
export async function register() {
|
||||
if (process.env.NEXT_RUNTIME === 'nodejs') {
|
||||
const { getTelemetryClient } = await import('./lib/telemetry');
|
||||
|
||||
// Ensure the client starts on server boot
|
||||
getTelemetryClient();
|
||||
|
||||
// Flush remaining events on shutdown
|
||||
const shutdown = async () => {
|
||||
const { getTelemetryClient } = await import('./lib/telemetry');
|
||||
const client = getTelemetryClient();
|
||||
await client.stop();
|
||||
process.exit(0);
|
||||
};
|
||||
|
||||
process.on('SIGTERM', shutdown);
|
||||
process.on('SIGINT', shutdown);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Instrumenting a Node.js Service
|
||||
|
||||
For a standalone Node.js service (Express, Fastify, plain script, etc.).
|
||||
|
||||
### 1. Initialize and start
|
||||
|
||||
```typescript
|
||||
// src/telemetry.ts
|
||||
import { TelemetryClient } from '@mosaicstack/telemetry-client';
|
||||
|
||||
export const telemetry = new TelemetryClient({
|
||||
serverUrl: process.env.TELEMETRY_API_URL ?? 'https://tel-api.mosaicstack.dev',
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
onError: (err) => console.error('[telemetry]', err.message),
|
||||
});
|
||||
|
||||
telemetry.start();
|
||||
```
|
||||
|
||||
### 2. Track events after task completion
|
||||
|
||||
```typescript
|
||||
// src/task-runner.ts
|
||||
import {
|
||||
TaskType,
|
||||
Complexity,
|
||||
Harness,
|
||||
Provider,
|
||||
Outcome,
|
||||
QualityGate,
|
||||
} from '@mosaicstack/telemetry-client';
|
||||
import { telemetry } from './telemetry.js';
|
||||
|
||||
async function runTask() {
|
||||
const startTime = Date.now();
|
||||
|
||||
// ... run your AI coding task ...
|
||||
|
||||
const durationMs = Date.now() - startTime;
|
||||
|
||||
const event = telemetry.eventBuilder.build({
|
||||
task_duration_ms: durationMs,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.HIGH,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 200000,
|
||||
estimated_output_tokens: 80000,
|
||||
actual_input_tokens: 215000,
|
||||
actual_output_tokens: 72000,
|
||||
estimated_cost_usd_micros: 1200000,
|
||||
actual_cost_usd_micros: 1150000,
|
||||
quality_gate_passed: true,
|
||||
quality_gates_run: [
|
||||
QualityGate.BUILD,
|
||||
QualityGate.LINT,
|
||||
QualityGate.TEST,
|
||||
QualityGate.TYPECHECK,
|
||||
],
|
||||
quality_gates_failed: [],
|
||||
context_compactions: 3,
|
||||
context_rotations: 1,
|
||||
context_utilization_final: 0.85,
|
||||
outcome: Outcome.SUCCESS,
|
||||
retry_count: 0,
|
||||
language: 'typescript',
|
||||
repo_size_category: 'medium',
|
||||
});
|
||||
|
||||
telemetry.track(event);
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Graceful shutdown
|
||||
|
||||
```typescript
|
||||
// src/main.ts
|
||||
import { telemetry } from './telemetry.js';
|
||||
|
||||
async function main() {
|
||||
// ... your application logic ...
|
||||
|
||||
// On shutdown, flush remaining events
|
||||
process.on('SIGTERM', async () => {
|
||||
await telemetry.stop();
|
||||
process.exit(0);
|
||||
});
|
||||
}
|
||||
|
||||
main();
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using Predictions
|
||||
|
||||
The telemetry API provides crowd-sourced predictions for token usage, cost, and duration based on historical data. The SDK caches these predictions locally.
|
||||
|
||||
### Pre-populate the cache
|
||||
|
||||
Call `refreshPredictions()` at startup with the dimension combinations your application uses:
|
||||
|
||||
```typescript
|
||||
import { TaskType, Provider, Complexity } from '@mosaicstack/telemetry-client';
|
||||
import { telemetry } from './telemetry.js';
|
||||
|
||||
// Fetch predictions for all combinations you'll need
|
||||
await telemetry.refreshPredictions([
|
||||
{ task_type: TaskType.IMPLEMENTATION, model: 'claude-sonnet-4-5-20250929', provider: Provider.ANTHROPIC, complexity: Complexity.LOW },
|
||||
{ task_type: TaskType.IMPLEMENTATION, model: 'claude-sonnet-4-5-20250929', provider: Provider.ANTHROPIC, complexity: Complexity.MEDIUM },
|
||||
{ task_type: TaskType.IMPLEMENTATION, model: 'claude-sonnet-4-5-20250929', provider: Provider.ANTHROPIC, complexity: Complexity.HIGH },
|
||||
{ task_type: TaskType.TESTING, model: 'claude-haiku-4-5-20251001', provider: Provider.ANTHROPIC, complexity: Complexity.LOW },
|
||||
]);
|
||||
```
|
||||
|
||||
### Read cached predictions
|
||||
|
||||
```typescript
|
||||
const prediction = telemetry.getPrediction({
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
model: 'claude-sonnet-4-5-20250929',
|
||||
provider: Provider.ANTHROPIC,
|
||||
complexity: Complexity.MEDIUM,
|
||||
});
|
||||
|
||||
if (prediction?.prediction) {
|
||||
const p = prediction.prediction;
|
||||
console.log('Token predictions (median):', {
|
||||
inputTokens: p.input_tokens.median,
|
||||
outputTokens: p.output_tokens.median,
|
||||
});
|
||||
console.log('Cost prediction:', `$${(p.cost_usd_micros.median / 1_000_000).toFixed(2)}`);
|
||||
console.log('Duration prediction:', `${(p.duration_ms.median / 1000).toFixed(0)}s`);
|
||||
console.log('Correction factors:', {
|
||||
input: p.correction_factors.input, // >1.0 means estimates tend to be too low
|
||||
output: p.correction_factors.output,
|
||||
});
|
||||
console.log('Quality:', {
|
||||
gatePassRate: `${(p.quality.gate_pass_rate * 100).toFixed(0)}%`,
|
||||
successRate: `${(p.quality.success_rate * 100).toFixed(0)}%`,
|
||||
});
|
||||
|
||||
// Check confidence level
|
||||
if (prediction.metadata.confidence === 'low') {
|
||||
console.warn('Low confidence — small sample size or fallback was applied');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Understand fallback behavior
|
||||
|
||||
When the server doesn't have enough data for an exact match, it broadens the query by dropping dimensions (e.g., ignoring complexity). The `metadata` fields tell you what happened:
|
||||
|
||||
| `fallback_level` | Meaning |
|
||||
|-------------------|---------|
|
||||
| `0` | Exact match on all dimensions |
|
||||
| `1+` | Some dimensions were dropped to find data |
|
||||
| `-1` | No prediction data available at any level |
|
||||
|
||||
---
|
||||
|
||||
## Environment-Specific Configuration
|
||||
|
||||
### Development
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: 'http://localhost:8000', // Local dev server
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
dryRun: true, // Don't send real data
|
||||
submitIntervalMs: 10_000, // Flush more frequently for debugging
|
||||
onError: (err) => console.error('[telemetry]', err),
|
||||
});
|
||||
```
|
||||
|
||||
### Production
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: 'https://tel-api.mosaicstack.dev',
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
submitIntervalMs: 300_000, // 5 min (default)
|
||||
maxRetries: 3, // Retry on transient failures
|
||||
onError: (err) => {
|
||||
// Route to your observability stack
|
||||
logger.error('Telemetry submission failed', { error: err.message });
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
### Conditional enable/disable
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
serverUrl: process.env.TELEMETRY_API_URL!,
|
||||
apiKey: process.env.TELEMETRY_API_KEY!,
|
||||
instanceId: process.env.TELEMETRY_INSTANCE_ID!,
|
||||
enabled: process.env.TELEMETRY_ENABLED !== 'false', // Opt-out via env var
|
||||
});
|
||||
```
|
||||
|
||||
When `enabled` is `false`, `track()` returns immediately without queuing.
|
||||
|
||||
---
|
||||
|
||||
## Error Handling
|
||||
|
||||
The SDK is designed to never disrupt your application:
|
||||
|
||||
- **`track()` never throws.** All errors are caught and routed to the `onError` callback.
|
||||
- **Failed batches are re-queued.** If a submission fails, events are prepended back to the queue for the next flush cycle.
|
||||
- **Exponential backoff with jitter.** Retries use 1s base delay, doubling up to 60s, with random jitter to prevent thundering herd.
|
||||
- **`Retry-After` header support.** On HTTP 429 (rate limited), the SDK respects the server's `Retry-After` header.
|
||||
- **HTTP 403 is not retried.** An API key / instance ID mismatch is a permanent error.
|
||||
|
||||
### Custom error handling
|
||||
|
||||
```typescript
|
||||
const client = new TelemetryClient({
|
||||
// ...
|
||||
onError: (error) => {
|
||||
if (error.message.includes('HTTP 403')) {
|
||||
console.error('Telemetry auth failed — check API key and instance ID');
|
||||
} else if (error.message.includes('HTTP 429')) {
|
||||
console.warn('Telemetry rate limited — events will be retried');
|
||||
} else {
|
||||
console.error('Telemetry error:', error.message);
|
||||
}
|
||||
},
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Batch Submission Behavior
|
||||
|
||||
The SDK batches events for efficiency:
|
||||
|
||||
1. `track(event)` adds the event to an in-memory queue (bounded, FIFO eviction at capacity).
|
||||
2. Every `submitIntervalMs` (default: 5 minutes), the background timer drains the queue in batches of up to `batchSize` (default/max: 100).
|
||||
3. Each batch is POSTed to `POST /v1/events/batch` with exponential backoff on failure.
|
||||
4. Calling `stop()` flushes all remaining events before resolving.
|
||||
|
||||
The server accepts up to **100 events per batch** and supports **partial success** — some events may be accepted while others (e.g., duplicates) are rejected.
|
||||
|
||||
---
|
||||
|
||||
## API Version Compatibility
|
||||
|
||||
| SDK Version | API Version | Schema Version |
|
||||
|-------------|-------------|----------------|
|
||||
| 0.1.x | v1 (`/v1/` endpoints) | `1.0` |
|
||||
|
||||
The `EventBuilder` automatically sets `schema_version: "1.0"` on every event. The SDK submits to `/v1/events/batch` and queries `/v1/predictions/batch`.
|
||||
|
||||
When the telemetry API introduces a v2, this SDK will add support in a new major release. The server supports two API versions simultaneously during a 6-month deprecation window.
|
||||
@@ -1,4 +1,5 @@
|
||||
import eslint from '@eslint/js';
|
||||
import globals from 'globals';
|
||||
import tseslint from '@typescript-eslint/eslint-plugin';
|
||||
import tsparser from '@typescript-eslint/parser';
|
||||
|
||||
@@ -12,6 +13,9 @@ export default [
|
||||
ecmaVersion: 2022,
|
||||
sourceType: 'module',
|
||||
},
|
||||
globals: {
|
||||
...globals.nodeBuiltin,
|
||||
},
|
||||
},
|
||||
plugins: {
|
||||
'@typescript-eslint': tseslint,
|
||||
|
||||
15
package.json
15
package.json
@@ -1,14 +1,17 @@
|
||||
{
|
||||
"name": "@mosaicstack/telemetry-client",
|
||||
"version": "0.1.0",
|
||||
"version": "0.1.1",
|
||||
"description": "TypeScript client SDK for Mosaic Stack Telemetry",
|
||||
"type": "module",
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts",
|
||||
"main": "./dist/cjs/index.js",
|
||||
"module": "./dist/esm/index.js",
|
||||
"types": "./dist/esm/index.d.ts",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
"types": "./dist/esm/index.d.ts",
|
||||
"import": "./dist/esm/index.js",
|
||||
"require": "./dist/cjs/index.js",
|
||||
"default": "./dist/esm/index.js"
|
||||
}
|
||||
},
|
||||
"engines": {
|
||||
@@ -19,7 +22,7 @@
|
||||
],
|
||||
"license": "MPL-2.0",
|
||||
"scripts": {
|
||||
"build": "tsc -p tsconfig.build.json",
|
||||
"build": "tsc -p tsconfig.build.json && tsc -p tsconfig.cjs.json && echo '{\"type\":\"commonjs\"}' > dist/cjs/package.json",
|
||||
"test": "vitest run",
|
||||
"test:watch": "vitest",
|
||||
"test:coverage": "vitest run --coverage",
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { TelemetryConfig, ResolvedConfig, resolveConfig } from './config.js';
|
||||
import { EventQueue } from './queue.js';
|
||||
import { BatchSubmitter } from './submitter.js';
|
||||
import { PredictionCache } from './prediction-cache.js';
|
||||
import { EventBuilder } from './event-builder.js';
|
||||
import { TaskCompletionEvent } from './types/events.js';
|
||||
import { PredictionQuery, PredictionResponse } from './types/predictions.js';
|
||||
import { BatchPredictionResponse } from './types/common.js';
|
||||
import { TelemetryConfig, ResolvedConfig, resolveConfig } from "./config.js";
|
||||
import { EventQueue } from "./queue.js";
|
||||
import { BatchSubmitter } from "./submitter.js";
|
||||
import { PredictionCache } from "./prediction-cache.js";
|
||||
import { EventBuilder } from "./event-builder.js";
|
||||
import { TaskCompletionEvent } from "./types/events.js";
|
||||
import { PredictionQuery, PredictionResponse } from "./types/predictions.js";
|
||||
import { BatchPredictionResponse } from "./types/common.js";
|
||||
|
||||
/**
|
||||
* Main telemetry client. Queues task-completion events for background
|
||||
@@ -24,7 +24,9 @@ export class TelemetryClient {
|
||||
this.config = resolveConfig(config);
|
||||
this.queue = new EventQueue(this.config.maxQueueSize);
|
||||
this.submitter = new BatchSubmitter(this.config);
|
||||
this.predictionCache = new PredictionCache(this.config.predictionCacheTtlMs);
|
||||
this.predictionCache = new PredictionCache(
|
||||
this.config.predictionCacheTtlMs,
|
||||
);
|
||||
this._eventBuilder = new EventBuilder(this.config);
|
||||
}
|
||||
|
||||
@@ -86,9 +88,9 @@ export class TelemetryClient {
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
method: "POST",
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({ queries }),
|
||||
signal: controller.signal,
|
||||
|
||||
@@ -46,7 +46,7 @@ const DEFAULT_ON_ERROR = (_error: Error): void => {
|
||||
|
||||
export function resolveConfig(config: TelemetryConfig): ResolvedConfig {
|
||||
return {
|
||||
serverUrl: config.serverUrl.replace(/\/+$/, ''),
|
||||
serverUrl: config.serverUrl.replace(/\/+$/, ""),
|
||||
apiKey: config.apiKey,
|
||||
instanceId: config.instanceId,
|
||||
enabled: config.enabled ?? true,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { ResolvedConfig } from './config.js';
|
||||
import { ResolvedConfig } from "./config.js";
|
||||
import {
|
||||
Complexity,
|
||||
Harness,
|
||||
@@ -8,7 +8,7 @@ import {
|
||||
RepoSizeCategory,
|
||||
TaskCompletionEvent,
|
||||
TaskType,
|
||||
} from './types/events.js';
|
||||
} from "./types/events.js";
|
||||
|
||||
export interface EventBuilderParams {
|
||||
task_duration_ms: number;
|
||||
@@ -54,7 +54,7 @@ export class EventBuilder {
|
||||
return {
|
||||
instance_id: this.config.instanceId,
|
||||
event_id: crypto.randomUUID(),
|
||||
schema_version: '1.0',
|
||||
schema_version: "1.0",
|
||||
timestamp: new Date().toISOString(),
|
||||
...params,
|
||||
};
|
||||
|
||||
22
src/index.ts
22
src/index.ts
@@ -1,12 +1,12 @@
|
||||
export { TelemetryClient } from './client.js';
|
||||
export { EventBuilder } from './event-builder.js';
|
||||
export { EventQueue } from './queue.js';
|
||||
export { BatchSubmitter } from './submitter.js';
|
||||
export { PredictionCache } from './prediction-cache.js';
|
||||
export { resolveConfig } from './config.js';
|
||||
export type { TelemetryConfig, ResolvedConfig } from './config.js';
|
||||
export type { EventBuilderParams } from './event-builder.js';
|
||||
export type { SubmitResult } from './submitter.js';
|
||||
export { TelemetryClient } from "./client.js";
|
||||
export { EventBuilder } from "./event-builder.js";
|
||||
export { EventQueue } from "./queue.js";
|
||||
export { BatchSubmitter } from "./submitter.js";
|
||||
export { PredictionCache } from "./prediction-cache.js";
|
||||
export { resolveConfig } from "./config.js";
|
||||
export type { TelemetryConfig, ResolvedConfig } from "./config.js";
|
||||
export type { EventBuilderParams } from "./event-builder.js";
|
||||
export type { SubmitResult } from "./submitter.js";
|
||||
|
||||
// Re-export all types
|
||||
export {
|
||||
@@ -17,7 +17,7 @@ export {
|
||||
QualityGate,
|
||||
Outcome,
|
||||
RepoSizeCategory,
|
||||
} from './types/index.js';
|
||||
} from "./types/index.js";
|
||||
|
||||
export type {
|
||||
TaskCompletionEvent,
|
||||
@@ -33,4 +33,4 @@ export type {
|
||||
BatchEventResponse,
|
||||
BatchPredictionRequest,
|
||||
BatchPredictionResponse,
|
||||
} from './types/index.js';
|
||||
} from "./types/index.js";
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { PredictionQuery, PredictionResponse } from './types/predictions.js';
|
||||
import { PredictionQuery, PredictionResponse } from "./types/predictions.js";
|
||||
|
||||
interface CacheEntry {
|
||||
response: PredictionResponse;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { TaskCompletionEvent } from './types/events.js';
|
||||
import { TaskCompletionEvent } from "./types/events.js";
|
||||
|
||||
/**
|
||||
* Bounded FIFO event queue. When the queue is full, the oldest events
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
import { ResolvedConfig } from './config.js';
|
||||
import { TaskCompletionEvent } from './types/events.js';
|
||||
import { BatchEventResponse } from './types/common.js';
|
||||
import { ResolvedConfig } from "./config.js";
|
||||
import { TaskCompletionEvent } from "./types/events.js";
|
||||
import { BatchEventResponse } from "./types/common.js";
|
||||
|
||||
const SDK_VERSION = '0.1.0';
|
||||
const SDK_VERSION = "0.1.0";
|
||||
const USER_AGENT = `mosaic-telemetry-client-js/${SDK_VERSION}`;
|
||||
|
||||
export interface SubmitResult {
|
||||
@@ -36,7 +36,7 @@ export class BatchSubmitter {
|
||||
rejected: 0,
|
||||
results: events.map((e) => ({
|
||||
event_id: e.event_id,
|
||||
status: 'accepted' as const,
|
||||
status: "accepted" as const,
|
||||
})),
|
||||
},
|
||||
};
|
||||
@@ -68,7 +68,7 @@ export class BatchSubmitter {
|
||||
|
||||
return {
|
||||
success: false,
|
||||
error: lastError ?? new Error('Max retries exceeded'),
|
||||
error: lastError ?? new Error("Max retries exceeded"),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -84,19 +84,21 @@ export class BatchSubmitter {
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'POST',
|
||||
method: "POST",
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
"Content-Type": "application/json",
|
||||
Authorization: `Bearer ${this.config.apiKey}`,
|
||||
'User-Agent': USER_AGENT,
|
||||
"User-Agent": USER_AGENT,
|
||||
},
|
||||
body: JSON.stringify({ events }),
|
||||
signal: controller.signal,
|
||||
});
|
||||
|
||||
if (response.status === 429) {
|
||||
const retryAfter = response.headers.get('Retry-After');
|
||||
const retryAfterMs = retryAfter ? parseInt(retryAfter, 10) * 1000 : 5000;
|
||||
const retryAfter = response.headers.get("Retry-After");
|
||||
const retryAfterMs = retryAfter
|
||||
? parseInt(retryAfter, 10) * 1000
|
||||
: 5000;
|
||||
return { success: false, retryAfterMs };
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { TaskCompletionEvent } from './events.js';
|
||||
import { PredictionQuery, PredictionResponse } from './predictions.js';
|
||||
import { TaskCompletionEvent } from "./events.js";
|
||||
import { PredictionQuery, PredictionResponse } from "./predictions.js";
|
||||
|
||||
export interface BatchEventRequest {
|
||||
events: TaskCompletionEvent[];
|
||||
@@ -7,7 +7,7 @@ export interface BatchEventRequest {
|
||||
|
||||
export interface BatchEventResult {
|
||||
event_id: string;
|
||||
status: 'accepted' | 'rejected';
|
||||
status: "accepted" | "rejected";
|
||||
error?: string | null;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,67 +1,67 @@
|
||||
export enum TaskType {
|
||||
PLANNING = 'planning',
|
||||
IMPLEMENTATION = 'implementation',
|
||||
CODE_REVIEW = 'code_review',
|
||||
TESTING = 'testing',
|
||||
DEBUGGING = 'debugging',
|
||||
REFACTORING = 'refactoring',
|
||||
DOCUMENTATION = 'documentation',
|
||||
CONFIGURATION = 'configuration',
|
||||
SECURITY_AUDIT = 'security_audit',
|
||||
UNKNOWN = 'unknown',
|
||||
PLANNING = "planning",
|
||||
IMPLEMENTATION = "implementation",
|
||||
CODE_REVIEW = "code_review",
|
||||
TESTING = "testing",
|
||||
DEBUGGING = "debugging",
|
||||
REFACTORING = "refactoring",
|
||||
DOCUMENTATION = "documentation",
|
||||
CONFIGURATION = "configuration",
|
||||
SECURITY_AUDIT = "security_audit",
|
||||
UNKNOWN = "unknown",
|
||||
}
|
||||
|
||||
export enum Complexity {
|
||||
LOW = 'low',
|
||||
MEDIUM = 'medium',
|
||||
HIGH = 'high',
|
||||
CRITICAL = 'critical',
|
||||
LOW = "low",
|
||||
MEDIUM = "medium",
|
||||
HIGH = "high",
|
||||
CRITICAL = "critical",
|
||||
}
|
||||
|
||||
export enum Harness {
|
||||
CLAUDE_CODE = 'claude_code',
|
||||
OPENCODE = 'opencode',
|
||||
KILO_CODE = 'kilo_code',
|
||||
AIDER = 'aider',
|
||||
API_DIRECT = 'api_direct',
|
||||
OLLAMA_LOCAL = 'ollama_local',
|
||||
CUSTOM = 'custom',
|
||||
UNKNOWN = 'unknown',
|
||||
CLAUDE_CODE = "claude_code",
|
||||
OPENCODE = "opencode",
|
||||
KILO_CODE = "kilo_code",
|
||||
AIDER = "aider",
|
||||
API_DIRECT = "api_direct",
|
||||
OLLAMA_LOCAL = "ollama_local",
|
||||
CUSTOM = "custom",
|
||||
UNKNOWN = "unknown",
|
||||
}
|
||||
|
||||
export enum Provider {
|
||||
ANTHROPIC = 'anthropic',
|
||||
OPENAI = 'openai',
|
||||
OPENROUTER = 'openrouter',
|
||||
OLLAMA = 'ollama',
|
||||
GOOGLE = 'google',
|
||||
MISTRAL = 'mistral',
|
||||
CUSTOM = 'custom',
|
||||
UNKNOWN = 'unknown',
|
||||
ANTHROPIC = "anthropic",
|
||||
OPENAI = "openai",
|
||||
OPENROUTER = "openrouter",
|
||||
OLLAMA = "ollama",
|
||||
GOOGLE = "google",
|
||||
MISTRAL = "mistral",
|
||||
CUSTOM = "custom",
|
||||
UNKNOWN = "unknown",
|
||||
}
|
||||
|
||||
export enum QualityGate {
|
||||
BUILD = 'build',
|
||||
LINT = 'lint',
|
||||
TEST = 'test',
|
||||
COVERAGE = 'coverage',
|
||||
TYPECHECK = 'typecheck',
|
||||
SECURITY = 'security',
|
||||
BUILD = "build",
|
||||
LINT = "lint",
|
||||
TEST = "test",
|
||||
COVERAGE = "coverage",
|
||||
TYPECHECK = "typecheck",
|
||||
SECURITY = "security",
|
||||
}
|
||||
|
||||
export enum Outcome {
|
||||
SUCCESS = 'success',
|
||||
FAILURE = 'failure',
|
||||
PARTIAL = 'partial',
|
||||
TIMEOUT = 'timeout',
|
||||
SUCCESS = "success",
|
||||
FAILURE = "failure",
|
||||
PARTIAL = "partial",
|
||||
TIMEOUT = "timeout",
|
||||
}
|
||||
|
||||
export enum RepoSizeCategory {
|
||||
TINY = 'tiny',
|
||||
SMALL = 'small',
|
||||
MEDIUM = 'medium',
|
||||
LARGE = 'large',
|
||||
HUGE = 'huge',
|
||||
TINY = "tiny",
|
||||
SMALL = "small",
|
||||
MEDIUM = "medium",
|
||||
LARGE = "large",
|
||||
HUGE = "huge",
|
||||
}
|
||||
|
||||
export interface TaskCompletionEvent {
|
||||
|
||||
@@ -7,7 +7,7 @@ export {
|
||||
Outcome,
|
||||
RepoSizeCategory,
|
||||
type TaskCompletionEvent,
|
||||
} from './events.js';
|
||||
} from "./events.js";
|
||||
|
||||
export {
|
||||
type TokenDistribution,
|
||||
@@ -17,7 +17,7 @@ export {
|
||||
type PredictionMetadata,
|
||||
type PredictionResponse,
|
||||
type PredictionQuery,
|
||||
} from './predictions.js';
|
||||
} from "./predictions.js";
|
||||
|
||||
export {
|
||||
type BatchEventRequest,
|
||||
@@ -25,4 +25,4 @@ export {
|
||||
type BatchEventResponse,
|
||||
type BatchPredictionRequest,
|
||||
type BatchPredictionResponse,
|
||||
} from './common.js';
|
||||
} from "./common.js";
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { Complexity, Provider, TaskType } from './events.js';
|
||||
import { Complexity, Provider, TaskType } from "./events.js";
|
||||
|
||||
export interface TokenDistribution {
|
||||
p10: number;
|
||||
@@ -30,7 +30,7 @@ export interface PredictionData {
|
||||
export interface PredictionMetadata {
|
||||
sample_size: number;
|
||||
fallback_level: number;
|
||||
confidence: 'none' | 'low' | 'medium' | 'high';
|
||||
confidence: "none" | "low" | "medium" | "high";
|
||||
last_updated: string | null;
|
||||
dimensions_matched?: Record<string, string | null> | null;
|
||||
fallback_note?: string | null;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { TelemetryClient } from '../src/client.js';
|
||||
import { TelemetryConfig } from '../src/config.js';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { TelemetryClient } from "../src/client.js";
|
||||
import { TelemetryConfig } from "../src/config.js";
|
||||
import {
|
||||
TaskCompletionEvent,
|
||||
TaskType,
|
||||
@@ -8,14 +8,17 @@ import {
|
||||
Harness,
|
||||
Provider,
|
||||
Outcome,
|
||||
} from '../src/types/events.js';
|
||||
import { PredictionQuery, PredictionResponse } from '../src/types/predictions.js';
|
||||
} from "../src/types/events.js";
|
||||
import {
|
||||
PredictionQuery,
|
||||
PredictionResponse,
|
||||
} from "../src/types/predictions.js";
|
||||
|
||||
function makeConfig(overrides: Partial<TelemetryConfig> = {}): TelemetryConfig {
|
||||
return {
|
||||
serverUrl: 'https://tel.example.com',
|
||||
apiKey: 'a'.repeat(64),
|
||||
instanceId: 'test-instance',
|
||||
serverUrl: "https://tel.example.com",
|
||||
apiKey: "a".repeat(64),
|
||||
instanceId: "test-instance",
|
||||
submitIntervalMs: 60_000,
|
||||
maxQueueSize: 100,
|
||||
batchSize: 10,
|
||||
@@ -25,17 +28,17 @@ function makeConfig(overrides: Partial<TelemetryConfig> = {}): TelemetryConfig {
|
||||
};
|
||||
}
|
||||
|
||||
function makeEvent(id = 'evt-1'): TaskCompletionEvent {
|
||||
function makeEvent(id = "evt-1"): TaskCompletionEvent {
|
||||
return {
|
||||
instance_id: 'test-instance',
|
||||
instance_id: "test-instance",
|
||||
event_id: id,
|
||||
schema_version: '1.0',
|
||||
schema_version: "1.0",
|
||||
timestamp: new Date().toISOString(),
|
||||
task_duration_ms: 5000,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.MEDIUM,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 1000,
|
||||
estimated_output_tokens: 500,
|
||||
@@ -57,7 +60,7 @@ function makeEvent(id = 'evt-1'): TaskCompletionEvent {
|
||||
function makeQuery(): PredictionQuery {
|
||||
return {
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
complexity: Complexity.MEDIUM,
|
||||
};
|
||||
@@ -76,20 +79,20 @@ function makePredictionResponse(): PredictionResponse {
|
||||
metadata: {
|
||||
sample_size: 100,
|
||||
fallback_level: 0,
|
||||
confidence: 'high',
|
||||
confidence: "high",
|
||||
last_updated: new Date().toISOString(),
|
||||
cache_hit: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe('TelemetryClient', () => {
|
||||
describe("TelemetryClient", () => {
|
||||
let fetchSpy: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
fetchSpy = vi.fn();
|
||||
vi.stubGlobal('fetch', fetchSpy);
|
||||
vi.stubGlobal("fetch", fetchSpy);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -97,8 +100,8 @@ describe('TelemetryClient', () => {
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
describe('start/stop lifecycle', () => {
|
||||
it('should start and stop cleanly', async () => {
|
||||
describe("start/stop lifecycle", () => {
|
||||
it("should start and stop cleanly", async () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
|
||||
expect(client.isRunning).toBe(false);
|
||||
@@ -109,26 +112,26 @@ describe('TelemetryClient', () => {
|
||||
expect(client.isRunning).toBe(false);
|
||||
});
|
||||
|
||||
it('should be idempotent on start', () => {
|
||||
it("should be idempotent on start", () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
client.start();
|
||||
client.start(); // Should not throw or create double intervals
|
||||
expect(client.isRunning).toBe(true);
|
||||
});
|
||||
|
||||
it('should be idempotent on stop', async () => {
|
||||
it("should be idempotent on stop", async () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
await client.stop();
|
||||
await client.stop(); // Should not throw
|
||||
expect(client.isRunning).toBe(false);
|
||||
});
|
||||
|
||||
it('should flush events on stop', async () => {
|
||||
it("should flush events on stop", async () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
client.start();
|
||||
|
||||
client.track(makeEvent('e1'));
|
||||
client.track(makeEvent('e2'));
|
||||
client.track(makeEvent("e1"));
|
||||
client.track(makeEvent("e2"));
|
||||
expect(client.queueSize).toBe(2);
|
||||
|
||||
await client.stop();
|
||||
@@ -137,21 +140,21 @@ describe('TelemetryClient', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('track()', () => {
|
||||
it('should queue events', () => {
|
||||
describe("track()", () => {
|
||||
it("should queue events", () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
client.track(makeEvent('e1'));
|
||||
client.track(makeEvent('e2'));
|
||||
client.track(makeEvent("e1"));
|
||||
client.track(makeEvent("e2"));
|
||||
expect(client.queueSize).toBe(2);
|
||||
});
|
||||
|
||||
it('should silently drop events when disabled', () => {
|
||||
it("should silently drop events when disabled", () => {
|
||||
const client = new TelemetryClient(makeConfig({ enabled: false }));
|
||||
client.track(makeEvent());
|
||||
expect(client.queueSize).toBe(0);
|
||||
});
|
||||
|
||||
it('should never throw even on internal error', () => {
|
||||
it("should never throw even on internal error", () => {
|
||||
const errorFn = vi.fn();
|
||||
const client = new TelemetryClient(
|
||||
makeConfig({ onError: errorFn, maxQueueSize: 0 }),
|
||||
@@ -163,14 +166,14 @@ describe('TelemetryClient', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('predictions', () => {
|
||||
it('should return null for uncached prediction', () => {
|
||||
describe("predictions", () => {
|
||||
it("should return null for uncached prediction", () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
const result = client.getPrediction(makeQuery());
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should return cached prediction after refresh', async () => {
|
||||
it("should return cached prediction after refresh", async () => {
|
||||
const predictionResponse = makePredictionResponse();
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
@@ -190,8 +193,8 @@ describe('TelemetryClient', () => {
|
||||
expect(result).toEqual(predictionResponse);
|
||||
});
|
||||
|
||||
it('should handle refresh error gracefully', async () => {
|
||||
fetchSpy.mockRejectedValueOnce(new Error('Network error'));
|
||||
it("should handle refresh error gracefully", async () => {
|
||||
fetchSpy.mockRejectedValueOnce(new Error("Network error"));
|
||||
|
||||
const errorFn = vi.fn();
|
||||
const client = new TelemetryClient(
|
||||
@@ -203,11 +206,11 @@ describe('TelemetryClient', () => {
|
||||
expect(errorFn).toHaveBeenCalledWith(expect.any(Error));
|
||||
});
|
||||
|
||||
it('should handle non-ok HTTP response on refresh', async () => {
|
||||
it("should handle non-ok HTTP response on refresh", async () => {
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
status: 500,
|
||||
statusText: 'Internal Server Error',
|
||||
statusText: "Internal Server Error",
|
||||
});
|
||||
|
||||
const errorFn = vi.fn();
|
||||
@@ -220,14 +223,14 @@ describe('TelemetryClient', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('background flush', () => {
|
||||
it('should trigger flush on interval', async () => {
|
||||
describe("background flush", () => {
|
||||
it("should trigger flush on interval", async () => {
|
||||
const client = new TelemetryClient(
|
||||
makeConfig({ submitIntervalMs: 10_000 }),
|
||||
);
|
||||
client.start();
|
||||
|
||||
client.track(makeEvent('e1'));
|
||||
client.track(makeEvent("e1"));
|
||||
expect(client.queueSize).toBe(1);
|
||||
|
||||
// Advance past submit interval
|
||||
@@ -240,13 +243,13 @@ describe('TelemetryClient', () => {
|
||||
});
|
||||
});
|
||||
|
||||
describe('flush error handling', () => {
|
||||
it('should re-enqueue events on submit failure', async () => {
|
||||
describe("flush error handling", () => {
|
||||
it("should re-enqueue events on submit failure", async () => {
|
||||
// Use non-dryRun mode to actually hit the submitter
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
status: 500,
|
||||
statusText: 'Internal Server Error',
|
||||
statusText: "Internal Server Error",
|
||||
});
|
||||
|
||||
const errorFn = vi.fn();
|
||||
@@ -254,7 +257,7 @@ describe('TelemetryClient', () => {
|
||||
makeConfig({ dryRun: false, maxRetries: 0, onError: errorFn }),
|
||||
);
|
||||
|
||||
client.track(makeEvent('e1'));
|
||||
client.track(makeEvent("e1"));
|
||||
expect(client.queueSize).toBe(1);
|
||||
|
||||
// Start and trigger flush
|
||||
@@ -267,9 +270,9 @@ describe('TelemetryClient', () => {
|
||||
await client.stop();
|
||||
});
|
||||
|
||||
it('should handle onError callback that throws', async () => {
|
||||
it("should handle onError callback that throws", async () => {
|
||||
const throwingErrorFn = () => {
|
||||
throw new Error('Error handler broke');
|
||||
throw new Error("Error handler broke");
|
||||
};
|
||||
const client = new TelemetryClient(
|
||||
makeConfig({ onError: throwingErrorFn, enabled: false }),
|
||||
@@ -278,13 +281,15 @@ describe('TelemetryClient', () => {
|
||||
// This should not throw even though onError throws
|
||||
// Force an error path by calling track when disabled (no error),
|
||||
// but we can test via refreshPredictions
|
||||
fetchSpy.mockRejectedValueOnce(new Error('fail'));
|
||||
await expect(client.refreshPredictions([makeQuery()])).resolves.not.toThrow();
|
||||
fetchSpy.mockRejectedValueOnce(new Error("fail"));
|
||||
await expect(
|
||||
client.refreshPredictions([makeQuery()]),
|
||||
).resolves.not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
describe('event builder', () => {
|
||||
it('should expose an event builder', () => {
|
||||
describe("event builder", () => {
|
||||
it("should expose an event builder", () => {
|
||||
const client = new TelemetryClient(makeConfig());
|
||||
expect(client.eventBuilder).toBeDefined();
|
||||
|
||||
@@ -293,7 +298,7 @@ describe('TelemetryClient', () => {
|
||||
task_type: TaskType.TESTING,
|
||||
complexity: Complexity.LOW,
|
||||
harness: Harness.AIDER,
|
||||
model: 'gpt-4',
|
||||
model: "gpt-4",
|
||||
provider: Provider.OPENAI,
|
||||
estimated_input_tokens: 100,
|
||||
estimated_output_tokens: 50,
|
||||
@@ -311,8 +316,8 @@ describe('TelemetryClient', () => {
|
||||
retry_count: 0,
|
||||
});
|
||||
|
||||
expect(event.instance_id).toBe('test-instance');
|
||||
expect(event.schema_version).toBe('1.0');
|
||||
expect(event.instance_id).toBe("test-instance");
|
||||
expect(event.schema_version).toBe("1.0");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { describe, it, expect, vi, afterEach } from 'vitest';
|
||||
import { EventBuilder } from '../src/event-builder.js';
|
||||
import { ResolvedConfig } from '../src/config.js';
|
||||
import { describe, it, expect, vi, afterEach } from "vitest";
|
||||
import { EventBuilder } from "../src/event-builder.js";
|
||||
import { ResolvedConfig } from "../src/config.js";
|
||||
import {
|
||||
TaskType,
|
||||
Complexity,
|
||||
@@ -9,13 +9,13 @@ import {
|
||||
Outcome,
|
||||
QualityGate,
|
||||
RepoSizeCategory,
|
||||
} from '../src/types/events.js';
|
||||
} from "../src/types/events.js";
|
||||
|
||||
function makeConfig(): ResolvedConfig {
|
||||
return {
|
||||
serverUrl: 'https://tel.example.com',
|
||||
apiKey: 'a'.repeat(64),
|
||||
instanceId: 'my-instance-uuid',
|
||||
serverUrl: "https://tel.example.com",
|
||||
apiKey: "a".repeat(64),
|
||||
instanceId: "my-instance-uuid",
|
||||
enabled: true,
|
||||
submitIntervalMs: 300_000,
|
||||
maxQueueSize: 1000,
|
||||
@@ -28,19 +28,19 @@ function makeConfig(): ResolvedConfig {
|
||||
};
|
||||
}
|
||||
|
||||
describe('EventBuilder', () => {
|
||||
describe("EventBuilder", () => {
|
||||
afterEach(() => {
|
||||
vi.restoreAllMocks();
|
||||
});
|
||||
|
||||
it('should build a complete TaskCompletionEvent', () => {
|
||||
it("should build a complete TaskCompletionEvent", () => {
|
||||
const builder = new EventBuilder(makeConfig());
|
||||
const event = builder.build({
|
||||
task_duration_ms: 15000,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.HIGH,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 2000,
|
||||
estimated_output_tokens: 1000,
|
||||
@@ -49,37 +49,41 @@ describe('EventBuilder', () => {
|
||||
estimated_cost_usd_micros: 100000,
|
||||
actual_cost_usd_micros: 110000,
|
||||
quality_gate_passed: true,
|
||||
quality_gates_run: [QualityGate.BUILD, QualityGate.TEST, QualityGate.LINT],
|
||||
quality_gates_run: [
|
||||
QualityGate.BUILD,
|
||||
QualityGate.TEST,
|
||||
QualityGate.LINT,
|
||||
],
|
||||
quality_gates_failed: [],
|
||||
context_compactions: 2,
|
||||
context_rotations: 1,
|
||||
context_utilization_final: 0.75,
|
||||
outcome: Outcome.SUCCESS,
|
||||
retry_count: 0,
|
||||
language: 'typescript',
|
||||
language: "typescript",
|
||||
repo_size_category: RepoSizeCategory.MEDIUM,
|
||||
});
|
||||
|
||||
expect(event.task_type).toBe(TaskType.IMPLEMENTATION);
|
||||
expect(event.complexity).toBe(Complexity.HIGH);
|
||||
expect(event.model).toBe('claude-3-opus');
|
||||
expect(event.model).toBe("claude-3-opus");
|
||||
expect(event.quality_gates_run).toEqual([
|
||||
QualityGate.BUILD,
|
||||
QualityGate.TEST,
|
||||
QualityGate.LINT,
|
||||
]);
|
||||
expect(event.language).toBe('typescript');
|
||||
expect(event.language).toBe("typescript");
|
||||
expect(event.repo_size_category).toBe(RepoSizeCategory.MEDIUM);
|
||||
});
|
||||
|
||||
it('should auto-generate event_id as UUID', () => {
|
||||
it("should auto-generate event_id as UUID", () => {
|
||||
const builder = new EventBuilder(makeConfig());
|
||||
const event = builder.build({
|
||||
task_duration_ms: 1000,
|
||||
task_type: TaskType.TESTING,
|
||||
complexity: Complexity.LOW,
|
||||
harness: Harness.AIDER,
|
||||
model: 'gpt-4',
|
||||
model: "gpt-4",
|
||||
provider: Provider.OPENAI,
|
||||
estimated_input_tokens: 100,
|
||||
estimated_output_tokens: 50,
|
||||
@@ -108,7 +112,7 @@ describe('EventBuilder', () => {
|
||||
task_type: TaskType.TESTING,
|
||||
complexity: Complexity.LOW,
|
||||
harness: Harness.AIDER,
|
||||
model: 'gpt-4',
|
||||
model: "gpt-4",
|
||||
provider: Provider.OPENAI,
|
||||
estimated_input_tokens: 100,
|
||||
estimated_output_tokens: 50,
|
||||
@@ -129,8 +133,8 @@ describe('EventBuilder', () => {
|
||||
expect(event.event_id).not.toBe(event2.event_id);
|
||||
});
|
||||
|
||||
it('should auto-set timestamp to ISO 8601', () => {
|
||||
const now = new Date('2026-02-07T10:00:00.000Z');
|
||||
it("should auto-set timestamp to ISO 8601", () => {
|
||||
const now = new Date("2026-02-07T10:00:00.000Z");
|
||||
vi.setSystemTime(now);
|
||||
|
||||
const builder = new EventBuilder(makeConfig());
|
||||
@@ -139,7 +143,7 @@ describe('EventBuilder', () => {
|
||||
task_type: TaskType.DEBUGGING,
|
||||
complexity: Complexity.MEDIUM,
|
||||
harness: Harness.OPENCODE,
|
||||
model: 'claude-3-sonnet',
|
||||
model: "claude-3-sonnet",
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 500,
|
||||
estimated_output_tokens: 200,
|
||||
@@ -157,10 +161,10 @@ describe('EventBuilder', () => {
|
||||
retry_count: 1,
|
||||
});
|
||||
|
||||
expect(event.timestamp).toBe('2026-02-07T10:00:00.000Z');
|
||||
expect(event.timestamp).toBe("2026-02-07T10:00:00.000Z");
|
||||
});
|
||||
|
||||
it('should set instance_id from config', () => {
|
||||
it("should set instance_id from config", () => {
|
||||
const config = makeConfig();
|
||||
const builder = new EventBuilder(config);
|
||||
const event = builder.build({
|
||||
@@ -168,7 +172,7 @@ describe('EventBuilder', () => {
|
||||
task_type: TaskType.PLANNING,
|
||||
complexity: Complexity.LOW,
|
||||
harness: Harness.UNKNOWN,
|
||||
model: 'test-model',
|
||||
model: "test-model",
|
||||
provider: Provider.UNKNOWN,
|
||||
estimated_input_tokens: 0,
|
||||
estimated_output_tokens: 0,
|
||||
@@ -186,17 +190,17 @@ describe('EventBuilder', () => {
|
||||
retry_count: 0,
|
||||
});
|
||||
|
||||
expect(event.instance_id).toBe('my-instance-uuid');
|
||||
expect(event.instance_id).toBe("my-instance-uuid");
|
||||
});
|
||||
|
||||
it('should set schema_version to 1.0', () => {
|
||||
it("should set schema_version to 1.0", () => {
|
||||
const builder = new EventBuilder(makeConfig());
|
||||
const event = builder.build({
|
||||
task_duration_ms: 1000,
|
||||
task_type: TaskType.REFACTORING,
|
||||
complexity: Complexity.CRITICAL,
|
||||
harness: Harness.KILO_CODE,
|
||||
model: 'gemini-pro',
|
||||
model: "gemini-pro",
|
||||
provider: Provider.GOOGLE,
|
||||
estimated_input_tokens: 3000,
|
||||
estimated_output_tokens: 2000,
|
||||
@@ -214,6 +218,6 @@ describe('EventBuilder', () => {
|
||||
retry_count: 0,
|
||||
});
|
||||
|
||||
expect(event.schema_version).toBe('1.0');
|
||||
expect(event.schema_version).toBe("1.0");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { PredictionCache } from '../src/prediction-cache.js';
|
||||
import { PredictionQuery, PredictionResponse } from '../src/types/predictions.js';
|
||||
import { TaskType, Complexity, Provider } from '../src/types/events.js';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { PredictionCache } from "../src/prediction-cache.js";
|
||||
import {
|
||||
PredictionQuery,
|
||||
PredictionResponse,
|
||||
} from "../src/types/predictions.js";
|
||||
import { TaskType, Complexity, Provider } from "../src/types/events.js";
|
||||
|
||||
function makeQuery(overrides: Partial<PredictionQuery> = {}): PredictionQuery {
|
||||
return {
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
complexity: Complexity.MEDIUM,
|
||||
...overrides,
|
||||
@@ -26,14 +29,14 @@ function makeResponse(sampleSize = 100): PredictionResponse {
|
||||
metadata: {
|
||||
sample_size: sampleSize,
|
||||
fallback_level: 0,
|
||||
confidence: 'high',
|
||||
confidence: "high",
|
||||
last_updated: new Date().toISOString(),
|
||||
cache_hit: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
describe('PredictionCache', () => {
|
||||
describe("PredictionCache", () => {
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
});
|
||||
@@ -42,13 +45,13 @@ describe('PredictionCache', () => {
|
||||
vi.useRealTimers();
|
||||
});
|
||||
|
||||
it('should return null for cache miss', () => {
|
||||
it("should return null for cache miss", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
const result = cache.get(makeQuery());
|
||||
expect(result).toBeNull();
|
||||
});
|
||||
|
||||
it('should return cached prediction on hit', () => {
|
||||
it("should return cached prediction on hit", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
const query = makeQuery();
|
||||
const response = makeResponse();
|
||||
@@ -59,7 +62,7 @@ describe('PredictionCache', () => {
|
||||
expect(result).toEqual(response);
|
||||
});
|
||||
|
||||
it('should return null when entry has expired', () => {
|
||||
it("should return null when entry has expired", () => {
|
||||
const cache = new PredictionCache(60_000); // 60s TTL
|
||||
const query = makeQuery();
|
||||
const response = makeResponse();
|
||||
@@ -73,7 +76,7 @@ describe('PredictionCache', () => {
|
||||
expect(cache.get(query)).toBeNull();
|
||||
});
|
||||
|
||||
it('should differentiate queries by all fields', () => {
|
||||
it("should differentiate queries by all fields", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
|
||||
const query1 = makeQuery({ task_type: TaskType.IMPLEMENTATION });
|
||||
@@ -88,7 +91,7 @@ describe('PredictionCache', () => {
|
||||
expect(cache.get(query2)?.metadata.sample_size).toBe(200);
|
||||
});
|
||||
|
||||
it('should clear all entries', () => {
|
||||
it("should clear all entries", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
cache.set(makeQuery(), makeResponse());
|
||||
cache.set(makeQuery({ task_type: TaskType.TESTING }), makeResponse());
|
||||
@@ -99,7 +102,7 @@ describe('PredictionCache', () => {
|
||||
expect(cache.get(makeQuery())).toBeNull();
|
||||
});
|
||||
|
||||
it('should overwrite existing entry with same query', () => {
|
||||
it("should overwrite existing entry with same query", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
const query = makeQuery();
|
||||
|
||||
@@ -110,7 +113,7 @@ describe('PredictionCache', () => {
|
||||
expect(cache.get(query)?.metadata.sample_size).toBe(200);
|
||||
});
|
||||
|
||||
it('should clean expired entry on get', () => {
|
||||
it("should clean expired entry on get", () => {
|
||||
const cache = new PredictionCache(60_000);
|
||||
const query = makeQuery();
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { describe, it, expect } from 'vitest';
|
||||
import { EventQueue } from '../src/queue.js';
|
||||
import { describe, it, expect } from "vitest";
|
||||
import { EventQueue } from "../src/queue.js";
|
||||
import {
|
||||
TaskType,
|
||||
Complexity,
|
||||
@@ -7,19 +7,19 @@ import {
|
||||
Provider,
|
||||
Outcome,
|
||||
TaskCompletionEvent,
|
||||
} from '../src/types/events.js';
|
||||
} from "../src/types/events.js";
|
||||
|
||||
function makeEvent(id: string): TaskCompletionEvent {
|
||||
return {
|
||||
instance_id: 'test-instance',
|
||||
instance_id: "test-instance",
|
||||
event_id: id,
|
||||
schema_version: '1.0',
|
||||
schema_version: "1.0",
|
||||
timestamp: new Date().toISOString(),
|
||||
task_duration_ms: 1000,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.MEDIUM,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 1000,
|
||||
estimated_output_tokens: 500,
|
||||
@@ -38,10 +38,10 @@ function makeEvent(id: string): TaskCompletionEvent {
|
||||
};
|
||||
}
|
||||
|
||||
describe('EventQueue', () => {
|
||||
it('should enqueue and drain events', () => {
|
||||
describe("EventQueue", () => {
|
||||
it("should enqueue and drain events", () => {
|
||||
const queue = new EventQueue(10);
|
||||
const event = makeEvent('e1');
|
||||
const event = makeEvent("e1");
|
||||
|
||||
queue.enqueue(event);
|
||||
expect(queue.size).toBe(1);
|
||||
@@ -49,102 +49,102 @@ describe('EventQueue', () => {
|
||||
|
||||
const drained = queue.drain(10);
|
||||
expect(drained).toHaveLength(1);
|
||||
expect(drained[0].event_id).toBe('e1');
|
||||
expect(drained[0].event_id).toBe("e1");
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
});
|
||||
|
||||
it('should respect maxSize with FIFO eviction', () => {
|
||||
it("should respect maxSize with FIFO eviction", () => {
|
||||
const queue = new EventQueue(3);
|
||||
|
||||
queue.enqueue(makeEvent('e1'));
|
||||
queue.enqueue(makeEvent('e2'));
|
||||
queue.enqueue(makeEvent('e3'));
|
||||
queue.enqueue(makeEvent("e1"));
|
||||
queue.enqueue(makeEvent("e2"));
|
||||
queue.enqueue(makeEvent("e3"));
|
||||
expect(queue.size).toBe(3);
|
||||
|
||||
// Adding a 4th should evict the oldest (e1)
|
||||
queue.enqueue(makeEvent('e4'));
|
||||
queue.enqueue(makeEvent("e4"));
|
||||
expect(queue.size).toBe(3);
|
||||
|
||||
const drained = queue.drain(10);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(['e2', 'e3', 'e4']);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(["e2", "e3", "e4"]);
|
||||
});
|
||||
|
||||
it('should drain up to maxItems', () => {
|
||||
it("should drain up to maxItems", () => {
|
||||
const queue = new EventQueue(10);
|
||||
queue.enqueue(makeEvent('e1'));
|
||||
queue.enqueue(makeEvent('e2'));
|
||||
queue.enqueue(makeEvent('e3'));
|
||||
queue.enqueue(makeEvent("e1"));
|
||||
queue.enqueue(makeEvent("e2"));
|
||||
queue.enqueue(makeEvent("e3"));
|
||||
|
||||
const drained = queue.drain(2);
|
||||
expect(drained).toHaveLength(2);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(['e1', 'e2']);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(["e1", "e2"]);
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should remove drained items from the queue', () => {
|
||||
it("should remove drained items from the queue", () => {
|
||||
const queue = new EventQueue(10);
|
||||
queue.enqueue(makeEvent('e1'));
|
||||
queue.enqueue(makeEvent('e2'));
|
||||
queue.enqueue(makeEvent("e1"));
|
||||
queue.enqueue(makeEvent("e2"));
|
||||
|
||||
queue.drain(1);
|
||||
expect(queue.size).toBe(1);
|
||||
|
||||
const remaining = queue.drain(10);
|
||||
expect(remaining[0].event_id).toBe('e2');
|
||||
expect(remaining[0].event_id).toBe("e2");
|
||||
});
|
||||
|
||||
it('should report isEmpty correctly', () => {
|
||||
it("should report isEmpty correctly", () => {
|
||||
const queue = new EventQueue(5);
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
|
||||
queue.enqueue(makeEvent('e1'));
|
||||
queue.enqueue(makeEvent("e1"));
|
||||
expect(queue.isEmpty).toBe(false);
|
||||
|
||||
queue.drain(1);
|
||||
expect(queue.isEmpty).toBe(true);
|
||||
});
|
||||
|
||||
it('should report size correctly', () => {
|
||||
it("should report size correctly", () => {
|
||||
const queue = new EventQueue(10);
|
||||
expect(queue.size).toBe(0);
|
||||
|
||||
queue.enqueue(makeEvent('e1'));
|
||||
queue.enqueue(makeEvent("e1"));
|
||||
expect(queue.size).toBe(1);
|
||||
|
||||
queue.enqueue(makeEvent('e2'));
|
||||
queue.enqueue(makeEvent("e2"));
|
||||
expect(queue.size).toBe(2);
|
||||
|
||||
queue.drain(1);
|
||||
expect(queue.size).toBe(1);
|
||||
});
|
||||
|
||||
it('should return empty array when draining empty queue', () => {
|
||||
it("should return empty array when draining empty queue", () => {
|
||||
const queue = new EventQueue(5);
|
||||
const drained = queue.drain(10);
|
||||
expect(drained).toEqual([]);
|
||||
});
|
||||
|
||||
it('should prepend events to the front of the queue', () => {
|
||||
it("should prepend events to the front of the queue", () => {
|
||||
const queue = new EventQueue(10);
|
||||
queue.enqueue(makeEvent('e3'));
|
||||
queue.enqueue(makeEvent("e3"));
|
||||
|
||||
queue.prepend([makeEvent('e1'), makeEvent('e2')]);
|
||||
queue.prepend([makeEvent("e1"), makeEvent("e2")]);
|
||||
expect(queue.size).toBe(3);
|
||||
|
||||
const drained = queue.drain(10);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(['e1', 'e2', 'e3']);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(["e1", "e2", "e3"]);
|
||||
});
|
||||
|
||||
it('should respect maxSize when prepending', () => {
|
||||
it("should respect maxSize when prepending", () => {
|
||||
const queue = new EventQueue(3);
|
||||
queue.enqueue(makeEvent('e3'));
|
||||
queue.enqueue(makeEvent('e4'));
|
||||
queue.enqueue(makeEvent("e3"));
|
||||
queue.enqueue(makeEvent("e4"));
|
||||
|
||||
// Only 1 slot available, so only first event should be prepended
|
||||
queue.prepend([makeEvent('e1'), makeEvent('e2')]);
|
||||
queue.prepend([makeEvent("e1"), makeEvent("e2")]);
|
||||
expect(queue.size).toBe(3);
|
||||
|
||||
const drained = queue.drain(10);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(['e1', 'e3', 'e4']);
|
||||
expect(drained.map((e) => e.event_id)).toEqual(["e1", "e3", "e4"]);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
|
||||
import { BatchSubmitter } from '../src/submitter.js';
|
||||
import { ResolvedConfig } from '../src/config.js';
|
||||
import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
|
||||
import { BatchSubmitter } from "../src/submitter.js";
|
||||
import { ResolvedConfig } from "../src/config.js";
|
||||
import {
|
||||
TaskCompletionEvent,
|
||||
TaskType,
|
||||
@@ -8,13 +8,13 @@ import {
|
||||
Harness,
|
||||
Provider,
|
||||
Outcome,
|
||||
} from '../src/types/events.js';
|
||||
} from "../src/types/events.js";
|
||||
|
||||
function makeConfig(overrides: Partial<ResolvedConfig> = {}): ResolvedConfig {
|
||||
return {
|
||||
serverUrl: 'https://tel.example.com',
|
||||
apiKey: 'a'.repeat(64),
|
||||
instanceId: 'test-instance-id',
|
||||
serverUrl: "https://tel.example.com",
|
||||
apiKey: "a".repeat(64),
|
||||
instanceId: "test-instance-id",
|
||||
enabled: true,
|
||||
submitIntervalMs: 300_000,
|
||||
maxQueueSize: 1000,
|
||||
@@ -28,17 +28,17 @@ function makeConfig(overrides: Partial<ResolvedConfig> = {}): ResolvedConfig {
|
||||
};
|
||||
}
|
||||
|
||||
function makeEvent(id = 'evt-1'): TaskCompletionEvent {
|
||||
function makeEvent(id = "evt-1"): TaskCompletionEvent {
|
||||
return {
|
||||
instance_id: 'test-instance-id',
|
||||
instance_id: "test-instance-id",
|
||||
event_id: id,
|
||||
schema_version: '1.0',
|
||||
schema_version: "1.0",
|
||||
timestamp: new Date().toISOString(),
|
||||
task_duration_ms: 5000,
|
||||
task_type: TaskType.IMPLEMENTATION,
|
||||
complexity: Complexity.MEDIUM,
|
||||
harness: Harness.CLAUDE_CODE,
|
||||
model: 'claude-3-opus',
|
||||
model: "claude-3-opus",
|
||||
provider: Provider.ANTHROPIC,
|
||||
estimated_input_tokens: 1000,
|
||||
estimated_output_tokens: 500,
|
||||
@@ -57,13 +57,13 @@ function makeEvent(id = 'evt-1'): TaskCompletionEvent {
|
||||
};
|
||||
}
|
||||
|
||||
describe('BatchSubmitter', () => {
|
||||
describe("BatchSubmitter", () => {
|
||||
let fetchSpy: ReturnType<typeof vi.fn>;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
fetchSpy = vi.fn();
|
||||
vi.stubGlobal('fetch', fetchSpy);
|
||||
vi.stubGlobal("fetch", fetchSpy);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
@@ -71,11 +71,11 @@ describe('BatchSubmitter', () => {
|
||||
vi.unstubAllGlobals();
|
||||
});
|
||||
|
||||
it('should submit a batch successfully', async () => {
|
||||
it("should submit a batch successfully", async () => {
|
||||
const responseBody = {
|
||||
accepted: 1,
|
||||
rejected: 0,
|
||||
results: [{ event_id: 'evt-1', status: 'accepted' }],
|
||||
results: [{ event_id: "evt-1", status: "accepted" }],
|
||||
};
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
@@ -91,13 +91,13 @@ describe('BatchSubmitter', () => {
|
||||
expect(fetchSpy).toHaveBeenCalledTimes(1);
|
||||
|
||||
const [url, options] = fetchSpy.mock.calls[0];
|
||||
expect(url).toBe('https://tel.example.com/v1/events/batch');
|
||||
expect(options.method).toBe('POST');
|
||||
expect(options.headers['Authorization']).toBe(`Bearer ${'a'.repeat(64)}`);
|
||||
expect(url).toBe("https://tel.example.com/v1/events/batch");
|
||||
expect(options.method).toBe("POST");
|
||||
expect(options.headers["Authorization"]).toBe(`Bearer ${"a".repeat(64)}`);
|
||||
});
|
||||
|
||||
it('should handle 429 with Retry-After header', async () => {
|
||||
const headers = new Map([['Retry-After', '1']]);
|
||||
it("should handle 429 with Retry-After header", async () => {
|
||||
const headers = new Map([["Retry-After", "1"]]);
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
status: 429,
|
||||
@@ -108,7 +108,7 @@ describe('BatchSubmitter', () => {
|
||||
const responseBody = {
|
||||
accepted: 1,
|
||||
rejected: 0,
|
||||
results: [{ event_id: 'evt-1', status: 'accepted' }],
|
||||
results: [{ event_id: "evt-1", status: "accepted" }],
|
||||
};
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
@@ -129,23 +129,23 @@ describe('BatchSubmitter', () => {
|
||||
expect(fetchSpy).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should handle 403 error', async () => {
|
||||
it("should handle 403 error", async () => {
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
status: 403,
|
||||
statusText: 'Forbidden',
|
||||
statusText: "Forbidden",
|
||||
});
|
||||
|
||||
const submitter = new BatchSubmitter(makeConfig({ maxRetries: 0 }));
|
||||
const result = await submitter.submit([makeEvent()]);
|
||||
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.error?.message).toContain('Forbidden');
|
||||
expect(result.error?.message).toContain('403');
|
||||
expect(result.error?.message).toContain("Forbidden");
|
||||
expect(result.error?.message).toContain("403");
|
||||
});
|
||||
|
||||
it('should retry on network error with backoff', async () => {
|
||||
fetchSpy.mockRejectedValueOnce(new Error('Network error'));
|
||||
it("should retry on network error with backoff", async () => {
|
||||
fetchSpy.mockRejectedValueOnce(new Error("Network error"));
|
||||
fetchSpy.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
status: 202,
|
||||
@@ -153,7 +153,7 @@ describe('BatchSubmitter', () => {
|
||||
Promise.resolve({
|
||||
accepted: 1,
|
||||
rejected: 0,
|
||||
results: [{ event_id: 'evt-1', status: 'accepted' }],
|
||||
results: [{ event_id: "evt-1", status: "accepted" }],
|
||||
}),
|
||||
});
|
||||
|
||||
@@ -168,8 +168,8 @@ describe('BatchSubmitter', () => {
|
||||
expect(fetchSpy).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should fail after max retries exhausted', async () => {
|
||||
fetchSpy.mockRejectedValue(new Error('Network error'));
|
||||
it("should fail after max retries exhausted", async () => {
|
||||
fetchSpy.mockRejectedValue(new Error("Network error"));
|
||||
|
||||
const submitter = new BatchSubmitter(makeConfig({ maxRetries: 2 }));
|
||||
const submitPromise = submitter.submit([makeEvent()]);
|
||||
@@ -179,12 +179,15 @@ describe('BatchSubmitter', () => {
|
||||
|
||||
const result = await submitPromise;
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.error?.message).toBe('Network error');
|
||||
expect(result.error?.message).toBe("Network error");
|
||||
});
|
||||
|
||||
it('should not call fetch in dryRun mode', async () => {
|
||||
it("should not call fetch in dryRun mode", async () => {
|
||||
const submitter = new BatchSubmitter(makeConfig({ dryRun: true }));
|
||||
const result = await submitter.submit([makeEvent('evt-1'), makeEvent('evt-2')]);
|
||||
const result = await submitter.submit([
|
||||
makeEvent("evt-1"),
|
||||
makeEvent("evt-2"),
|
||||
]);
|
||||
|
||||
expect(result.success).toBe(true);
|
||||
expect(result.response?.accepted).toBe(2);
|
||||
@@ -192,12 +195,14 @@ describe('BatchSubmitter', () => {
|
||||
expect(fetchSpy).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should handle request timeout via AbortController', async () => {
|
||||
it("should handle request timeout via AbortController", async () => {
|
||||
fetchSpy.mockImplementation(
|
||||
(_url: string, options: { signal: AbortSignal }) =>
|
||||
new Promise((_resolve, reject) => {
|
||||
options.signal.addEventListener('abort', () => {
|
||||
reject(new DOMException('The operation was aborted.', 'AbortError'));
|
||||
options.signal.addEventListener("abort", () => {
|
||||
reject(
|
||||
new DOMException("The operation was aborted.", "AbortError"),
|
||||
);
|
||||
});
|
||||
}),
|
||||
);
|
||||
@@ -211,6 +216,6 @@ describe('BatchSubmitter', () => {
|
||||
|
||||
const result = await submitPromise;
|
||||
expect(result.success).toBe(false);
|
||||
expect(result.error?.message).toContain('aborted');
|
||||
expect(result.error?.message).toContain("aborted");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "./dist/esm"
|
||||
},
|
||||
"exclude": ["node_modules", "dist", "tests", "**/*.test.ts"]
|
||||
}
|
||||
|
||||
9
tsconfig.cjs.json
Normal file
9
tsconfig.cjs.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"extends": "./tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"module": "CommonJS",
|
||||
"moduleResolution": "node",
|
||||
"outDir": "./dist/cjs"
|
||||
},
|
||||
"exclude": ["node_modules", "dist", "tests", "**/*.test.ts"]
|
||||
}
|
||||
Reference in New Issue
Block a user