Files
stack/apps/web/src/lib/api/telemetry.ts
Jason Woltje 5b77774d91
All checks were successful
ci/woodpecker/push/ci Pipeline was successful
fix(web): remove mock data from dashboard telemetry/tasks/calendar (#656)
Co-authored-by: Jason Woltje <jason@diversecanvas.com>
Co-committed-by: Jason Woltje <jason@diversecanvas.com>
2026-03-02 14:19:27 +00:00

210 lines
5.5 KiB
TypeScript

/**
* Telemetry API Client
* Handles telemetry data fetching for the usage dashboard.
*/
import { apiGet, type ApiResponse } from "./client";
// ─── Types ───────────────────────────────────────────────────────────
/** Dashboard time windows: trailing 7, 30, or 90 days. */
export type TimeRange = "7d" | "30d" | "90d";
/** Aggregate figures for the dashboard summary cards. */
export interface UsageSummary {
// Total tokens consumed over the selected window.
totalTokens: number;
// Total cost in dollars (converted from cents upstream).
totalCost: number;
// Number of LLM calls counted as "tasks" for the summary.
taskCount: number;
// Currently always 0 — no quality-gate data is wired in yet (see fetchUsageSummary).
avgQualityGatePassRate: number;
}
/** One point of the token-usage time series chart. */
export interface TokenUsagePoint {
// NOTE(review): format not visible here — presumably an ISO date string; confirm against chart consumer.
date: string;
inputTokens: number;
outputTokens: number;
totalTokens: number;
}
/** One slice of the per-model cost breakdown chart. */
export interface CostBreakdownItem {
model: string;
// Provider name; currently always "unknown" (see fetchCostBreakdown).
provider: string;
// Cost in dollars.
cost: number;
taskCount: number;
}
/** One slice of the task-outcome distribution chart. */
export interface TaskOutcomeItem {
outcome: string;
count: number;
// Hex color assigned from TASK_OUTCOME_COLORS.
color: string;
}
/** Query parameters for the GET /api/telemetry/estimate endpoint. */
export interface EstimateParams {
taskType: string;
model: string;
provider: string;
complexity: string;
}
/** Response shape of GET /api/telemetry/estimate (TEL-006). */
export interface EstimateResponse {
// Null when the backend has no prediction for the requested configuration.
prediction: {
input_tokens: { median: number; p75: number; p90: number };
output_tokens: { median: number; p75: number; p90: number };
// NOTE(review): key semantics not visible here — presumably percentile labels; confirm against TEL-006 API.
cost_usd_micros: Record<string, number>;
quality: { gate_pass_rate: number; success_rate: number };
} | null;
metadata: {
// Number of historical samples the estimate is based on.
sample_size: number;
confidence: "none" | "low" | "medium" | "high";
};
}
/** Per-provider aggregate row from /api/llm-usage/analytics. */
interface ProviderUsageAnalyticsItem {
provider: string;
calls: number;
promptTokens: number;
completionTokens: number;
totalTokens: number;
// Cost in cents; callers divide by 100 for dollars.
costCents: number;
averageDurationMs: number;
}
/** Per-model aggregate row from /api/llm-usage/analytics. */
interface ModelUsageAnalyticsItem {
model: string;
calls: number;
promptTokens: number;
completionTokens: number;
totalTokens: number;
// Cost in cents; callers divide by 100 for dollars.
costCents: number;
averageDurationMs: number;
}
/** Per-task-type aggregate row from /api/llm-usage/analytics. */
interface TaskTypeUsageAnalyticsItem {
taskType: string;
calls: number;
promptTokens: number;
completionTokens: number;
totalTokens: number;
costCents: number;
averageDurationMs: number;
}
/** Full payload of GET /api/llm-usage/analytics for a date range. */
interface UsageAnalyticsResponse {
totalCalls: number;
totalPromptTokens: number;
totalCompletionTokens: number;
totalTokens: number;
// Total cost in cents.
totalCostCents: number;
averageDurationMs: number;
byProvider: ProviderUsageAnalyticsItem[];
byModel: ModelUsageAnalyticsItem[];
byTaskType: TaskTypeUsageAnalyticsItem[];
}
// Palette cycled through the task-outcome chart slices (modulo-indexed in fetchTaskOutcomes).
const TASK_OUTCOME_COLORS = ["#6EBF8B", "#F5C862", "#94A3B8", "#C4A5DE", "#7AA2F7"];
// Number of days covered by each TimeRange option.
const DAYS_BY_RANGE: Record<TimeRange, number> = {
"7d": 7,
"30d": 30,
"90d": 90,
};
// De-duplicates concurrent analytics requests per time range. Entries are
// removed as soon as the request settles (see fetchUsageAnalytics), so this
// is an in-flight dedupe map, NOT a response cache.
const analyticsRequestCache = new Map<TimeRange, Promise<UsageAnalyticsResponse>>();
/**
 * Build the /api/llm-usage/analytics URL for a trailing window ending "now".
 *
 * NOTE(review): the window start is midnight in the *local* timezone but is
 * serialized as a UTC ISO string — confirm the API expects that.
 */
function buildAnalyticsEndpoint(timeRange: TimeRange): string {
  const now = new Date();
  const windowStart = new Date(now);
  // Inclusive window: e.g. "7d" spans today plus the previous 6 days.
  windowStart.setDate(windowStart.getDate() - (DAYS_BY_RANGE[timeRange] - 1));
  windowStart.setHours(0, 0, 0, 0);
  const params = new URLSearchParams();
  params.set("startDate", windowStart.toISOString());
  params.set("endDate", now.toISOString());
  return `/api/llm-usage/analytics?${params.toString()}`;
}
/**
 * Fetch the usage analytics payload for a time range, joining any request
 * for the same range that is already in flight.
 */
async function fetchUsageAnalytics(timeRange: TimeRange): Promise<UsageAnalyticsResponse> {
  const inFlight = analyticsRequestCache.get(timeRange);
  if (inFlight !== undefined) {
    return inFlight;
  }
  const pending = apiGet<ApiResponse<UsageAnalyticsResponse>>(buildAnalyticsEndpoint(timeRange))
    .then((response) => response.data)
    .finally(() => {
      // Drop the entry once settled so failures are retried on the next call.
      analyticsRequestCache.delete(timeRange);
    });
  analyticsRequestCache.set(timeRange, pending);
  return pending;
}
// ─── API Functions ───────────────────────────────────────────────────
/**
 * Fetch usage summary data (total tokens, cost, task count, quality rate).
 */
export async function fetchUsageSummary(timeRange: TimeRange): Promise<UsageSummary> {
  const analytics = await fetchUsageAnalytics(timeRange);
  const summary: UsageSummary = {
    totalTokens: analytics.totalTokens,
    // Backend reports cents; the dashboard displays dollars.
    totalCost: analytics.totalCostCents / 100,
    taskCount: analytics.totalCalls,
    // The analytics endpoint exposes no quality-gate data yet.
    avgQualityGatePassRate: 0,
  };
  return summary;
}
/**
 * Fetch token usage time series for charts.
 *
 * Currently a stub: no time-series endpoint is wired up, so this always
 * resolves to an empty series (the chart renders its empty state).
 */
export function fetchTokenUsage(timeRange: TimeRange): Promise<TokenUsagePoint[]> {
  void timeRange; // intentionally unused until a real endpoint exists
  const emptySeries: TokenUsagePoint[] = [];
  return Promise.resolve(emptySeries);
}
/**
 * Fetch cost breakdown by model, sorted by descending cost.
 */
export async function fetchCostBreakdown(timeRange: TimeRange): Promise<CostBreakdownItem[]> {
  const analytics = await fetchUsageAnalytics(timeRange);
  // filter() yields a fresh array, so the in-place sort never mutates the response.
  const activeModels = analytics.byModel.filter((entry) => entry.calls > 0);
  activeModels.sort((left, right) => right.costCents - left.costCents);
  return activeModels.map((entry) => ({
    model: entry.model,
    // byModel rows carry no provider information.
    provider: "unknown",
    cost: entry.costCents / 100, // cents → dollars
    taskCount: entry.calls,
  }));
}
/**
 * Fetch task outcome distribution (one slice per task type with activity).
 */
export async function fetchTaskOutcomes(timeRange: TimeRange): Promise<TaskOutcomeItem[]> {
  const analytics = await fetchUsageAnalytics(timeRange);
  const activeTypes = analytics.byTaskType.filter((entry) => entry.calls > 0);
  return activeTypes.map((entry, position) => ({
    outcome: entry.taskType,
    count: entry.calls,
    // Cycle the palette; the ?? fallback satisfies noUncheckedIndexedAccess.
    color: TASK_OUTCOME_COLORS[position % TASK_OUTCOME_COLORS.length] ?? "#94A3B8",
  }));
}
/**
 * Fetch cost/token estimate for a given task configuration.
 * Uses the real GET /api/telemetry/estimate endpoint from TEL-006.
 */
export async function fetchEstimate(params: EstimateParams): Promise<EstimateResponse> {
  const query = new URLSearchParams();
  query.set("taskType", params.taskType);
  query.set("model", params.model);
  query.set("provider", params.provider);
  query.set("complexity", params.complexity);
  const endpoint = `/api/telemetry/estimate?${query.toString()}`;
  const response = await apiGet<ApiResponse<EstimateResponse>>(endpoint);
  return response.data;
}