fix(web): remove mock data from dashboard telemetry/tasks/calendar (#656)
All checks were successful
ci/woodpecker/push/ci Pipeline was successful
All checks were successful
ci/woodpecker/push/ci Pipeline was successful
Co-authored-by: Jason Woltje <jason@diversecanvas.com> Co-committed-by: Jason Woltje <jason@diversecanvas.com>
This commit was merged in pull request #656.
This commit is contained in:
53
apps/web/src/lib/api/telemetry.test.ts
Normal file
53
apps/web/src/lib/api/telemetry.test.ts
Normal file
@@ -0,0 +1,53 @@
|
||||
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
import { fetchUsageSummary } from "./telemetry";

// Replace the real HTTP client with a mock so no network request is made.
vi.mock("./client", () => ({
  apiGet: vi.fn(),
}));

// Import after vi.mock() so the mocked module is what gets resolved.
const { apiGet } = await import("./client");

describe("Telemetry API Client", (): void => {
  beforeEach((): void => {
    vi.clearAllMocks();
    // Freeze the clock so the startDate/endDate query params are deterministic.
    vi.useFakeTimers();
    vi.setSystemTime(new Date("2026-03-02T12:00:00Z"));
  });

  afterEach((): void => {
    vi.useRealTimers();
  });

  it("fetches usage summary from llm usage analytics endpoint", async (): Promise<void> => {
    // Analytics payload shaped like the backend response (costs in cents).
    vi.mocked(apiGet).mockResolvedValueOnce({
      data: {
        totalCalls: 47,
        totalPromptTokens: 120000,
        totalCompletionTokens: 125800,
        totalTokens: 245800,
        totalCostCents: 342,
        averageDurationMs: 3200,
        byProvider: [],
        byModel: [],
        byTaskType: [],
      },
    });

    const result = await fetchUsageSummary("30d");

    // The client must hit the llm-usage analytics endpoint with a query string.
    const calledEndpoint = vi.mocked(apiGet).mock.calls[0]?.[0];
    expect(calledEndpoint).toMatch(/^\/api\/llm-usage\/analytics\?/);

    // The query string must carry an explicit date window.
    const queryString = calledEndpoint?.split("?")[1] ?? "";
    const params = new URLSearchParams(queryString);
    expect(params.get("startDate")).toBeTruthy();
    expect(params.get("endDate")).toBeTruthy();

    // Cents are converted to dollars; quality-gate rate is not tracked yet (0).
    expect(result).toEqual({
      totalTokens: 245800,
      totalCost: 3.42,
      taskCount: 47,
      avgQualityGatePassRate: 0,
    });
  });
});
|
||||
@@ -1,10 +1,6 @@
|
||||
/**
 * Telemetry API Client
 * Handles telemetry data fetching for the usage dashboard.
 */
||||
|
||||
import { apiGet, type ApiResponse } from "./client";
|
||||
@@ -60,65 +56,84 @@ export interface EstimateResponse {
|
||||
};
|
||||
}
|
||||
|
||||
// ─── Mock Data Generators ────────────────────────────────────────────
|
||||
interface ProviderUsageAnalyticsItem {
|
||||
provider: string;
|
||||
calls: number;
|
||||
promptTokens: number;
|
||||
completionTokens: number;
|
||||
totalTokens: number;
|
||||
costCents: number;
|
||||
averageDurationMs: number;
|
||||
}
|
||||
|
||||
function generateDateRange(range: TimeRange): string[] {
|
||||
const days = range === "7d" ? 7 : range === "30d" ? 30 : 90;
|
||||
const dates: string[] = [];
|
||||
const now = new Date();
|
||||
interface ModelUsageAnalyticsItem {
|
||||
model: string;
|
||||
calls: number;
|
||||
promptTokens: number;
|
||||
completionTokens: number;
|
||||
totalTokens: number;
|
||||
costCents: number;
|
||||
averageDurationMs: number;
|
||||
}
|
||||
|
||||
for (let i = days - 1; i >= 0; i--) {
|
||||
const d = new Date(now);
|
||||
d.setDate(d.getDate() - i);
|
||||
dates.push(d.toISOString().split("T")[0] ?? "");
|
||||
interface TaskTypeUsageAnalyticsItem {
|
||||
taskType: string;
|
||||
calls: number;
|
||||
promptTokens: number;
|
||||
completionTokens: number;
|
||||
totalTokens: number;
|
||||
costCents: number;
|
||||
averageDurationMs: number;
|
||||
}
|
||||
|
||||
interface UsageAnalyticsResponse {
|
||||
totalCalls: number;
|
||||
totalPromptTokens: number;
|
||||
totalCompletionTokens: number;
|
||||
totalTokens: number;
|
||||
totalCostCents: number;
|
||||
averageDurationMs: number;
|
||||
byProvider: ProviderUsageAnalyticsItem[];
|
||||
byModel: ModelUsageAnalyticsItem[];
|
||||
byTaskType: TaskTypeUsageAnalyticsItem[];
|
||||
}
|
||||
|
||||
// PDA-friendly palette for task-outcome charts: calm hues, no aggressive reds.
const TASK_OUTCOME_COLORS = ["#6EBF8B", "#F5C862", "#94A3B8", "#C4A5DE", "#7AA2F7"];
// Number of days covered by each selectable time range.
const DAYS_BY_RANGE: Record<TimeRange, number> = {
  "7d": 7,
  "30d": 30,
  "90d": 90,
};
// De-duplicates concurrent fetches: at most one in-flight analytics request per range.
const analyticsRequestCache = new Map<TimeRange, Promise<UsageAnalyticsResponse>>();
|
||||
|
||||
function buildAnalyticsEndpoint(timeRange: TimeRange): string {
|
||||
const endDate = new Date();
|
||||
const startDate = new Date(endDate);
|
||||
startDate.setDate(startDate.getDate() - (DAYS_BY_RANGE[timeRange] - 1));
|
||||
startDate.setHours(0, 0, 0, 0);
|
||||
|
||||
const query = new URLSearchParams({
|
||||
startDate: startDate.toISOString(),
|
||||
endDate: endDate.toISOString(),
|
||||
}).toString();
|
||||
|
||||
return `/api/llm-usage/analytics?${query}`;
|
||||
}
|
||||
|
||||
async function fetchUsageAnalytics(timeRange: TimeRange): Promise<UsageAnalyticsResponse> {
|
||||
const cachedRequest = analyticsRequestCache.get(timeRange);
|
||||
if (cachedRequest) {
|
||||
return cachedRequest;
|
||||
}
|
||||
|
||||
return dates;
|
||||
}
|
||||
const request = apiGet<ApiResponse<UsageAnalyticsResponse>>(buildAnalyticsEndpoint(timeRange))
|
||||
.then((response) => response.data)
|
||||
.finally(() => {
|
||||
analyticsRequestCache.delete(timeRange);
|
||||
});
|
||||
|
||||
function generateMockTokenUsage(range: TimeRange): TokenUsagePoint[] {
|
||||
const dates = generateDateRange(range);
|
||||
|
||||
return dates.map((date) => {
|
||||
const baseInput = 8000 + Math.floor(Math.random() * 12000);
|
||||
const baseOutput = 3000 + Math.floor(Math.random() * 7000);
|
||||
return {
|
||||
date,
|
||||
inputTokens: baseInput,
|
||||
outputTokens: baseOutput,
|
||||
totalTokens: baseInput + baseOutput,
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
function generateMockSummary(range: TimeRange): UsageSummary {
|
||||
const multiplier = range === "7d" ? 1 : range === "30d" ? 4 : 12;
|
||||
return {
|
||||
totalTokens: 245_800 * multiplier,
|
||||
totalCost: 3.42 * multiplier,
|
||||
taskCount: 47 * multiplier,
|
||||
avgQualityGatePassRate: 0.87,
|
||||
};
|
||||
}
|
||||
|
||||
function generateMockCostBreakdown(): CostBreakdownItem[] {
|
||||
return [
|
||||
{ model: "claude-sonnet-4-5", provider: "anthropic", cost: 18.5, taskCount: 124 },
|
||||
{ model: "gpt-4o", provider: "openai", cost: 12.3, taskCount: 89 },
|
||||
{ model: "claude-haiku-3.5", provider: "anthropic", cost: 4.2, taskCount: 156 },
|
||||
{ model: "llama-3.3-70b", provider: "ollama", cost: 0, taskCount: 67 },
|
||||
{ model: "gemini-2.0-flash", provider: "google", cost: 2.8, taskCount: 42 },
|
||||
];
|
||||
}
|
||||
|
||||
// PDA-friendly colors: calm, no aggressive reds
|
||||
function generateMockTaskOutcomes(): TaskOutcomeItem[] {
|
||||
return [
|
||||
{ outcome: "Success", count: 312, color: "#6EBF8B" },
|
||||
{ outcome: "Partial", count: 48, color: "#F5C862" },
|
||||
{ outcome: "Timeout", count: 18, color: "#94A3B8" },
|
||||
{ outcome: "Incomplete", count: 22, color: "#C4A5DE" },
|
||||
];
|
||||
analyticsRequestCache.set(timeRange, request);
|
||||
return request;
|
||||
}
|
||||
|
||||
// ─── API Functions ───────────────────────────────────────────────────
|
||||
@@ -127,47 +142,54 @@ function generateMockTaskOutcomes(): TaskOutcomeItem[] {
|
||||
* Fetch usage summary data (total tokens, cost, task count, quality rate)
|
||||
*/
|
||||
export async function fetchUsageSummary(timeRange: TimeRange): Promise<UsageSummary> {
|
||||
// TODO: Replace with real API call when backend aggregation endpoints are ready
|
||||
// const response = await apiGet<ApiResponse<UsageSummary>>(`/api/telemetry/summary?range=${timeRange}`);
|
||||
// return response.data;
|
||||
void apiGet; // suppress unused import warning in the meantime
|
||||
await new Promise((resolve) => setTimeout(resolve, 200));
|
||||
return generateMockSummary(timeRange);
|
||||
const analytics = await fetchUsageAnalytics(timeRange);
|
||||
|
||||
return {
|
||||
totalTokens: analytics.totalTokens,
|
||||
totalCost: analytics.totalCostCents / 100,
|
||||
taskCount: analytics.totalCalls,
|
||||
avgQualityGatePassRate: 0,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch token usage time series for charts
|
||||
*/
|
||||
export async function fetchTokenUsage(timeRange: TimeRange): Promise<TokenUsagePoint[]> {
|
||||
// TODO: Replace with real API call
|
||||
// const response = await apiGet<ApiResponse<TokenUsagePoint[]>>(`/api/telemetry/tokens?range=${timeRange}`);
|
||||
// return response.data;
|
||||
await new Promise((resolve) => setTimeout(resolve, 250));
|
||||
return generateMockTokenUsage(timeRange);
|
||||
export function fetchTokenUsage(timeRange: TimeRange): Promise<TokenUsagePoint[]> {
|
||||
void timeRange;
|
||||
return Promise.resolve([]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch cost breakdown by model
|
||||
*/
|
||||
export async function fetchCostBreakdown(timeRange: TimeRange): Promise<CostBreakdownItem[]> {
|
||||
// TODO: Replace with real API call
|
||||
// const response = await apiGet<ApiResponse<CostBreakdownItem[]>>(`/api/telemetry/costs?range=${timeRange}`);
|
||||
// return response.data;
|
||||
await new Promise((resolve) => setTimeout(resolve, 200));
|
||||
void timeRange;
|
||||
return generateMockCostBreakdown();
|
||||
const analytics = await fetchUsageAnalytics(timeRange);
|
||||
|
||||
return analytics.byModel
|
||||
.filter((item) => item.calls > 0)
|
||||
.sort((a, b) => b.costCents - a.costCents)
|
||||
.map((item) => ({
|
||||
model: item.model,
|
||||
provider: "unknown",
|
||||
cost: item.costCents / 100,
|
||||
taskCount: item.calls,
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
* Fetch task outcome distribution
|
||||
*/
|
||||
export async function fetchTaskOutcomes(timeRange: TimeRange): Promise<TaskOutcomeItem[]> {
|
||||
// TODO: Replace with real API call
|
||||
// const response = await apiGet<ApiResponse<TaskOutcomeItem[]>>(`/api/telemetry/outcomes?range=${timeRange}`);
|
||||
// return response.data;
|
||||
await new Promise((resolve) => setTimeout(resolve, 150));
|
||||
void timeRange;
|
||||
return generateMockTaskOutcomes();
|
||||
const analytics = await fetchUsageAnalytics(timeRange);
|
||||
|
||||
return analytics.byTaskType
|
||||
.filter((item) => item.calls > 0)
|
||||
.map((item, index) => ({
|
||||
outcome: item.taskType,
|
||||
count: item.calls,
|
||||
color: TASK_OUTCOME_COLORS[index % TASK_OUTCOME_COLORS.length] ?? "#94A3B8",
|
||||
}));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user