# Source: telemetry-client-py/scripts/validate_schema.py (97 lines, 3.2 KiB, Python)
#!/usr/bin/env python3
"""Validate that the SDK types match the expected schema."""
from __future__ import annotations
import json
import sys
from mosaicstack_telemetry.types.events import (
Complexity,
Harness,
Outcome,
Provider,
QualityGate,
RepoSizeCategory,
TaskCompletionEvent,
TaskType,
)
from mosaicstack_telemetry.types.predictions import (
PredictionQuery,
PredictionResponse,
)
def main() -> int:
    """Validate that the SDK enum/model definitions match the expected schema.

    Checks every telemetry enum against its expected value set, prints the
    generated JSON schema for each key Pydantic model, and reports any
    mismatches found.

    Returns:
        0 if all validations passed, 1 if any enum mismatch was detected.
    """
    # (enum name, enum class, expected value set).  The symmetric difference
    # of actual vs. expected surfaces both missing AND unexpected members.
    enum_checks: list[tuple[str, type, set[str]]] = [
        (
            "TaskType",
            TaskType,
            {
                "planning", "implementation", "code_review", "testing",
                "debugging", "refactoring", "documentation", "configuration",
                "security_audit", "unknown",
            },
        ),
        ("Complexity", Complexity, {"low", "medium", "high", "critical"}),
        (
            "Harness",
            Harness,
            {
                "claude_code", "opencode", "kilo_code", "aider",
                "api_direct", "ollama_local", "custom", "unknown",
            },
        ),
        (
            "Provider",
            Provider,
            {
                "anthropic", "openai", "openrouter", "ollama",
                "google", "mistral", "custom", "unknown",
            },
        ),
        (
            "QualityGate",
            QualityGate,
            {"build", "lint", "test", "coverage", "typecheck", "security"},
        ),
        ("Outcome", Outcome, {"success", "failure", "partial", "timeout"}),
        (
            "RepoSizeCategory",
            RepoSizeCategory,
            {"tiny", "small", "medium", "large", "huge"},
        ),
    ]

    errors: list[str] = []
    for name, enum_cls, expected in enum_checks:
        actual = {member.value for member in enum_cls}
        if actual != expected:
            errors.append(f"{name} mismatch: {actual ^ expected}")

    # Print the generated JSON schema for each key model, with a blank line
    # between schemas (but not after the last one).
    models = [
        ("TaskCompletionEvent", TaskCompletionEvent),
        ("PredictionQuery", PredictionQuery),
        ("PredictionResponse", PredictionResponse),
    ]
    for index, (title, model) in enumerate(models):
        if index:
            print()
        print(f"{title} schema:")
        print(json.dumps(model.model_json_schema(), indent=2))

    if errors:
        print("\nVALIDATION ERRORS:")
        for error in errors:
            print(f" - {error}")
        return 1
    print("\nAll schema validations passed.")
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s return code as the exit status.
    # (sys.exit would just raise SystemExit anyway, so raise it directly.)
    raise SystemExit(main())