Files
stack/apps/web/src/hooks/useTextToSpeech.test.ts
Jason Woltje 74d6c1092e
All checks were successful
ci/woodpecker/push/web Pipeline was successful
feat(#403): add audio playback component for TTS output
Implements AudioPlayer inline component with play/pause, progress bar,
speed control (0.5x-2x), download, and duration display. Adds
TextToSpeechButton "Read aloud" component that synthesizes text via
the speech API and integrates AudioPlayer for playback. Includes
useTextToSpeech hook with API integration, audio caching, and
playback state management. All 32 tests passing.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-15 03:05:39 -06:00

286 lines
8.2 KiB
TypeScript

/**
* @file useTextToSpeech.test.ts
* @description Tests for the useTextToSpeech hook that manages TTS API integration
*/
import { renderHook, act } from "@testing-library/react";
import { describe, it, expect, beforeEach, vi, afterEach } from "vitest";
import { useTextToSpeech } from "./useTextToSpeech";
import * as speechApi from "@/lib/api/speech";
// Replace the real speech API module with vi.fn() stubs. vi.mock is hoisted
// above the imports by the vitest transform, so useTextToSpeech always
// resolves the mocked module.
vi.mock("@/lib/api/speech", () => ({
synthesizeSpeech: vi.fn(),
getVoices: vi.fn(),
}));
// Spies for URL.createObjectURL / URL.revokeObjectURL. The suite asserts the
// hook creates a blob URL after synthesis and revokes it on unmount.
const mockCreateObjectURL = vi.fn().mockReturnValue("blob:mock-audio-url");
const mockRevokeObjectURL = vi.fn();
// Re-install the URL spies before every test so each test starts from a
// known global URL implementation regardless of prior mock resets.
beforeEach(() => {
global.URL.createObjectURL = mockCreateObjectURL;
global.URL.revokeObjectURL = mockRevokeObjectURL;
});
/**
 * Minimal stand-in for HTMLAudioElement. It tracks play/pause state and
 * captures event handlers so tests can fire media events by invoking the
 * stored callbacks directly.
 */
class MockAudio {
  src = "";
  currentTime = 0;
  // Fixed duration so duration-dependent assertions are deterministic.
  duration = 120;
  paused = true;
  playbackRate = 1;
  volume = 1;

  // Captured handlers, exposed as properties so tests can trigger events.
  onended: (() => void) | null = null;
  ontimeupdate: (() => void) | null = null;
  onloadedmetadata: (() => void) | null = null;
  onerror: ((e: unknown) => void) | null = null;

  /** Flips to the playing state; resolves immediately like a successful play(). */
  play(): Promise<void> {
    this.paused = false;
    return Promise.resolve();
  }

  /** Flips back to the paused state. */
  pause(): void {
    this.paused = true;
  }

  /** Records the handler for the four event types the hook subscribes to. */
  addEventListener(event: string, handler: () => void): void {
    switch (event) {
      case "ended":
        this.onended = handler;
        break;
      case "timeupdate":
        this.ontimeupdate = handler;
        break;
      case "loadedmetadata":
        this.onloadedmetadata = handler;
        break;
      case "error":
        this.onerror = handler;
        break;
      default:
        // Events the mock does not model are ignored.
        break;
    }
  }

  /** Deliberately a no-op: handler removal is not exercised by these tests. */
  removeEventListener(): void {
    // no-op for tests
  }
}
// Replace the global Audio constructor so `new Audio(...)` inside the hook
// yields a MockAudio instance.
vi.stubGlobal("Audio", MockAudio);
// Typed handle to the mocked API. vi.mocked preserves synthesizeSpeech's real
// signature, unlike the previous `as ReturnType<typeof vi.fn>` assertion,
// which erased it to an untyped Mock and bypassed type checking on
// mockResolvedValue arguments.
const mockSynthesizeSpeech = vi.mocked(speechApi.synthesizeSpeech);
describe("useTextToSpeech", () => {
beforeEach(() => {
vi.clearAllMocks();
mockCreateObjectURL.mockReturnValue("blob:mock-audio-url");
});
afterEach(() => {
vi.restoreAllMocks();
});
describe("initial state", () => {
it("should return correct initial interface", () => {
const { result } = renderHook(() => useTextToSpeech());
expect(result.current.synthesize).toBeTypeOf("function");
expect(result.current.play).toBeTypeOf("function");
expect(result.current.pause).toBeTypeOf("function");
expect(result.current.stop).toBeTypeOf("function");
expect(result.current.audioUrl).toBeNull();
expect(result.current.isLoading).toBe(false);
expect(result.current.error).toBeNull();
expect(result.current.isPlaying).toBe(false);
expect(result.current.duration).toBe(0);
expect(result.current.currentTime).toBe(0);
});
});
describe("synthesize", () => {
it("should call API and return audio blob URL", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello world");
});
expect(mockSynthesizeSpeech).toHaveBeenCalledWith({
text: "Hello world",
});
expect(result.current.audioUrl).toBe("blob:mock-audio-url");
expect(result.current.isLoading).toBe(false);
expect(result.current.error).toBeNull();
});
it("should pass voice and tier options to API", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello", {
voice: "alloy",
tier: "premium",
speed: 1.5,
});
});
expect(mockSynthesizeSpeech).toHaveBeenCalledWith({
text: "Hello",
voice: "alloy",
tier: "premium",
speed: 1.5,
});
});
it("should set loading state while synthesizing", async () => {
let resolvePromise: ((value: Blob) => void) | undefined;
const pendingPromise = new Promise<Blob>((resolve) => {
resolvePromise = resolve;
});
mockSynthesizeSpeech.mockReturnValueOnce(pendingPromise);
const { result } = renderHook(() => useTextToSpeech());
act(() => {
void result.current.synthesize("Hello");
});
expect(result.current.isLoading).toBe(true);
await act(async () => {
resolvePromise?.(new Blob(["audio"], { type: "audio/mpeg" }));
await pendingPromise;
});
expect(result.current.isLoading).toBe(false);
});
it("should handle API errors gracefully", async () => {
mockSynthesizeSpeech.mockRejectedValueOnce(new Error("Synthesis failed"));
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello");
});
expect(result.current.error).toBe("Synthesis failed");
expect(result.current.isLoading).toBe(false);
expect(result.current.audioUrl).toBeNull();
});
it("should cache audio for repeated synthesis of same text", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValue(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
// First call
await act(async () => {
await result.current.synthesize("Hello world");
});
// Second call with same text
await act(async () => {
await result.current.synthesize("Hello world");
});
// API should only be called once due to caching
expect(mockSynthesizeSpeech).toHaveBeenCalledTimes(1);
});
it("should not cache when options differ", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValue(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello", { voice: "alloy" });
});
await act(async () => {
await result.current.synthesize("Hello", { voice: "nova" });
});
expect(mockSynthesizeSpeech).toHaveBeenCalledTimes(2);
});
});
describe("playback controls", () => {
it("should play audio after synthesis", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello");
});
await act(async () => {
await result.current.play();
});
expect(result.current.isPlaying).toBe(true);
});
it("should pause audio playback", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello");
});
await act(async () => {
await result.current.play();
});
act(() => {
result.current.pause();
});
expect(result.current.isPlaying).toBe(false);
});
it("should stop and reset playback", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello");
});
await act(async () => {
await result.current.play();
});
act(() => {
result.current.stop();
});
expect(result.current.isPlaying).toBe(false);
expect(result.current.currentTime).toBe(0);
});
});
describe("cleanup", () => {
it("should revoke object URLs on unmount", async () => {
const mockBlob = new Blob(["audio-data"], { type: "audio/mpeg" });
mockSynthesizeSpeech.mockResolvedValueOnce(mockBlob);
const { result, unmount } = renderHook(() => useTextToSpeech());
await act(async () => {
await result.current.synthesize("Hello");
});
unmount();
expect(mockRevokeObjectURL).toHaveBeenCalled();
});
});
});