- Log at ERROR level when falling back to in-memory storage - Track and expose degraded mode status for health checks - Add isUsingFallback() method to check fallback state - Add getHealthStatus() method for health check endpoints - Add comprehensive tests for fallback behavior and health status Refs #338 Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
223 lines
6.5 KiB
TypeScript
223 lines
6.5 KiB
TypeScript
import { Injectable, Logger, OnModuleDestroy, OnModuleInit } from "@nestjs/common";
import { ThrottlerStorage } from "@nestjs/throttler";
import Redis from "ioredis";
|
/**
 * Throttler storage record interface
 * Matches @nestjs/throttler's ThrottlerStorageRecord
 */
interface ThrottlerStorageRecord {
  // Total number of requests recorded for the key in the current window.
  totalHits: number;
  // Milliseconds until the current window expires.
  timeToExpire: number;
  // Whether the caller has exceeded the limit and is currently blocked.
  isBlocked: boolean;
  // Milliseconds until the block lifts; 0 when not blocked.
  timeToBlockExpire: number;
}
|
|
|
|
/**
|
|
* Redis-based storage for rate limiting using Valkey
|
|
*
|
|
* This service uses Valkey (Redis-compatible) as the storage backend
|
|
* for rate limiting. This allows rate limits to work across multiple
|
|
* API instances in a distributed environment.
|
|
*
|
|
* If Redis is unavailable, falls back to in-memory storage.
|
|
*/
|
|
@Injectable()
|
|
export class ThrottlerValkeyStorageService implements ThrottlerStorage, OnModuleInit {
|
|
private readonly logger = new Logger(ThrottlerValkeyStorageService.name);
|
|
private client: Redis | undefined = undefined;
|
|
private readonly THROTTLER_PREFIX = "mosaic:throttler:";
|
|
private readonly fallbackStorage = new Map<string, number[]>();
|
|
private useRedis = false;
|
|
|
|
async onModuleInit(): Promise<void> {
|
|
const valkeyUrl = process.env.VALKEY_URL ?? "redis://localhost:6379";
|
|
|
|
try {
|
|
this.logger.log(`Connecting to Valkey for rate limiting at ${valkeyUrl}`);
|
|
|
|
this.client = new Redis(valkeyUrl, {
|
|
maxRetriesPerRequest: 3,
|
|
retryStrategy: (times: number) => {
|
|
const delay = Math.min(times * 50, 2000);
|
|
return delay;
|
|
},
|
|
lazyConnect: true, // Don't connect immediately
|
|
});
|
|
|
|
// Try to connect
|
|
await this.client.connect();
|
|
await this.client.ping();
|
|
|
|
this.useRedis = true;
|
|
this.logger.log("Valkey connected successfully for rate limiting");
|
|
} catch (error) {
|
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
this.logger.error(`Failed to connect to Valkey for rate limiting: ${errorMessage}`);
|
|
this.logger.error(
|
|
"DEGRADED MODE: Falling back to in-memory rate limiting storage. " +
|
|
"Rate limits will not be shared across API instances."
|
|
);
|
|
this.useRedis = false;
|
|
this.client = undefined;
|
|
}
|
|
}
|
|
|
|
/**
|
|
* Increment the number of requests for a given key
|
|
*
|
|
* @param key - Throttle key (e.g., "apikey:xxx" or "ip:192.168.1.1")
|
|
* @param ttl - Time to live in milliseconds
|
|
* @param limit - Maximum number of requests allowed
|
|
* @param blockDuration - Duration to block in milliseconds (not used in this implementation)
|
|
* @param _throttlerName - Name of the throttler (not used in this implementation)
|
|
* @returns Promise resolving to the current throttler storage record
|
|
*/
|
|
async increment(
|
|
key: string,
|
|
ttl: number,
|
|
limit: number,
|
|
blockDuration: number,
|
|
_throttlerName: string
|
|
): Promise<ThrottlerStorageRecord> {
|
|
const throttleKey = this.getThrottleKey(key);
|
|
let totalHits: number;
|
|
|
|
if (this.useRedis && this.client) {
|
|
try {
|
|
const result = await this.client.multi().incr(throttleKey).pexpire(throttleKey, ttl).exec();
|
|
|
|
if (result?.[0]?.[1]) {
|
|
totalHits = result[0][1] as number;
|
|
} else {
|
|
totalHits = this.incrementMemory(throttleKey, ttl);
|
|
}
|
|
} catch (error) {
|
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
this.logger.error(`Redis increment failed: ${errorMessage}`);
|
|
// Fall through to in-memory
|
|
totalHits = this.incrementMemory(throttleKey, ttl);
|
|
}
|
|
} else {
|
|
// In-memory fallback
|
|
totalHits = this.incrementMemory(throttleKey, ttl);
|
|
}
|
|
|
|
// Return ThrottlerStorageRecord
|
|
const isBlocked = totalHits > limit;
|
|
return {
|
|
totalHits,
|
|
timeToExpire: ttl,
|
|
isBlocked,
|
|
timeToBlockExpire: isBlocked ? blockDuration : 0,
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Get the current number of requests for a given key
|
|
*
|
|
* @param key - Throttle key
|
|
* @returns Promise resolving to the current number of requests
|
|
*/
|
|
async get(key: string): Promise<number> {
|
|
const throttleKey = this.getThrottleKey(key);
|
|
|
|
if (this.useRedis && this.client) {
|
|
try {
|
|
const value = await this.client.get(throttleKey);
|
|
return value ? parseInt(value, 10) : 0;
|
|
} catch (error) {
|
|
const errorMessage = error instanceof Error ? error.message : String(error);
|
|
this.logger.error(`Redis get failed: ${errorMessage}`);
|
|
// Fall through to in-memory
|
|
}
|
|
}
|
|
|
|
// In-memory fallback
|
|
return this.getMemory(throttleKey);
|
|
}
|
|
|
|
/**
|
|
* In-memory increment implementation
|
|
*/
|
|
private incrementMemory(key: string, ttl: number): number {
|
|
const now = Date.now();
|
|
const timestamps = this.fallbackStorage.get(key) ?? [];
|
|
|
|
// Remove expired timestamps
|
|
const validTimestamps = timestamps.filter((timestamp) => now - timestamp < ttl);
|
|
|
|
// Add new timestamp
|
|
validTimestamps.push(now);
|
|
|
|
// Store updated timestamps
|
|
this.fallbackStorage.set(key, validTimestamps);
|
|
|
|
return validTimestamps.length;
|
|
}
|
|
|
|
/**
|
|
* In-memory get implementation
|
|
*/
|
|
private getMemory(key: string): number {
|
|
const timestamps = this.fallbackStorage.get(key);
|
|
return timestamps ? timestamps.length : 0;
|
|
}
|
|
|
|
/**
|
|
* Get throttle key with prefix
|
|
*/
|
|
private getThrottleKey(key: string): string {
|
|
return `${this.THROTTLER_PREFIX}${key}`;
|
|
}
|
|
|
|
/**
|
|
* Check if the service is using fallback in-memory storage
|
|
*
|
|
* This indicates a degraded state where rate limits are not shared
|
|
* across API instances. Use this for health checks.
|
|
*
|
|
* @returns true if using in-memory fallback, false if using Redis
|
|
*/
|
|
isUsingFallback(): boolean {
|
|
return !this.useRedis;
|
|
}
|
|
|
|
/**
|
|
* Get rate limiter health status for health check endpoints
|
|
*
|
|
* @returns Health status object with storage mode and details
|
|
*/
|
|
getHealthStatus(): {
|
|
healthy: boolean;
|
|
mode: "redis" | "memory";
|
|
degraded: boolean;
|
|
message: string;
|
|
} {
|
|
if (this.useRedis) {
|
|
return {
|
|
healthy: true,
|
|
mode: "redis",
|
|
degraded: false,
|
|
message: "Rate limiter using Redis storage (distributed mode)",
|
|
};
|
|
}
|
|
return {
|
|
healthy: true, // Service is functional, but degraded
|
|
mode: "memory",
|
|
degraded: true,
|
|
message:
|
|
"Rate limiter using in-memory fallback (degraded mode - limits not shared across instances)",
|
|
};
|
|
}
|
|
|
|
/**
|
|
* Clean up on module destroy
|
|
*/
|
|
async onModuleDestroy(): Promise<void> {
|
|
if (this.client) {
|
|
await this.client.quit();
|
|
}
|
|
}
|
|
}
|