Redis is a fast in-memory data store widely used for caching, session management, real-time features, and more. This article provides a practical guide to effective caching strategies with Redis.
## Redis Basics

### Connection Configuration
```typescript
// redis.ts
import Redis from 'ioredis';

// Single instance
const redis = new Redis({
  host: process.env.REDIS_HOST || 'localhost',
  port: parseInt(process.env.REDIS_PORT || '6379', 10),
  password: process.env.REDIS_PASSWORD,
  db: 0,
  // Linear backoff, capped at 2 seconds
  retryStrategy: (times) => Math.min(times * 50, 2000),
  maxRetriesPerRequest: 3,
  enableReadyCheck: true,
  lazyConnect: true, // Defers connecting until the first command (or an explicit connect())
});

// Cluster configuration
const cluster = new Redis.Cluster([
  { host: 'redis-node-1', port: 6379 },
  { host: 'redis-node-2', port: 6379 },
  { host: 'redis-node-3', port: 6379 },
], {
  redisOptions: {
    password: process.env.REDIS_PASSWORD,
  },
  scaleReads: 'slave', // Route reads to replicas
  maxRedirections: 16,
});

// Connection events
redis.on('connect', () => console.log('Redis connected'));
redis.on('error', (err) => console.error('Redis error:', err));
redis.on('close', () => console.log('Redis connection closed'));

export { redis, cluster };
```
### Data Types Overview
| Data Type | Use Cases |
|---|---|
| String | Caching, counters, sessions |
| Hash | Object storage, user profiles |
| List | Queues, timelines, logs |
| Set | Tags, unique values, relationships |
| Sorted Set | Rankings, scoreboards, timelines |
| Stream | Event sourcing, messaging |
| JSON | Complex objects (RedisJSON) |
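To make the table concrete, here is a brief sketch with one representative command per core type. Key names are illustrative, and the JSON type is omitted since it requires the RedisJSON module:

```typescript
// data-types.ts -- one representative command per core type (illustrative keys)
import { redis } from './redis';

async function dataTypeExamples() {
  await redis.set('page:home:html', '<html>...</html>', 'EX', 60);                     // String: cached value with TTL
  await redis.hset('user:42', 'name', 'Alice', 'email', 'alice@example.com');          // Hash: object fields
  await redis.lpush('logs:app', JSON.stringify({ level: 'info', msg: 'started' }));    // List: append-style log
  await redis.sadd('post:7:tags', 'redis', 'caching');                                 // Set: unique tags
  await redis.zadd('leaderboard', 1500, 'player:42');                                  // Sorted Set: score-ordered ranking
  await redis.xadd('events', '*', 'type', 'signup', 'userId', '42');                   // Stream: append-only event
}
```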
## Cache Patterns

### Cache-Aside Pattern
```typescript
// cache-aside.ts
import { redis } from './redis';

export class CacheAside<T> {
  private prefix: string;
  private defaultTtl: number;

  constructor(prefix: string, defaultTtl: number = 3600) {
    this.prefix = prefix;
    this.defaultTtl = defaultTtl;
  }

  private getKey(id: string): string {
    return `${this.prefix}:${id}`;
  }

  async get(id: string): Promise<T | null> {
    const cached = await redis.get(this.getKey(id));
    return cached !== null ? JSON.parse(cached) : null;
  }

  async set(id: string, data: T, ttl?: number): Promise<void> {
    await redis.setex(this.getKey(id), ttl || this.defaultTtl, JSON.stringify(data));
  }

  async getOrFetch(
    id: string,
    fetcher: () => Promise<T | null>,
    ttl?: number
  ): Promise<T | null> {
    // 1. Check cache
    const cached = await this.get(id);
    if (cached !== null) {
      return cached;
    }
    // 2. Fetch from data source
    const data = await fetcher();
    if (data === null) {
      return null;
    }
    // 3. Store in cache
    await this.set(id, data, ttl);
    return data;
  }

  async invalidate(id: string): Promise<void> {
    await redis.del(this.getKey(id));
  }

  async invalidatePattern(pattern: string): Promise<void> {
    // SCAN instead of KEYS: KEYS blocks the server and is unsafe in production
    const stream = redis.scanStream({ match: `${this.prefix}:${pattern}`, count: 100 });
    for await (const keys of stream) {
      if (keys.length > 0) {
        await redis.del(...keys);
      }
    }
  }
}

// Usage example
interface User {
  id: string;
  name: string;
  email: string;
}

const userCache = new CacheAside<User>('user', 3600);

// `db` is assumed to be your database client (e.g., a Prisma instance); it is not defined in this article
async function getUser(id: string): Promise<User | null> {
  return userCache.getOrFetch(id, async () => {
    // Fetch from database on cache miss
    return db.user.findUnique({ where: { id } });
  });
}

async function updateUser(id: string, data: Partial<User>): Promise<User> {
  // Update the database first...
  const user = await db.user.update({
    where: { id },
    data,
  });
  // ...then invalidate the stale cache entry
  await userCache.invalidate(id);
  return user;
}
```
### Write-Through Pattern
```typescript
// write-through.ts
import { CacheAside } from './cache-aside';

// Repository<T> is the data-access abstraction; see the sketch after this block
class WriteThrough<T extends { id: string }> {
  private cache: CacheAside<T>;

  constructor(
    prefix: string,
    private repository: Repository<T>,
    ttl: number = 3600
  ) {
    this.cache = new CacheAside<T>(prefix, ttl);
  }

  async get(id: string): Promise<T | null> {
    return this.cache.getOrFetch(id, () => this.repository.findById(id));
  }

  async create(data: Omit<T, 'id'>): Promise<T> {
    // 1. Write to database
    const entity = await this.repository.create(data);
    // 2. Write to cache in the same operation
    await this.cache.set(entity.id, entity);
    return entity;
  }

  async update(id: string, data: Partial<T>): Promise<T> {
    // 1. Update database
    const entity = await this.repository.update(id, data);
    // 2. Update cache
    await this.cache.set(id, entity);
    return entity;
  }

  async delete(id: string): Promise<void> {
    // 1. Delete from database
    await this.repository.delete(id);
    // 2. Delete from cache
    await this.cache.invalidate(id);
  }
}
```
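Both write patterns depend on a `Repository<T>` abstraction that is never defined here. A minimal interface consistent with the calls made on it (a sketch, not a prescribed API) would be:

```typescript
// repository.ts -- assumed shape of the Repository<T> used above (hypothetical)
interface Repository<T extends { id: string }> {
  findById(id: string): Promise<T | null>;
  create(data: Omit<T, 'id'>): Promise<T>;
  update(id: string, data: Partial<T>): Promise<T>;
  delete(id: string): Promise<void>;
  upsertMany(entities: T[]): Promise<void>; // used by the write-behind pattern below
}
```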
### Write-Behind Pattern
```typescript
// write-behind.ts
import { redis } from './redis';

class WriteBehind<T extends { id: string }> {
  private pendingWrites: Map<string, T> = new Map();
  private flushInterval: NodeJS.Timeout;

  constructor(
    private prefix: string,
    private repository: Repository<T>,
    private batchSize: number = 100,
    private flushIntervalMs: number = 5000
  ) {
    // Periodic batch writes
    this.flushInterval = setInterval(() => this.flush(), flushIntervalMs);
  }

  async write(entity: T): Promise<void> {
    // 1. Write to cache immediately
    await redis.setex(
      `${this.prefix}:${entity.id}`,
      3600,
      JSON.stringify(entity)
    );
    // 2. Queue for the next database flush
    this.pendingWrites.set(entity.id, entity);
    // Flush early when the batch size is reached
    if (this.pendingWrites.size >= this.batchSize) {
      await this.flush();
    }
  }

  async get(id: string): Promise<T | null> {
    // Serve pending (not yet persisted) writes first
    if (this.pendingWrites.has(id)) {
      return this.pendingWrites.get(id)!;
    }
    // Then check the cache
    const cached = await redis.get(`${this.prefix}:${id}`);
    if (cached !== null) {
      return JSON.parse(cached);
    }
    // Finally fall back to the database
    return this.repository.findById(id);
  }

  private async flush(): Promise<void> {
    if (this.pendingWrites.size === 0) return;
    const writes = Array.from(this.pendingWrites.values());
    this.pendingWrites.clear();
    try {
      // Batch write to database
      await this.repository.upsertMany(writes);
      console.log(`Flushed ${writes.length} writes to database`);
    } catch (error) {
      // On failure, re-queue -- but don't clobber entries written since clear()
      for (const entity of writes) {
        if (!this.pendingWrites.has(entity.id)) {
          this.pendingWrites.set(entity.id, entity);
        }
      }
      console.error('Flush failed:', error);
    }
  }

  async stop(): Promise<void> {
    clearInterval(this.flushInterval);
    await this.flush();
  }
}
```
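Because pending writes live only in process memory, they are lost if the process dies before a flush; calling `stop()` during graceful shutdown narrows that window. A minimal wiring sketch (the `Order` type and `orderRepository` are hypothetical):

```typescript
// shutdown.ts -- flush pending write-behind data on graceful shutdown (illustrative)
const orderWriteBehind = new WriteBehind<Order>('order', orderRepository);

for (const signal of ['SIGTERM', 'SIGINT'] as const) {
  process.once(signal, async () => {
    await orderWriteBehind.stop(); // stops the timer and flushes remaining writes
    process.exit(0);
  });
}
```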
## TTL Strategies

### Adaptive TTL
```typescript
// adaptive-ttl.ts
import { redis } from './redis';

interface CacheMetrics {
  hits: number;
  misses: number;
  lastAccess: number;
}

class AdaptiveTTLCache<T> {
  private metrics: Map<string, CacheMetrics> = new Map();
  private readonly minTtl: number;
  private readonly maxTtl: number;
  private readonly baseTtl: number;

  constructor(
    private prefix: string,
    options: { minTtl?: number; maxTtl?: number; baseTtl?: number } = {}
  ) {
    this.minTtl = options.minTtl || 60;     // 1 minute
    this.maxTtl = options.maxTtl || 86400;  // 24 hours
    this.baseTtl = options.baseTtl || 3600; // 1 hour
  }

  private calculateTtl(key: string): number {
    const metric = this.metrics.get(key);
    if (!metric) {
      return this.baseTtl;
    }
    // +1 in the denominator avoids division by zero for fresh keys
    const hitRate = metric.hits / (metric.hits + metric.misses + 1);
    const timeSinceLastAccess = Date.now() - metric.lastAccess;
    // Higher hit rate = longer TTL
    let ttl = this.baseTtl * (1 + hitRate);
    // Recently accessed = longer TTL
    if (timeSinceLastAccess < 60000) { // within the last minute
      ttl *= 1.5;
    }
    return Math.min(Math.max(ttl, this.minTtl), this.maxTtl);
  }

  async get(key: string): Promise<T | null> {
    const fullKey = `${this.prefix}:${key}`;
    const cached = await redis.get(fullKey);
    const metric = this.metrics.get(key) || { hits: 0, misses: 0, lastAccess: 0 };
    if (cached !== null) {
      metric.hits++;
      metric.lastAccess = Date.now();
      this.metrics.set(key, metric);
      return JSON.parse(cached);
    }
    metric.misses++;
    this.metrics.set(key, metric);
    return null;
  }

  async set(key: string, data: T): Promise<void> {
    const fullKey = `${this.prefix}:${key}`;
    const ttl = Math.round(this.calculateTtl(key));
    await redis.setex(fullKey, ttl, JSON.stringify(data));
  }
}
```
### Tiered Cache
```typescript
// tiered-cache.ts
import { redis } from './redis';

class TieredCache<T> {
  private l1: Map<string, { data: T; expiry: number }> = new Map();
  private l1MaxSize: number;
  private l1Ttl: number;

  constructor(
    private prefix: string,
    private l2Ttl: number = 3600,
    l1Options: { maxSize?: number; ttl?: number } = {}
  ) {
    this.l1MaxSize = l1Options.maxSize || 1000;
    this.l1Ttl = l1Options.ttl || 60;
  }

  async get(key: string): Promise<T | null> {
    // L1 (in-memory) check
    const l1Entry = this.l1.get(key);
    if (l1Entry && l1Entry.expiry > Date.now()) {
      // Refresh recency: Map preserves insertion order, so re-inserting makes eviction LRU
      this.l1.delete(key);
      this.l1.set(key, l1Entry);
      return l1Entry.data;
    }
    // L2 (Redis) check
    const l2Data = await redis.get(`${this.prefix}:${key}`);
    if (l2Data !== null) {
      const data = JSON.parse(l2Data);
      // Promote to L1
      this.setL1(key, data);
      return data;
    }
    return null;
  }

  async set(key: string, data: T): Promise<void> {
    // Store in L1
    this.setL1(key, data);
    // Store in L2
    await redis.setex(`${this.prefix}:${key}`, this.l2Ttl, JSON.stringify(data));
  }

  private setL1(key: string, data: T): void {
    // Evict the least recently used entry (the first key in insertion order)
    if (this.l1.size >= this.l1MaxSize) {
      const oldestKey = this.l1.keys().next().value;
      if (oldestKey !== undefined) {
        this.l1.delete(oldestKey);
      }
    }
    this.l1.set(key, {
      data,
      expiry: Date.now() + this.l1Ttl * 1000,
    });
  }

  async invalidate(key: string): Promise<void> {
    // Note: this clears only this process's L1; multi-instance deployments
    // need a pub/sub broadcast to invalidate peers' L1 caches as well
    this.l1.delete(key);
    await redis.del(`${this.prefix}:${key}`);
  }
}
```
## Distributed Locks

### Redlock Implementation
```typescript
// distributed-lock.ts
import { randomUUID } from 'crypto';
import Redlock, { ExecutionError, Lock } from 'redlock';
import { redis } from './redis';

const redlock = new Redlock([redis], {
  driftFactor: 0.01,
  retryCount: 10,
  retryDelay: 200,  // ms
  retryJitter: 200, // ms
  automaticExtensionThreshold: 500, // ms
});

// Usage example (`db` is the same assumed database client as in the cache-aside example)
async function processOrderWithLock(orderId: string) {
  let lock: Lock | undefined;
  try {
    // Acquire lock, held for up to 30 seconds
    lock = await redlock.acquire([`lock:order:${orderId}`], 30000);
    // Critical section
    const order = await db.order.findUnique({ where: { id: orderId } });
    if (order?.status !== 'pending') {
      throw new Error('Order already processed');
    }
    await db.order.update({
      where: { id: orderId },
      data: { status: 'processing' },
    });
    // Execute processing...
    await db.order.update({
      where: { id: orderId },
      data: { status: 'completed' },
    });
  } catch (error) {
    // redlock v5 throws ExecutionError when acquisition fails after all retries
    if (error instanceof ExecutionError) {
      console.log('Could not acquire lock, order is being processed');
    }
    throw error;
  } finally {
    // Release lock
    if (lock) {
      await lock.release();
    }
  }
}

// Simple lock implementation
class SimpleLock {
  async acquire(key: string, ttl: number = 30000): Promise<string | null> {
    // NX = only set if absent, PX = TTL in milliseconds
    const token = randomUUID();
    const result = await redis.set(`lock:${key}`, token, 'PX', ttl, 'NX');
    return result === 'OK' ? token : null;
  }

  async release(key: string, token: string): Promise<boolean> {
    // Compare-and-delete in Lua so we never delete a lock someone else now holds
    const script = `
      if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("del", KEYS[1])
      else
        return 0
      end
    `;
    const result = await redis.eval(script, 1, `lock:${key}`, token);
    return result === 1;
  }

  async withLock<T>(
    key: string,
    fn: () => Promise<T>,
    ttl: number = 30000
  ): Promise<T> {
    const token = await this.acquire(key, ttl);
    if (!token) {
      throw new Error(`Could not acquire lock for ${key}`);
    }
    try {
      return await fn();
    } finally {
      await this.release(key, token);
    }
  }
}
```
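A quick usage sketch of `SimpleLock`; the resource name and `generateDailyReport` job are illustrative:

```typescript
const lock = new SimpleLock();

// Only one process at a time runs the job; others fail fast with an error
await lock.withLock('report:daily', async () => {
  await generateDailyReport(); // hypothetical job function
}, 60000);
```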
## Session Management
```typescript
// session.ts
import { randomBytes } from 'crypto';
import { redis } from './redis';

interface Session {
  userId: string;
  email: string;
  role: string;
  createdAt: number;
  lastAccessedAt: number;
}

class RedisSessionStore {
  private readonly prefix = 'session';
  private readonly ttl = 24 * 60 * 60; // 24 hours, in seconds

  async create(userId: string, userData: Partial<Session>): Promise<string> {
    const sessionId = randomBytes(32).toString('hex');
    const session: Session = {
      userId,
      email: userData.email || '',
      role: userData.role || 'user',
      createdAt: Date.now(),
      lastAccessedAt: Date.now(),
    };
    const key = `${this.prefix}:${sessionId}`;
    await redis.setex(key, this.ttl, JSON.stringify(session));
    // Track all sessions per user so they can be revoked together
    await redis.sadd(`user_sessions:${userId}`, sessionId);
    await redis.expire(`user_sessions:${userId}`, this.ttl);
    return sessionId;
  }

  async get(sessionId: string): Promise<Session | null> {
    const key = `${this.prefix}:${sessionId}`;
    const data = await redis.get(key);
    if (!data) return null;
    const session: Session = JSON.parse(data);
    // Sliding expiration: each access rewrites the session and resets its TTL
    session.lastAccessedAt = Date.now();
    await redis.setex(key, this.ttl, JSON.stringify(session));
    return session;
  }

  async destroy(sessionId: string): Promise<void> {
    const session = await this.get(sessionId);
    if (session) {
      await redis.srem(`user_sessions:${session.userId}`, sessionId);
    }
    await redis.del(`${this.prefix}:${sessionId}`);
  }

  async destroyAllUserSessions(userId: string): Promise<void> {
    const sessionIds = await redis.smembers(`user_sessions:${userId}`);
    if (sessionIds.length > 0) {
      const keys = sessionIds.map((id) => `${this.prefix}:${id}`);
      await redis.del(...keys);
    }
    await redis.del(`user_sessions:${userId}`);
  }

  async getUserSessionCount(userId: string): Promise<number> {
    return redis.scard(`user_sessions:${userId}`);
  }
}

const sessionStore = new RedisSessionStore();
export { sessionStore };
```
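As a usage sketch, here is how the store might back an authentication middleware. It assumes Express with `cookie-parser` and a session cookie named `sid`; all of these are assumptions, not part of the store itself:

```typescript
// auth-middleware.ts -- hypothetical Express integration (assumes cookie-parser and a 'sid' cookie)
import type { Request, Response, NextFunction } from 'express';
import { sessionStore } from './session';

export async function requireSession(req: Request, res: Response, next: NextFunction) {
  const sessionId = req.cookies?.sid;
  const session = sessionId ? await sessionStore.get(sessionId) : null;
  if (!session) {
    res.status(401).json({ error: 'Not authenticated' });
    return;
  }
  (req as any).session = session; // attach for downstream handlers
  next();
}
```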
## Rate Limiting

### Fixed Window
```typescript
// rate-limiter.ts
import { redis } from './redis';

class FixedWindowRateLimiter {
  constructor(
    private windowSizeSeconds: number,
    private maxRequests: number
  ) {}

  async isAllowed(key: string): Promise<{ allowed: boolean; remaining: number }> {
    // The window number is baked into the key, so each window gets a fresh counter
    const window = Math.floor(Date.now() / 1000 / this.windowSizeSeconds);
    const windowKey = `ratelimit:${key}:${window}`;
    const current = await redis.incr(windowKey);
    if (current === 1) {
      // First request in this window: expire the key along with the window
      await redis.expire(windowKey, this.windowSizeSeconds);
    }
    return {
      allowed: current <= this.maxRequests,
      remaining: Math.max(0, this.maxRequests - current),
    };
  }
}
```
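A minimal usage sketch, limiting each client to 100 requests per minute; the identifier scheme (IP, user ID, API key) is up to you:

```typescript
const limiter = new FixedWindowRateLimiter(60, 100); // 100 requests per 60-second window

async function checkLimit(clientIp: string): Promise<boolean> {
  const { allowed, remaining } = await limiter.isAllowed(`ip:${clientIp}`);
  console.log(`Remaining in window: ${remaining}`);
  return allowed; // caller should respond with HTTP 429 when false
}
```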
### Sliding Window Log
```typescript
class SlidingWindowLogRateLimiter {
  constructor(
    private windowSizeMs: number,
    private maxRequests: number
  ) {}

  async isAllowed(key: string): Promise<{ allowed: boolean; remaining: number; retryAfter?: number }> {
    const now = Date.now();
    const windowStart = now - this.windowSizeMs;
    const redisKey = `ratelimit:sliding:${key}`;

    // Trim and count in a single round trip
    const multi = redis.multi();
    // Remove entries that fell out of the window
    multi.zremrangebyscore(redisKey, 0, windowStart);
    // Count requests remaining in the current window
    multi.zcard(redisKey);
    const results = await multi.exec();
    const currentCount = results![1][1] as number;

    if (currentCount >= this.maxRequests) {
      // Use the oldest request's timestamp to compute when a slot frees up
      const oldestRequest = await redis.zrange(redisKey, 0, 0, 'WITHSCORES');
      const retryAfter = oldestRequest.length > 0
        ? Math.ceil((parseInt(oldestRequest[1], 10) + this.windowSizeMs - now) / 1000)
        : 1;
      return { allowed: false, remaining: 0, retryAfter };
    }

    // Record this request (the random suffix keeps members unique within one millisecond).
    // Note: the count and the add are separate commands, so concurrent callers can slightly
    // exceed the limit; a Lua script (as in the token bucket below) would make this atomic.
    await redis.zadd(redisKey, now, `${now}:${Math.random()}`);
    await redis.expire(redisKey, Math.ceil(this.windowSizeMs / 1000));

    return {
      allowed: true,
      remaining: this.maxRequests - currentCount - 1,
    };
  }
}
```
### Token Bucket
```typescript
class TokenBucketRateLimiter {
  constructor(
    private bucketSize: number,              // Maximum bucket capacity
    private refillRate: number,              // Tokens added per refill interval
    private refillIntervalMs: number = 1000  // With the default, refillRate is tokens per second
  ) {}

  async isAllowed(key: string, tokensRequired: number = 1): Promise<{
    allowed: boolean;
    tokens: number;
    retryAfter?: number;
  }> {
    const redisKey = `ratelimit:bucket:${key}`;
    // The whole read-refill-consume cycle runs atomically as a Lua script
    const script = `
      local bucket_key = KEYS[1]
      local bucket_size = tonumber(ARGV[1])
      local refill_rate = tonumber(ARGV[2])
      local refill_interval = tonumber(ARGV[3])
      local tokens_required = tonumber(ARGV[4])
      local now = tonumber(ARGV[5])

      local bucket = redis.call('HMGET', bucket_key, 'tokens', 'last_refill')
      local tokens = tonumber(bucket[1]) or bucket_size
      local last_refill = tonumber(bucket[2]) or now

      -- Refill tokens for each full interval that has elapsed
      local time_passed = now - last_refill
      local refills = math.floor(time_passed / refill_interval)
      tokens = math.min(bucket_size, tokens + refills * refill_rate)
      last_refill = last_refill + refills * refill_interval

      if tokens >= tokens_required then
        tokens = tokens - tokens_required
        redis.call('HSET', bucket_key, 'tokens', tokens, 'last_refill', last_refill)
        redis.call('EXPIRE', bucket_key, 3600)
        return {1, tokens}
      else
        redis.call('HSET', bucket_key, 'tokens', tokens, 'last_refill', last_refill)
        redis.call('EXPIRE', bucket_key, 3600)
        -- Wait time (ms) until enough tokens have been refilled
        local tokens_needed = tokens_required - tokens
        local wait_time = math.ceil(tokens_needed / refill_rate * refill_interval)
        return {0, tokens, wait_time}
      end
    `;

    const result = await redis.eval(
      script,
      1,
      redisKey,
      this.bucketSize,
      this.refillRate,
      this.refillIntervalMs,
      tokensRequired,
      Date.now()
    ) as [number, number, number?];

    return {
      allowed: result[0] === 1,
      tokens: result[1],
      retryAfter: result[2] ? Math.ceil(result[2] / 1000) : undefined,
    };
  }
}
```
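Usage mirrors the other limiters; here, a burst-tolerant limit of 20 tokens refilled at 5 per second (the numbers are illustrative):

```typescript
const bucket = new TokenBucketRateLimiter(20, 5); // capacity 20, +5 tokens per second

async function callExpensiveApi(userId: string): Promise<void> {
  const { allowed, tokens, retryAfter } = await bucket.isAllowed(`user:${userId}`, 1);
  if (!allowed) {
    console.log(`Rate limited; retry in ~${retryAfter}s`);
    return;
  }
  console.log(`Proceeding with ${tokens} tokens left in the bucket`);
}
```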
## Monitoring and Metrics
```typescript
// redis-metrics.ts
import { redis } from './redis';

class RedisMetrics {
  async getInfo(): Promise<Record<string, string>> {
    // INFO returns "key:value" lines separated by CRLF
    const info = await redis.info();
    const result: Record<string, string> = {};
    for (const line of info.split('\r\n')) {
      if (line.includes(':')) {
        const [key, value] = line.split(':');
        result[key] = value;
      }
    }
    return result;
  }

  async getKeyCount(): Promise<number> {
    // Parses the db0 line ("keys=N,expires=M,avg_ttl=..."); DBSIZE is a simpler alternative
    const info = await this.getInfo();
    return parseInt(info['db0']?.split(',')[0]?.split('=')[1] || '0', 10);
  }

  async getMemoryUsage(): Promise<{
    used: number;
    peak: number;
    fragmentation: number;
  }> {
    const info = await this.getInfo();
    return {
      used: parseInt(info['used_memory'] || '0', 10),
      peak: parseInt(info['used_memory_peak'] || '0', 10),
      fragmentation: parseFloat(info['mem_fragmentation_ratio'] || '1'),
    };
  }

  async getHitRate(): Promise<number> {
    const info = await this.getInfo();
    const hits = parseInt(info['keyspace_hits'] || '0', 10);
    const misses = parseInt(info['keyspace_misses'] || '0', 10);
    const total = hits + misses;
    return total > 0 ? hits / total : 0;
  }

  async getSlowLogs(count: number = 10): Promise<any[]> {
    return redis.slowlog('GET', count);
  }
}

const redisMetrics = new RedisMetrics();
export { redisMetrics };
```
## Summary
Redis is a powerful tool for high-speed data access, but the payoff depends on choosing a caching pattern that matches your workload.
### Choosing a Cache Strategy
| Pattern | Use Case |
|---|---|
| Cache-Aside | Read-heavy workloads with infrequent updates |
| Write-Through | Strong cache-database consistency |
| Write-Behind | Write-heavy workloads that tolerate delayed persistence |
### Best Practices
- Proper TTL Design: set expirations based on how often the data changes and how much staleness you can tolerate
- Key Design: namespace keys and version them (see the sketch below)
- Connection Pooling: reuse long-lived clients instead of connecting per request
- Monitoring: track hit rate, memory usage, and slow commands
- Failover: plan for high availability with Redis Cluster or Sentinel
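On key design in particular, a common convention (illustrative, not mandated by Redis) is to namespace keys by domain and embed a schema version, so a format change can roll out without mass invalidation:

```typescript
// keys.ts -- namespaced, versioned cache keys (an illustrative convention)
const CACHE_VERSION = 'v2'; // bump when the cached shape changes; old keys simply expire

function cacheKey(namespace: string, id: string): string {
  return `app:${CACHE_VERSION}:${namespace}:${id}`;
}

cacheKey('user', '42');        // "app:v2:user:42"
cacheKey('session', 'abc123'); // "app:v2:session:abc123"
```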
Properly utilizing Redis can significantly improve application performance.