Cache Extension
High-performance caching with multiple backends and specialized cache types
Cache Extension
The Cache extension provides high-performance caching capabilities with support for multiple backends, specialized cache types, and advanced features like TTL management, distributed caching, and cache warming.
The Cache extension supports Redis, Memcached, in-memory, and database backends with unified interfaces for all cache operations.
Features
Multiple Backends
- Redis: High-performance distributed caching
- Memcached: Simple distributed memory caching
- In-Memory: Fast local caching with LRU eviction
- Database: Persistent caching with SQL/NoSQL backends
- Hybrid: Multi-tier caching strategies
Cache Types
- String Cache: Simple key-value string storage
- JSON Cache: Automatic JSON serialization/deserialization
- Counter Cache: Atomic increment/decrement operations
- Multi Cache: Batch operations for multiple keys
- Tagged Cache: Group-based cache invalidation
Advanced Features
- TTL Management: Flexible expiration policies
- Cache Warming: Preload frequently accessed data
- Distributed Locking: Prevent cache stampedes
- Compression: Reduce memory usage
- Monitoring: Real-time cache metrics
- Eviction Policies: LRU, LFU, FIFO strategies
Installation
go get github.com/xraph/forge/extensions/cache
Configuration
extensions:
cache:
# Default backend
default_backend: "redis"
# Redis Configuration
redis:
host: "localhost"
port: 6379
password: "${REDIS_PASSWORD}"
database: 0
pool_size: 10
max_retries: 3
dial_timeout: "5s"
read_timeout: "3s"
write_timeout: "3s"
pool_timeout: "4s"
idle_timeout: "5m"
idle_check_frequency: "1m"
cluster:
enabled: false
nodes:
- "localhost:7000"
- "localhost:7001"
- "localhost:7002"
sentinel:
enabled: false
master_name: "mymaster"
nodes:
- "localhost:26379"
- "localhost:26380"
# Memcached Configuration
memcached:
servers:
- "localhost:11211"
- "localhost:11212"
timeout: "1s"
max_idle_conns: 2
# In-Memory Configuration
memory:
max_size: "100MB"
eviction_policy: "lru" # lru, lfu, fifo
cleanup_interval: "10m"
# Database Configuration
database:
driver: "postgres"
dsn: "${DATABASE_URL}"
table_name: "cache_entries"
cleanup_interval: "1h"
# Global Settings
settings:
default_ttl: "1h"
compression:
enabled: true
algorithm: "gzip" # gzip, lz4, snappy
threshold: 1024 # bytes
serialization:
format: "json" # json, msgpack, gob
monitoring:
enabled: true
metrics_interval: "30s"
warming:
enabled: true
strategies: ["popular", "recent"]
# Redis Configuration
export REDIS_HOST="localhost"
export REDIS_PORT="6379"
export REDIS_PASSWORD="your-redis-password"
export REDIS_DATABASE="0"
# Memcached Configuration
export MEMCACHED_SERVERS="localhost:11211,localhost:11212"
# Cache Settings
export CACHE_DEFAULT_BACKEND="redis"
export CACHE_DEFAULT_TTL="1h"
export CACHE_COMPRESSION_ENABLED="true"
export CACHE_MONITORING_ENABLED="true"
# Database Cache
export CACHE_DATABASE_URL="postgres://user:pass@localhost/cache_db"
package main
import (
"time"
"github.com/xraph/forge"
"github.com/xraph/forge/extensions/cache"
)
// main boots a Forge application with the Cache extension registered.
// The Config values mirror the YAML example above: Redis as the default
// backend, an in-memory LRU tier, gzip compression, and 30s metrics.
func main() {
app := forge.New()
// Configure Cache extension
cacheExt := cache.New(cache.Config{
DefaultBackend: "redis",
Redis: cache.RedisConfig{
Host: "localhost",
Port: 6379,
Password: "your-redis-password",
Database: 0,
PoolSize: 10,
MaxRetries: 3,
DialTimeout: time.Second * 5,
ReadTimeout: time.Second * 3,
WriteTimeout: time.Second * 3,
},
Memory: cache.MemoryConfig{
MaxSize: 100 * 1024 * 1024, // 100MB
EvictionPolicy: "lru",
CleanupInterval: time.Minute * 10,
},
Settings: cache.Settings{
DefaultTTL: time.Hour,
Compression: cache.CompressionConfig{
Enabled: true,
Algorithm: "gzip",
Threshold: 1024,
},
Monitoring: cache.MonitoringConfig{
Enabled: true,
MetricsInterval: time.Second * 30,
},
},
})
app.RegisterExtension(cacheExt)
app.Run()
}Usage Examples
Basic Cache Operations
// cacheStringHandler demonstrates the basic string cache lifecycle:
// Set, Get (with not-found handling), Exists, and Delete on a single key.
func cacheStringHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Set a string value
err := cache.Set(c.Context(), "user:123:name", "John Doe", time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set cache"})
}
// Get a string value
value, err := cache.Get(c.Context(), "user:123:name")
if err != nil {
// IsNotFound distinguishes a cache miss from an operational error.
if cache.IsNotFound(err) {
return c.JSON(404, map[string]string{"error": "Key not found"})
}
return c.JSON(500, map[string]string{"error": "Failed to get cache"})
}
// Check if key exists
exists, err := cache.Exists(c.Context(), "user:123:name")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to check existence"})
}
// Delete a key
err = cache.Delete(c.Context(), "user:123:name")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to delete cache"})
}
return c.JSON(200, map[string]interface{}{
"value": value,
"exists": exists,
"message": "Cache operations completed",
})
}
// cacheWithDefaultTTLHandler shows that a TTL of 0 falls back to the
// configured default TTL, and how to read a key's remaining TTL.
func cacheWithDefaultTTLHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Set with default TTL (configured in settings)
err := cache.Set(c.Context(), "temp:data", "temporary value", 0)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set cache"})
}
// Get TTL for a key
ttl, err := cache.TTL(c.Context(), "temp:data")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to get TTL"})
}
return c.JSON(200, map[string]interface{}{
"ttl_seconds": int(ttl.Seconds()),
"message": "TTL retrieved successfully",
})
}type User struct {
// Field tags control the JSON shape used by SetJSON/GetJSON.
ID string `json:"id"`
Name string `json:"name"`
Email string `json:"email"`
LastSeen time.Time `json:"last_seen"`
}
// cacheJSONHandler stores a User via automatic JSON serialization and
// reads it back into a typed value with GetJSON.
func cacheJSONHandler(c forge.Context) error {
cache := forge.GetCache(c)
user := User{
ID: "123",
Name: "John Doe",
Email: "john@example.com",
LastSeen: time.Now(),
}
// Set JSON object
err := cache.SetJSON(c.Context(), "user:123", user, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to cache user"})
}
// Get JSON object
var cachedUser User
err = cache.GetJSON(c.Context(), "user:123", &cachedUser)
if err != nil {
if cache.IsNotFound(err) {
return c.JSON(404, map[string]string{"error": "User not found in cache"})
}
return c.JSON(500, map[string]string{"error": "Failed to get cached user"})
}
return c.JSON(200, map[string]interface{}{
"user": cachedUser,
"message": "User retrieved from cache",
})
}
// cacheComplexDataHandler caches a nested structure (a slice of Users plus
// pagination metadata) and decodes it back into a generic map.
func cacheComplexDataHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Cache complex nested data
data := map[string]interface{}{
"users": []User{
{ID: "1", Name: "Alice", Email: "alice@example.com"},
{ID: "2", Name: "Bob", Email: "bob@example.com"},
},
"metadata": map[string]interface{}{
"total": 2,
"page": 1,
"created_at": time.Now(),
},
}
err := cache.SetJSON(c.Context(), "users:page:1", data, time.Minute*30)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to cache data"})
}
var cachedData map[string]interface{}
err = cache.GetJSON(c.Context(), "users:page:1", &cachedData)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to get cached data"})
}
return c.JSON(200, cachedData)
}func ttlManagementHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Set with specific TTL
err := cache.Set(c.Context(), "session:abc123", "user_data", time.Minute*30)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set cache"})
}
// Extend TTL
err = cache.Expire(c.Context(), "session:abc123", time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to extend TTL"})
}
// Get remaining TTL
ttl, err := cache.TTL(c.Context(), "session:abc123")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to get TTL"})
}
// Set expiration at specific time
expireAt := time.Now().Add(time.Hour * 2)
err = cache.ExpireAt(c.Context(), "session:abc123", expireAt)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set expiration time"})
}
// NOTE(review): ttl was read before ExpireAt, so the reported ttl_seconds
// reflects the 1h extension, not the final 2h expiry — confirm intended.
return c.JSON(200, map[string]interface{}{
"ttl_seconds": int(ttl.Seconds()),
"expires_at": expireAt,
"message": "TTL management completed",
})
}
// conditionalCacheHandler initializes a daily counter atomically with SetNX;
// if the key already exists it increments instead.
func conditionalCacheHandler(c forge.Context) error {
cache := forge.GetCache(c)
key := "counter:daily"
// Set only if key doesn't exist
success, err := cache.SetNX(c.Context(), key, "1", time.Hour*24)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set cache"})
}
if !success {
// Key already exists, increment counter
newValue, err := cache.Incr(c.Context(), key)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to increment counter"})
}
return c.JSON(200, map[string]interface{}{
"counter": newValue,
"message": "Counter incremented",
})
}
return c.JSON(200, map[string]interface{}{
"counter": 1,
"message": "Counter initialized",
})
}func batchOperationsHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Set multiple keys at once
items := map[string]interface{}{
"user:1:name": "Alice",
"user:2:name": "Bob",
"user:3:name": "Charlie",
"user:1:email": "alice@example.com",
"user:2:email": "bob@example.com",
"user:3:email": "charlie@example.com",
}
err := cache.SetMultiple(c.Context(), items, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set multiple keys"})
}
// Get multiple keys at once
keys := []string{"user:1:name", "user:2:name", "user:3:name"}
values, err := cache.GetMultiple(c.Context(), keys)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to get multiple keys"})
}
// Delete multiple keys
deletedCount, err := cache.DeleteMultiple(c.Context(), keys)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to delete multiple keys"})
}
return c.JSON(200, map[string]interface{}{
"values": values,
"deleted_count": deletedCount,
"message": "Batch operations completed",
})
}
// pipelineOperationsHandler queues several operations on a pipeline and
// executes them together with Exec.
func pipelineOperationsHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Use pipeline for atomic operations
// NOTE(review): whether Exec is truly atomic likely depends on the
// backend (e.g. Redis pipelines batch but do not isolate) — confirm.
pipeline := cache.Pipeline()
// Queue multiple operations
pipeline.Set("key1", "value1", time.Hour)
pipeline.Set("key2", "value2", time.Hour)
pipeline.Incr("counter")
pipeline.Get("existing_key")
// Execute all operations atomically
results, err := pipeline.Exec(c.Context())
if err != nil {
return c.JSON(500, map[string]string{"error": "Pipeline execution failed"})
}
return c.JSON(200, map[string]interface{}{
"results": results,
"message": "Pipeline operations completed",
})
}Specialized Cache Types
// counterCacheHandler walks through the atomic counter operations:
// Incr, IncrBy, Decr, and DecrBy. Note the unconditional Set resets the
// counter to "0" on every request before the arithmetic runs.
func counterCacheHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Initialize counter
err := cache.Set(c.Context(), "page_views", "0", 0) // No expiration
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to initialize counter"})
}
// Increment counter
newValue, err := cache.Incr(c.Context(), "page_views")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to increment counter"})
}
// Increment by specific amount
newValue, err = cache.IncrBy(c.Context(), "page_views", 5)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to increment counter by amount"})
}
// Decrement counter
newValue, err = cache.Decr(c.Context(), "page_views")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to decrement counter"})
}
// Decrement by specific amount
newValue, err = cache.DecrBy(c.Context(), "page_views", 2)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to decrement counter by amount"})
}
// Only the final value (after +1+5-1-2) is returned.
return c.JSON(200, map[string]interface{}{
"page_views": newValue,
"message": "Counter operations completed",
})
}
// rateLimitingWithCounterHandler implements a fixed one-minute window rate
// limit: the key embeds the current minute, Incr counts requests, and the
// first request in each window attaches a one-minute TTL.
func rateLimitingWithCounterHandler(c forge.Context) error {
cache := forge.GetCache(c)
// NOTE(review): this type assertion panics if "user_id" is absent or not a
// string — confirm upstream middleware always sets it.
userID := c.Get("user_id").(string)
// Rate limiting using counter cache
key := fmt.Sprintf("rate_limit:%s:%d", userID, time.Now().Unix()/60) // Per minute
// Increment request count
count, err := cache.Incr(c.Context(), key)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to increment rate limit counter"})
}
// Set expiration on first request
if count == 1 {
err = cache.Expire(c.Context(), key, time.Minute)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set rate limit expiration"})
}
}
// Check rate limit
const maxRequests = 100
if count > maxRequests {
return c.JSON(429, map[string]interface{}{
"error": "Rate limit exceeded",
"limit": maxRequests,
"current": count,
"reset_in": 60 - (time.Now().Unix() % 60),
})
}
return c.JSON(200, map[string]interface{}{
"requests_remaining": maxRequests - count,
"reset_in": 60 - (time.Now().Unix() % 60),
})
}func taggedCacheHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Set cache with tags
err := cache.SetWithTags(c.Context(), "user:123:profile", "user_data",
[]string{"user:123", "profiles"}, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set tagged cache"})
}
err = cache.SetWithTags(c.Context(), "user:123:settings", "settings_data",
[]string{"user:123", "settings"}, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set tagged cache"})
}
err = cache.SetWithTags(c.Context(), "user:456:profile", "other_user_data",
[]string{"user:456", "profiles"}, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to set tagged cache"})
}
// Invalidate all cache entries with specific tag
// (only the two "user:123" entries are removed; "user:456:profile" survives)
deletedCount, err := cache.InvalidateTag(c.Context(), "user:123")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to invalidate tag"})
}
return c.JSON(200, map[string]interface{}{
"deleted_count": deletedCount,
"message": "Tagged cache operations completed",
})
}
// cacheInvalidationHandler caches one user under several tags, then shows
// invalidation per-user and across the whole "profiles" group.
func cacheInvalidationHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Cache user data with multiple tags
userID := c.Param("user_id")
userData := map[string]interface{}{
"id": userID,
"name": "John Doe",
"email": "john@example.com",
}
tags := []string{
fmt.Sprintf("user:%s", userID),
"users",
"profiles",
}
err := cache.SetJSONWithTags(c.Context(),
fmt.Sprintf("user:%s:full", userID),
userData, tags, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to cache user data"})
}
// When user is updated, invalidate all related cache
err = cache.InvalidateTag(c.Context(), fmt.Sprintf("user:%s", userID))
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to invalidate user cache"})
}
// Invalidate all user profiles when global update occurs
err = cache.InvalidateTag(c.Context(), "profiles")
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to invalidate profiles cache"})
}
return c.JSON(200, map[string]string{
"message": "Cache invalidation completed",
})
}func distributedLockHandler(c forge.Context) error {
cache := forge.GetCache(c)
lockKey := "process:critical_section"
// Unique lock value lets Unlock verify this holder still owns the lock.
lockValue := fmt.Sprintf("worker:%d", time.Now().UnixNano())
lockTTL := time.Second * 30
// Acquire distributed lock
acquired, err := cache.Lock(c.Context(), lockKey, lockValue, lockTTL)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to acquire lock"})
}
if !acquired {
return c.JSON(409, map[string]string{
"error": "Resource is locked by another process",
})
}
// Ensure lock is released even if function panics
defer func() {
err := cache.Unlock(c.Context(), lockKey, lockValue)
if err != nil {
log.Printf("Failed to release lock: %v", err)
}
}()
// Perform critical section work
time.Sleep(time.Second * 5) // Simulate work
// Extend lock if needed
extended, err := cache.ExtendLock(c.Context(), lockKey, lockValue, time.Second*30)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to extend lock"})
}
return c.JSON(200, map[string]interface{}{
"message": "Critical section completed",
"lock_extended": extended,
})
}
// preventCacheStampedeHandler guards an expensive computation with a lock:
// on a miss, one request computes and fills the cache while competitors
// briefly wait and re-read, or back off with 503.
func preventCacheStampedeHandler(c forge.Context) error {
cache := forge.GetCache(c)
key := "expensive:computation:result"
lockKey := "lock:" + key
// Try to get from cache first
result, err := cache.Get(c.Context(), key)
if err == nil {
return c.JSON(200, map[string]interface{}{
"result": result,
"source": "cache",
})
}
if !cache.IsNotFound(err) {
return c.JSON(500, map[string]string{"error": "Cache error"})
}
// Cache miss - try to acquire lock to compute result
lockValue := fmt.Sprintf("worker:%d", time.Now().UnixNano())
acquired, err := cache.Lock(c.Context(), lockKey, lockValue, time.Minute)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to acquire computation lock"})
}
if !acquired {
// Another process is computing, wait and try cache again
time.Sleep(time.Millisecond * 100)
result, err := cache.Get(c.Context(), key)
if err == nil {
return c.JSON(200, map[string]interface{}{
"result": result,
"source": "cache_after_wait",
})
}
return c.JSON(503, map[string]string{
"error": "Service temporarily unavailable",
})
}
// NOTE(review): unlike distributedLockHandler, the Unlock error is
// silently dropped here — consider logging it for consistency.
defer cache.Unlock(c.Context(), lockKey, lockValue)
// Double-check cache after acquiring lock
result, err = cache.Get(c.Context(), key)
if err == nil {
return c.JSON(200, map[string]interface{}{
"result": result,
"source": "cache_double_check",
})
}
// Perform expensive computation
computedResult := performExpensiveComputation()
// Cache the result
err = cache.Set(c.Context(), key, computedResult, time.Hour)
if err != nil {
log.Printf("Failed to cache result: %v", err)
}
return c.JSON(200, map[string]interface{}{
"result": computedResult,
"source": "computed",
})
}
// performExpensiveComputation simulates slow work with a 2s sleep and
// returns a timestamped result string.
func performExpensiveComputation() string {
// Simulate expensive computation
time.Sleep(time.Second * 2)
return fmt.Sprintf("computed_result_%d", time.Now().Unix())
}func cacheWarmingHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Warm cache with popular data
popularItems := []string{"item1", "item2", "item3"}
for _, item := range popularItems {
key := fmt.Sprintf("popular:%s", item)
// Check if already cached
exists, err := cache.Exists(c.Context(), key)
if err != nil {
// Best-effort warming: skip this item on error rather than fail the request.
continue
}
if !exists {
// Load data and cache it
data := loadItemData(item)
err = cache.SetJSON(c.Context(), key, data, time.Hour*6)
if err != nil {
log.Printf("Failed to warm cache for %s: %v", item, err)
}
}
}
return c.JSON(200, map[string]string{
"message": "Cache warming completed",
})
}
// preloadUserDataHandler checks which of a user's related keys are missing
// (profile, settings, preferences) and fills only those.
func preloadUserDataHandler(c forge.Context) error {
cache := forge.GetCache(c)
userID := c.Param("user_id")
// Preload related user data
keys := []string{
fmt.Sprintf("user:%s:profile", userID),
fmt.Sprintf("user:%s:settings", userID),
fmt.Sprintf("user:%s:preferences", userID),
}
// Check which keys are missing
// (ExistsMultiple returns one bool per key, in the same order)
existingKeys, err := cache.ExistsMultiple(c.Context(), keys)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to check cache"})
}
// Load missing data
for i, key := range keys {
if !existingKeys[i] {
var data interface{}
switch {
case strings.Contains(key, "profile"):
data = loadUserProfile(userID)
case strings.Contains(key, "settings"):
data = loadUserSettings(userID)
case strings.Contains(key, "preferences"):
data = loadUserPreferences(userID)
}
if data != nil {
err = cache.SetJSON(c.Context(), key, data, time.Hour*2)
if err != nil {
log.Printf("Failed to preload %s: %v", key, err)
}
}
}
}
return c.JSON(200, map[string]string{
"message": "User data preloaded",
})
}
// loadItemData fabricates an item record keyed by item, standing in for a
// real database read in these examples.
func loadItemData(item string) interface{} {
	record := map[string]interface{}{
		"id":   item,
		"data": "some_data",
	}
	record["name"] = fmt.Sprintf("Item %s", item)
	return record
}
// loadUserProfile fabricates a profile record for userID (database stand-in).
func loadUserProfile(userID string) interface{} {
	profile := make(map[string]interface{}, 3)
	profile["id"] = userID
	profile["name"] = "User Name"
	profile["bio"] = "User bio"
	return profile
}
// loadUserSettings fabricates per-user settings (database stand-in).
// The userID parameter is accepted for symmetry with the other loaders
// but does not influence the stub values.
func loadUserSettings(userID string) interface{} {
	settings := make(map[string]interface{}, 3)
	settings["theme"] = "dark"
	settings["language"] = "en"
	settings["timezone"] = "UTC"
	return settings
}
// loadUserPreferences fabricates notification/privacy preferences for a
// user (database stand-in); userID is currently unused by the stub.
func loadUserPreferences(userID string) interface{} {
return map[string]interface{}{
"notifications": true,
"privacy": "public",
}
}Advanced Patterns
// Cache-Aside (Lazy Loading) pattern
// getUserCacheAside reads through the cache: hit returns immediately,
// miss loads from the database and back-fills the cache best-effort.
func getUserCacheAside(c forge.Context) error {
cache := forge.GetCache(c)
userID := c.Param("user_id")
key := fmt.Sprintf("user:%s", userID)
// Try to get from cache first
var user User
err := cache.GetJSON(c.Context(), key, &user)
if err == nil {
// Cache hit
return c.JSON(200, map[string]interface{}{
"user": user,
"source": "cache",
})
}
if !cache.IsNotFound(err) {
return c.JSON(500, map[string]string{"error": "Cache error"})
}
// Cache miss - load from database
user, err = loadUserFromDB(userID)
if err != nil {
return c.JSON(404, map[string]string{"error": "User not found"})
}
// Store in cache for future requests
err = cache.SetJSON(c.Context(), key, user, time.Hour)
if err != nil {
// Log error but don't fail the request
log.Printf("Failed to cache user %s: %v", userID, err)
}
return c.JSON(200, map[string]interface{}{
"user": user,
"source": "database",
})
}
// updateUserCacheAside writes to the database and then invalidates (rather
// than updates) the cached entry; the next read repopulates it.
func updateUserCacheAside(c forge.Context) error {
cache := forge.GetCache(c)
userID := c.Param("user_id")
key := fmt.Sprintf("user:%s", userID)
var updateReq User
if err := c.Bind(&updateReq); err != nil {
return c.JSON(400, map[string]string{"error": "Invalid request"})
}
// Update in database
user, err := updateUserInDB(userID, updateReq)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to update user"})
}
// Invalidate cache
err = cache.Delete(c.Context(), key)
if err != nil {
log.Printf("Failed to invalidate cache for user %s: %v", userID, err)
}
return c.JSON(200, map[string]interface{}{
"user": user,
"message": "User updated successfully",
})
}// Write-Through pattern
// updateUserWriteThrough writes to the database first, then synchronously
// updates the cache so both stores agree after the request.
func updateUserWriteThrough(c forge.Context) error {
cache := forge.GetCache(c)
userID := c.Param("user_id")
key := fmt.Sprintf("user:%s", userID)
var updateReq User
if err := c.Bind(&updateReq); err != nil {
return c.JSON(400, map[string]string{"error": "Invalid request"})
}
// Update in database first
user, err := updateUserInDB(userID, updateReq)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to update user"})
}
// Update cache immediately
err = cache.SetJSON(c.Context(), key, user, time.Hour)
if err != nil {
// Log error but don't fail since DB update succeeded
log.Printf("Failed to update cache for user %s: %v", userID, err)
}
return c.JSON(200, map[string]interface{}{
"user": user,
"message": "User updated successfully",
})
}
// createUserWriteThrough creates the user in the database and eagerly
// caches the fresh record under its new ID.
func createUserWriteThrough(c forge.Context) error {
cache := forge.GetCache(c)
var createReq User
if err := c.Bind(&createReq); err != nil {
return c.JSON(400, map[string]string{"error": "Invalid request"})
}
// Create in database
user, err := createUserInDB(createReq)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to create user"})
}
// Cache the new user
key := fmt.Sprintf("user:%s", user.ID)
err = cache.SetJSON(c.Context(), key, user, time.Hour)
if err != nil {
log.Printf("Failed to cache new user %s: %v", user.ID, err)
}
return c.JSON(201, map[string]interface{}{
"user": user,
"message": "User created successfully",
})
}// Write-Behind (Write-Back) pattern
// WriteBackCache layers an asynchronous database-write queue on top of a
// cache: reads/writes hit the cache, DB writes are flushed in batches.
type WriteBackCache struct {
// cache is the fast front store that serves requests synchronously.
cache forge.Cache
// writeQueue buffers pending DB writes; see backgroundWriter.
writeQueue chan WriteOperation
// batchSize triggers a flush once this many operations accumulate.
batchSize int
// flushInterval triggers a flush for partial batches.
flushInterval time.Duration
}
// WriteOperation is one queued database mutation.
type WriteOperation struct {
Key string
Value interface{}
Op string // "create", "update", "delete"
}
// NewWriteBackCache wraps cache with a write-behind queue (capacity 1000,
// batches of 10, flushed at least every 5 seconds) and starts the
// background flusher goroutine.
func NewWriteBackCache(cache forge.Cache) *WriteBackCache {
	wbc := new(WriteBackCache)
	wbc.cache = cache
	wbc.writeQueue = make(chan WriteOperation, 1000)
	wbc.batchSize = 10
	wbc.flushInterval = 5 * time.Second
	// The writer runs for the life of the process; there is no stop signal.
	go wbc.backgroundWriter()
	return wbc
}
// backgroundWriter drains the write queue, flushing whenever a full batch
// accumulates or the flush interval elapses with a partial batch pending.
func (wbc *WriteBackCache) backgroundWriter() {
ticker := time.NewTicker(wbc.flushInterval)
defer ticker.Stop()
batch := make([]WriteOperation, 0, wbc.batchSize)
for {
select {
case op := <-wbc.writeQueue:
batch = append(batch, op)
if len(batch) >= wbc.batchSize {
wbc.flushBatch(batch)
batch = batch[:0]
}
case <-ticker.C:
if len(batch) > 0 {
wbc.flushBatch(batch)
batch = batch[:0]
}
}
}
}
// flushBatch applies queued operations to the database; failures are
// logged and the operation is dropped (no retry).
func (wbc *WriteBackCache) flushBatch(batch []WriteOperation) {
for _, op := range batch {
switch op.Op {
case "create", "update":
// NOTE(review): op.Value.(User) panics if a non-User was queued —
// confirm all producers enqueue User values.
err := updateUserInDB(op.Key, op.Value.(User))
if err != nil {
log.Printf("Failed to write %s to DB: %v", op.Key, err)
}
case "delete":
err := deleteUserFromDB(op.Key)
if err != nil {
log.Printf("Failed to delete %s from DB: %v", op.Key, err)
}
}
}
}
// updateUserWriteBehind updates the cache synchronously and queues the
// database write; a full queue is logged and the DB write is skipped.
func updateUserWriteBehind(c forge.Context, wbc *WriteBackCache) error {
userID := c.Param("user_id")
key := fmt.Sprintf("user:%s", userID)
var updateReq User
if err := c.Bind(&updateReq); err != nil {
return c.JSON(400, map[string]string{"error": "Invalid request"})
}
// Update cache immediately
err := wbc.cache.SetJSON(c.Context(), key, updateReq, time.Hour)
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to update cache"})
}
// Queue database write
select {
case wbc.writeQueue <- WriteOperation{
Key: userID,
Value: updateReq,
Op: "update",
}:
// Successfully queued
default:
// Queue full, handle gracefully
log.Printf("Write queue full for user %s", userID)
}
return c.JSON(200, map[string]interface{}{
"user": updateReq,
"message": "User updated successfully",
})
}// Multi-tier caching strategy
// MultiTierCache fans reads/writes across three cache layers.
type MultiTierCache struct {
l1Cache forge.Cache // Fast local cache (memory)
l2Cache forge.Cache // Distributed cache (Redis)
l3Cache forge.Cache // Persistent cache (Database)
}
// NewMultiTierCache assembles a three-tier cache: l1 local, l2 distributed,
// l3 persistent.
func NewMultiTierCache(l1, l2, l3 forge.Cache) *MultiTierCache {
	mtc := new(MultiTierCache)
	mtc.l1Cache = l1
	mtc.l2Cache = l2
	mtc.l3Cache = l3
	return mtc
}
// Get reads tiers in order L1 -> L2 -> L3, back-filling faster tiers on a
// lower-tier hit. Tier errors other than not-found are logged, not fatal.
func (mtc *MultiTierCache) Get(ctx context.Context, key string) (interface{}, error) {
// Try L1 cache first (fastest)
value, err := mtc.l1Cache.Get(ctx, key)
if err == nil {
return value, nil
}
if !mtc.l1Cache.IsNotFound(err) {
log.Printf("L1 cache error for key %s: %v", key, err)
}
// Try L2 cache
value, err = mtc.l2Cache.Get(ctx, key)
if err == nil {
// Populate L1 cache
mtc.l1Cache.Set(ctx, key, value, time.Minute*5)
return value, nil
}
if !mtc.l2Cache.IsNotFound(err) {
log.Printf("L2 cache error for key %s: %v", key, err)
}
// Try L3 cache
value, err = mtc.l3Cache.Get(ctx, key)
if err == nil {
// Populate L2 and L1 caches
mtc.l2Cache.Set(ctx, key, value, time.Hour)
mtc.l1Cache.Set(ctx, key, value, time.Minute*5)
return value, nil
}
// Miss in every tier: propagate the L3 error.
return nil, err
}
// Set writes all three tiers: L1 capped at 10m, L2 at the requested ttl,
// L3 at double the ttl. Per-tier failures are collected, not short-circuited.
func (mtc *MultiTierCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
// Set in all tiers
var errors []error
// L1 cache with shorter TTL
l1TTL := ttl
if l1TTL > time.Minute*10 {
l1TTL = time.Minute * 10
}
if err := mtc.l1Cache.Set(ctx, key, value, l1TTL); err != nil {
errors = append(errors, fmt.Errorf("L1 cache error: %v", err))
}
// L2 cache
if err := mtc.l2Cache.Set(ctx, key, value, ttl); err != nil {
errors = append(errors, fmt.Errorf("L2 cache error: %v", err))
}
// L3 cache with longer TTL
l3TTL := ttl * 2
if err := mtc.l3Cache.Set(ctx, key, value, l3TTL); err != nil {
errors = append(errors, fmt.Errorf("L3 cache error: %v", err))
}
if len(errors) > 0 {
return fmt.Errorf("cache errors: %v", errors)
}
return nil
}
// Delete removes the key from every tier, collecting per-tier failures.
func (mtc *MultiTierCache) Delete(ctx context.Context, key string) error {
// Delete from all tiers
var errors []error
if err := mtc.l1Cache.Delete(ctx, key); err != nil {
errors = append(errors, fmt.Errorf("L1 cache error: %v", err))
}
if err := mtc.l2Cache.Delete(ctx, key); err != nil {
errors = append(errors, fmt.Errorf("L2 cache error: %v", err))
}
if err := mtc.l3Cache.Delete(ctx, key); err != nil {
errors = append(errors, fmt.Errorf("L3 cache error: %v", err))
}
if len(errors) > 0 {
return fmt.Errorf("cache deletion errors: %v", errors)
}
return nil
}
// multiTierCacheHandler wires named backends (memory/redis/database) into a
// MultiTierCache and serves a user through it, falling back to the DB.
func multiTierCacheHandler(c forge.Context) error {
// Get cache instances
memoryCache := forge.GetCache(c, "memory")
redisCache := forge.GetCache(c, "redis")
dbCache := forge.GetCache(c, "database")
mtc := NewMultiTierCache(memoryCache, redisCache, dbCache)
userID := c.Param("user_id")
key := fmt.Sprintf("user:%s", userID)
// Try to get from multi-tier cache
value, err := mtc.Get(c.Context(), key)
if err == nil {
return c.JSON(200, map[string]interface{}{
"user": value,
"source": "cache",
})
}
// Load from database
user, err := loadUserFromDB(userID)
if err != nil {
return c.JSON(404, map[string]string{"error": "User not found"})
}
// Store in multi-tier cache
err = mtc.Set(c.Context(), key, user, time.Hour)
if err != nil {
log.Printf("Failed to cache user in multi-tier: %v", err)
}
return c.JSON(200, map[string]interface{}{
"user": user,
"source": "database",
})
}Monitoring and Metrics
// cacheMetricsHandler exposes raw cache statistics plus a derived hit rate.
func cacheMetricsHandler(c forge.Context) error {
	cache := forge.GetCache(c)
	// Get cache statistics
	stats, err := cache.Stats(c.Context())
	if err != nil {
		return c.JSON(500, map[string]string{"error": "Failed to get cache stats"})
	}
	// Guard the hit-rate division: on a cold cache Hits+Misses is 0 and the
	// unguarded expression yields NaN (which also breaks JSON encoding).
	// This mirrors the derivation already used in setupCacheMonitoring.
	hitRate := float64(0)
	if stats.Hits+stats.Misses > 0 {
		hitRate = float64(stats.Hits) / float64(stats.Hits+stats.Misses)
	}
	return c.JSON(200, map[string]interface{}{
		"hits": stats.Hits,
		"misses": stats.Misses,
		"hit_rate": hitRate,
		"keys": stats.Keys,
		"memory_usage": stats.MemoryUsage,
		"connections": stats.Connections,
		"operations": stats.Operations,
		"errors": stats.Errors,
		"uptime": stats.Uptime,
	})
}
// cachePerformanceHandler measures wall-clock latency of one Set/Get/Delete
// round trip against a throwaway key and reports each duration.
func cachePerformanceHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Measure cache operation performance
start := time.Now()
// Perform test operations
testKey := "performance_test"
testValue := "test_value"
// Set operation
setStart := time.Now()
err := cache.Set(c.Context(), testKey, testValue, time.Minute)
setDuration := time.Since(setStart)
if err != nil {
return c.JSON(500, map[string]string{"error": "Set operation failed"})
}
// Get operation
getStart := time.Now()
_, err = cache.Get(c.Context(), testKey)
getDuration := time.Since(getStart)
if err != nil {
return c.JSON(500, map[string]string{"error": "Get operation failed"})
}
// Delete operation
deleteStart := time.Now()
err = cache.Delete(c.Context(), testKey)
deleteDuration := time.Since(deleteStart)
if err != nil {
return c.JSON(500, map[string]string{"error": "Delete operation failed"})
}
totalDuration := time.Since(start)
return c.JSON(200, map[string]interface{}{
"set_duration_ms": setDuration.Milliseconds(),
"get_duration_ms": getDuration.Milliseconds(),
"delete_duration_ms": deleteDuration.Milliseconds(),
"total_duration_ms": totalDuration.Milliseconds(),
})
}// Cache performance middleware
// cachePerformanceMiddleware times each request, logs those over 100ms,
// and attaches the duration as an X-Cache-Duration response header.
func cachePerformanceMiddleware() forge.MiddlewareFunc {
return func(next forge.HandlerFunc) forge.HandlerFunc {
return func(c forge.Context) error {
start := time.Now()
// Execute handler
err := next(c)
duration := time.Since(start)
// Log slow requests
if duration > time.Millisecond*100 {
log.Printf("Slow cache operation: %s %s took %v",
c.Request().Method, c.Request().URL.Path, duration)
}
// Add performance headers
// NOTE(review): the header is set after the handler ran — confirm the
// response has not already been written by then.
c.Response().Header().Set("X-Cache-Duration",
fmt.Sprintf("%.2fms", float64(duration.Nanoseconds())/1e6))
return err
}
}
}
// setupCacheMonitoring registers /metrics/cache (derived metrics, with a
// zero-division guard on hit rate) and /health/cache endpoints on the app.
func setupCacheMonitoring(app *forge.App) {
cache := forge.GetCache(app)
// Cache metrics endpoint
app.GET("/metrics/cache", func(c forge.Context) error {
stats, err := cache.Stats(c.Context())
if err != nil {
return c.JSON(500, map[string]string{"error": "Failed to get stats"})
}
// Calculate derived metrics
hitRate := float64(0)
if stats.Hits+stats.Misses > 0 {
hitRate = float64(stats.Hits) / float64(stats.Hits+stats.Misses)
}
memoryUsageMB := float64(stats.MemoryUsage) / 1024 / 1024
return c.JSON(200, map[string]interface{}{
"cache_hits_total": stats.Hits,
"cache_misses_total": stats.Misses,
"cache_hit_rate": hitRate,
"cache_keys_total": stats.Keys,
"cache_memory_usage_mb": memoryUsageMB,
"cache_connections_active": stats.Connections,
"cache_operations_total": stats.Operations,
"cache_errors_total": stats.Errors,
"cache_uptime_seconds": stats.Uptime.Seconds(),
})
})
// Cache health endpoint
app.GET("/health/cache", func(c forge.Context) error {
health, err := cache.Health(c.Context())
if err != nil {
return c.JSON(503, map[string]interface{}{
"status": "unhealthy",
"error": err.Error(),
})
}
return c.JSON(200, map[string]interface{}{
"status": health.Status,
"latency_ms": health.Latency.Milliseconds(),
"connections": health.Connections,
"memory_mb": float64(health.MemoryUsage) / 1024 / 1024,
})
})
}func cacheHealthHandler(c forge.Context) error {
cache := forge.GetCache(c)
// Perform health check
health, err := cache.Health(c.Context())
if err != nil {
return c.JSON(503, map[string]interface{}{
"status": "unhealthy",
"error": err.Error(),
"timestamp": time.Now(),
})
}
// Determine overall health status
// Thresholds: >100ms latency degrades; zero connections is unhealthy
// (the unhealthy check runs last and overrides degraded).
status := "healthy"
if health.Latency > time.Millisecond*100 {
status = "degraded"
}
if health.Connections < 1 {
status = "unhealthy"
}
return c.JSON(200, map[string]interface{}{
"status": status,
"latency_ms": health.Latency.Milliseconds(),
"connections": health.Connections,
"memory_usage_mb": float64(health.MemoryUsage) / 1024 / 1024,
"uptime_seconds": health.Uptime.Seconds(),
"timestamp": time.Now(),
})
}
// comprehensiveCacheHealthCheck exercises the cache end-to-end: connectivity
// (ping), a set/get/delete round trip on a sentinel key, and statistics
// retrieval. Each probe is reported individually in the "checks" map; the
// endpoint responds 503 if any probe fails.
func comprehensiveCacheHealthCheck(c forge.Context) error {
	cache := forge.GetCache(c)
	checks := make(map[string]interface{})
	overallHealthy := true

	// record stores one probe result and folds it into the overall verdict.
	record := func(name string, err error) {
		checks[name] = map[string]interface{}{
			"healthy": err == nil,
			"error":   getErrorString(err),
		}
		if err != nil {
			overallHealthy = false
		}
	}

	// Test basic connectivity.
	record("connectivity", cache.Ping(c.Context()))

	// Test set/get/delete round trip on a short-lived sentinel key.
	testKey := "health_check_set"
	record("set_operation", cache.Set(c.Context(), testKey, "test", time.Minute))

	_, err := cache.Get(c.Context(), testKey)
	record("get_operation", err)

	record("delete_operation", cache.Delete(c.Context(), testKey))

	// Get cache statistics.
	stats, err := cache.Stats(c.Context())
	record("statistics", err)
	if err == nil {
		// FIX: guard the hit-rate division — on a cold cache Hits+Misses is
		// zero and the previous unguarded division produced NaN, which
		// encoding/json cannot marshal, failing the entire response.
		hitRate := float64(0)
		if stats.Hits+stats.Misses > 0 {
			hitRate = float64(stats.Hits) / float64(stats.Hits+stats.Misses)
		}
		checks["statistics"].(map[string]interface{})["hit_rate"] = hitRate
	}

	statusCode := 200
	if !overallHealthy {
		statusCode = 503
	}
	return c.JSON(statusCode, map[string]interface{}{
		"overall_healthy": overallHealthy,
		"checks":          checks,
		"timestamp":       time.Now(),
	})
}
func getErrorString(err error) string {
if err == nil {
return ""
}
return err.Error()
}type CacheAlert struct {
Type string `json:"type"`
Severity string `json:"severity"`
Message string `json:"message"`
Timestamp time.Time `json:"timestamp"`
Metrics map[string]interface{} `json:"metrics"`
}
// monitorCacheHealth evaluates cache health once per minute. It blocks
// forever, so run it on its own goroutine.
// NOTE(review): there is no cancellation path — consider a ctx-aware
// variant if graceful shutdown is required.
func monitorCacheHealth(cache forge.Cache) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		<-ticker.C
		checkCacheHealth(cache)
	}
}
// checkCacheHealth runs one round of cache health evaluation: it inspects
// statistics (hit rate, memory usage, accumulated errors) and verifies
// connectivity, emitting an alert for every threshold that is breached.
func checkCacheHealth(cache forge.Cache) {
	ctx := context.Background()

	// Get cache statistics; without them no further checks are possible.
	stats, statsErr := cache.Stats(ctx)
	if statsErr != nil {
		sendAlert(CacheAlert{
			Type:      "cache_stats_error",
			Severity:  "critical",
			Message:   fmt.Sprintf("Failed to get cache statistics: %v", statsErr),
			Timestamp: time.Now(),
		})
		return
	}

	// Hit rate, guarded so a cache with no lookups yet reads as zero rather
	// than dividing by zero.
	var hitRate float64
	if lookups := stats.Hits + stats.Misses; lookups > 0 {
		hitRate = float64(stats.Hits) / float64(lookups)
	}
	if hitRate < 0.5 { // below 50% — caching is not paying off
		sendAlert(CacheAlert{
			Type:      "low_hit_rate",
			Severity:  "warning",
			Message:   fmt.Sprintf("Cache hit rate is low: %.2f%%", hitRate*100),
			Timestamp: time.Now(),
			Metrics: map[string]interface{}{
				"hit_rate": hitRate,
				"hits":     stats.Hits,
				"misses":   stats.Misses,
			},
		})
	}

	// Memory footprint.
	usedMB := float64(stats.MemoryUsage) / 1024 / 1024
	if usedMB > 1000 { // above 1GB
		sendAlert(CacheAlert{
			Type:      "high_memory_usage",
			Severity:  "warning",
			Message:   fmt.Sprintf("Cache memory usage is high: %.2f MB", usedMB),
			Timestamp: time.Now(),
			Metrics: map[string]interface{}{
				"memory_usage_mb": usedMB,
			},
		})
	}

	// Accumulated backend errors.
	if stats.Errors > 100 { // more than 100 errors
		sendAlert(CacheAlert{
			Type:      "high_error_rate",
			Severity:  "critical",
			Message:   fmt.Sprintf("Cache error count is high: %d", stats.Errors),
			Timestamp: time.Now(),
			Metrics: map[string]interface{}{
				"errors": stats.Errors,
			},
		})
	}

	// Live connectivity probe.
	if pingErr := cache.Ping(ctx); pingErr != nil {
		sendAlert(CacheAlert{
			Type:      "connectivity_error",
			Severity:  "critical",
			Message:   fmt.Sprintf("Cache connectivity check failed: %v", pingErr),
			Timestamp: time.Now(),
		})
	}
}
func sendAlert(alert CacheAlert) {
// Send alert to monitoring system
log.Printf("CACHE ALERT [%s]: %s", alert.Severity, alert.Message)
// You can integrate with:
// - Slack
// - PagerDuty
// - Email
// - Webhook
// - Prometheus AlertManager
// Example webhook notification
go func() {
alertJSON, _ := json.Marshal(alert)
resp, err := http.Post("https://your-webhook-url.com/alerts",
"application/json", bytes.NewBuffer(alertJSON))
if err != nil {
log.Printf("Failed to send alert webhook: %v", err)
return
}
defer resp.Body.Close()
}()
}Best Practices
Performance Optimization
- Choose the right cache backend for your use case
- Use appropriate TTL values to balance freshness and performance
- Implement cache warming for frequently accessed data
- Use compression for large values
- Monitor cache hit rates and adjust strategies accordingly
Memory Management
- Set appropriate memory limits for in-memory caches
- Use eviction policies that match your access patterns
- Monitor memory usage and set up alerts
- Consider using compression for large cached objects
Error Handling
- Always handle cache misses gracefully
- Implement fallback mechanisms when cache is unavailable
- Use circuit breakers for external cache backends
- Log cache errors for monitoring and debugging
Security
- Secure cache connections with TLS/SSL
- Use authentication for distributed cache backends
- Avoid caching sensitive data without encryption
- Implement proper access controls
Troubleshooting
Common Issues
Connection Problems
// Test cache connectivity
err := cache.Ping(ctx)
if err != nil {
log.Printf("Cache connection failed: %v", err)
// Check network, credentials, firewall
}
Memory Issues
// Monitor memory usage
stats, err := cache.Stats(ctx)
if err == nil {
memoryMB := float64(stats.MemoryUsage) / 1024 / 1024
log.Printf("Cache memory usage: %.2f MB", memoryMB)
}
Performance Problems
// Measure operation latency
start := time.Now()
err := cache.Set(ctx, "test", "value", time.Minute)
duration := time.Since(start)
log.Printf("Set operation took: %v", duration)
Monitor cache performance regularly and set up alerts for degraded performance or high error rates.
Next Steps
Setup: Configure your cache backend and test basic operations
Integration: Implement caching in your application with appropriate patterns
Optimization: Tune TTL values, implement cache warming, and optimize for your use case
Monitoring: Set up comprehensive monitoring and alerting
Scaling: Implement distributed caching and multi-tier strategies for production
How is this guide?
Last updated on