Back to Journal
System Design

Complete Guide to Distributed Caching with Go

A comprehensive guide to implementing Distributed Caching using Go, covering architecture, code examples, and production-ready patterns.

Muneer Puthiya Purayil 17 min read

Go's concurrency model and low memory overhead make it an excellent choice for building distributed caching layers. The combination of goroutines for parallel cache operations, channels for cache invalidation propagation, and the go-redis client library provides a production-ready foundation. This guide covers implementing a complete caching layer in Go with Redis.

Core Cache Client

go
1package cache
2 
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)
11 
12type Client struct {
13 rdb *redis.Client
14 prefix string
15 metrics *Metrics
16}
17 
18type Config struct {
19 Addr string
20 Password string
21 DB int
22 PoolSize int
23 MinIdleConns int
24 KeyPrefix string
25}
26 
27func NewClient(cfg Config) (*Client, error) {
28 rdb := redis.NewClient(&redis.Options{
29 Addr: cfg.Addr,
30 Password: cfg.Password,
31 DB: cfg.DB,
32 PoolSize: cfg.PoolSize,
33 MinIdleConns: cfg.MinIdleConns,
34 ReadTimeout: 100 * time.Millisecond,
35 WriteTimeout: 100 * time.Millisecond,
36 DialTimeout: 5 * time.Second,
37 })
38 
39 ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
40 defer cancel()
41 
42 if err := rdb.Ping(ctx).Err(); err != nil {
43 return nil, fmt.Errorf("redis ping: %w", err)
44 }
45 
46 return &Client{
47 rdb: rdb,
48 prefix: cfg.KeyPrefix,
49 metrics: NewMetrics(),
50 }, nil
51}
52 
53func (c *Client) Get(ctx context.Context, key string, dest interface{}) error {
54 start := time.Now()
55 fullKey := c.prefixKey(key)
56 
57 raw, err := c.rdb.Get(ctx, fullKey).Bytes()
58 if err == redis.Nil {
59 c.metrics.RecordMiss(time.Since(start))
60 return ErrCacheMiss
61 }
62 if err != nil {
63 return fmt.Errorf("redis get: %w", err)
64 }
65 
66 c.metrics.RecordHit(time.Since(start))
67 return json.Unmarshal(raw, dest)
68}
69 
70func (c *Client) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error {
71 fullKey := c.prefixKey(key)
72 data, err := json.Marshal(value)
73 if err != nil {
74 return fmt.Errorf("marshal: %w", err)
75 }
76 return c.rdb.Set(ctx, fullKey, data, ttl).Err()
77}
78 
79func (c *Client) Delete(ctx context.Context, keys ...string) error {
80 fullKeys := make([]string, len(keys))
81 for i, k := range keys {
82 fullKeys[i] = c.prefixKey(k)
83 }
84 return c.rdb.Del(ctx, fullKeys...).Err()
85}
86 
87func (c *Client) GetOrLoad(ctx context.Context, key string, dest interface{}, ttl time.Duration, loader func() (interface{}, error)) error {
88 err := c.Get(ctx, key, dest)
89 if err == nil {
90 return nil
91 }
92 if err != ErrCacheMiss {
93 return err
94 }
95 
96 value, err := loader()
97 if err != nil {
98 return err
99 }
100 
101 if err := c.Set(ctx, key, value, ttl); err != nil {
102 // Log but don't fail — data was loaded successfully
103 }
104 
105 data, _ := json.Marshal(value)
106 return json.Unmarshal(data, dest)
107}
108 
109func (c *Client) prefixKey(key string) string {
110 if c.prefix == "" {
111 return key
112 }
113 return c.prefix + ":" + key
114}
115 
116var ErrCacheMiss = fmt.Errorf("cache miss")
117 

Pipeline Operations

go
1func (c *Client) GetMany(ctx context.Context, keys []string) (map[string][]byte, error) {
2 pipe := c.rdb.Pipeline()
3 cmds := make(map[string]*redis.StringCmd, len(keys))
4 
5 for _, key := range keys {
6 fullKey := c.prefixKey(key)
7 cmds[key] = pipe.Get(ctx, fullKey)
8 }
9 
10 _, err := pipe.Exec(ctx)
11 if err != nil && err != redis.Nil {
12 return nil, err
13 }
14 
15 results := make(map[string][]byte, len(keys))
16 for key, cmd := range cmds {
17 val, err := cmd.Bytes()
18 if err == nil {
19 results[key] = val
20 }
21 }
22 
23 return results, nil
24}
25 
26func (c *Client) SetMany(ctx context.Context, entries map[string]interface{}, ttl time.Duration) error {
27 pipe := c.rdb.Pipeline()
28 
29 for key, value := range entries {
30 data, err := json.Marshal(value)
31 if err != nil {
32 continue
33 }
34 pipe.Set(ctx, c.prefixKey(key), data, ttl)
35 }
36 
37 _, err := pipe.Exec(ctx)
38 return err
39}
40 

Cache-Aside with Repository Pattern

go
1type ProductRepository struct {
2 db *sql.DB
3 cache *cache.Client
4}
5 
6func (r *ProductRepository) GetByID(ctx context.Context, id string) (*Product, error) {
7 var product Product
8 err := r.cache.GetOrLoad(ctx, fmt.Sprintf("product:%s", id), &product, 10*time.Minute,
9 func() (interface{}, error) {
10 return r.loadFromDB(ctx, id)
11 },
12 )
13 return &product, err
14}
15 
16func (r *ProductRepository) Update(ctx context.Context, id string, update ProductUpdate) (*Product, error) {
17 product, err := r.updateInDB(ctx, id, update)
18 if err != nil {
19 return nil, err
20 }
21 
22 // Invalidate cache
23 r.cache.Delete(ctx, fmt.Sprintf("product:%s", id))
24 
25 return product, nil
26}
27 

Need a second opinion on your system design architecture?

I run free 30-minute strategy calls for engineering teams tackling this exact problem.

Book a Free Call

Cache Stampede Protection with singleflight

go
1import "golang.org/x/sync/singleflight"
2 
3type StampedeProtectedCache struct {
4 cache *Client
5 group singleflight.Group
6}
7 
8func (s *StampedeProtectedCache) GetOrLoad(ctx context.Context, key string, ttl time.Duration, loader func() (interface{}, error)) (interface{}, error) {
9 // Try cache first
10 var result interface{}
11 err := s.cache.Get(ctx, key, &result)
12 if err == nil {
13 return result, nil
14 }
15 
16 // Deduplicate concurrent loads for the same key
17 v, err, _ := s.group.Do(key, func() (interface{}, error) {
18 // Double-check cache (another goroutine may have populated it)
19 var cached interface{}
20 if err := s.cache.Get(ctx, key, &cached); err == nil {
21 return cached, nil
22 }
23 
24 value, err := loader()
25 if err != nil {
26 return nil, err
27 }
28 
29 s.cache.Set(ctx, key, value, ttl)
30 return value, nil
31 })
32 
33 return v, err
34}
35 

Monitoring

go
// Metrics tracks cache hit and miss counts with lock-free atomic counters.
type Metrics struct {
	hits   atomic.Int64
	misses atomic.Int64
}
5 
6func (m *Metrics) RecordHit(latency time.Duration) {
7 m.hits.Add(1)
8 // Export to Prometheus
9 cacheOperations.WithLabelValues("hit").Inc()
10 cacheLatency.WithLabelValues("hit").Observe(latency.Seconds())
11}
12 
13func (m *Metrics) RecordMiss(latency time.Duration) {
14 m.misses.Add(1)
15 cacheOperations.WithLabelValues("miss").Inc()
16 cacheLatency.WithLabelValues("miss").Observe(latency.Seconds())
17}
18 
19func (m *Metrics) HitRate() float64 {
20 h := m.hits.Load()
21 miss := m.misses.Load()
22 total := h + miss
23 if total == 0 {
24 return 0
25 }
26 return float64(h) / float64(total)
27}
28 

Testing

go
1func TestCacheGetOrLoad(t *testing.T) {
2 // Use miniredis for unit tests
3 mr := miniredis.RunT(t)
4 client, _ := cache.NewClient(cache.Config{Addr: mr.Addr()})
5 
6 loadCount := 0
7 loader := func() (interface{}, error) {
8 loadCount++
9 return map[string]string{"name": "Test Product"}, nil
10 }
11 
12 var result map[string]string
13 
14 // First call: cache miss, loader called
15 err := client.GetOrLoad(context.Background(), "product:1", &result, time.Minute, loader)
16 assert.NoError(t, err)
17 assert.Equal(t, 1, loadCount)
18 
19 // Second call: cache hit, loader not called
20 err = client.GetOrLoad(context.Background(), "product:1", &result, time.Minute, loader)
21 assert.NoError(t, err)
22 assert.Equal(t, 1, loadCount) // Still 1
23}
24 

Conclusion

Go provides an excellent foundation for distributed caching with its low memory overhead, built-in concurrency via goroutines, and the mature go-redis client. The singleflight package from the golang.org/x/sync module (the officially maintained extensions to the standard library) elegantly solves the cache stampede problem. Combined with pipeline operations for batch access and the repository pattern for clean cache-aside implementations, Go delivers a caching layer that handles high throughput with minimal resource consumption.

FAQ

Need expert help?

Building with system design?

I help teams ship production-grade systems. From architecture review to hands-on builds.

Muneer Puthiya Purayil

SaaS Architect & AI Systems Engineer. 10+ years shipping production infrastructure across fintech, automotive, e-commerce, and healthcare.

Engage

Start a
Conversation.

For teams building at scale: SaaS platforms, agentic AI systems, and enterprise mobile infrastructure. Scope and fit are evaluated before any engagement begins.

Limited availability · Q3 / Q4 2026