Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,12 @@ if entry, err := cache.Get("my-unique-key"); err == nil {

2. `CleanWindow` is a time interval. After each interval elapses, all dead (expired) entries are deleted, while entries that still have life remain.

BigCache supports a non-blocking cleanup mode (enabled by default) that uses concurrent goroutines and batch processing to minimize the impact of cleanup on cache performance. This keeps cache operations responsive even during cleanup cycles, which is especially beneficial for high-throughput systems where a traditional blocking cleanup could introduce latency.

## [Benchmarks](https://github.com/allegro/bigcache-bench)

Three caches were compared: bigcache, [freecache](https://github.com/coocood/freecache) and map.
Expand Down
47 changes: 42 additions & 5 deletions assert_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,32 +3,69 @@ package bigcache
import (
"bytes"
"fmt"
"path"
"path/filepath"
"reflect"
"runtime"
"testing"
)

// assertEqual fails the test when expected and actual are not deeply equal.
func assertEqual(t *testing.T, expected, actual interface{}) {
	if reflect.DeepEqual(expected, actual) {
		return
	}
	t.Errorf("Not equal: \nexpected: %v\nactual : %v", expected, actual)
}

// assertEqualValues checks deep equality of two values and fails the test
// when they differ. It mirrors assertEqual and exists to ease test migration.
func assertEqualValues(t *testing.T, expected, actual interface{}) {
	equal := reflect.DeepEqual(expected, actual)
	if !equal {
		t.Errorf("Not equal: \nexpected: %v\nactual : %v", expected, actual)
	}
}

// assertNotEqual fails the test when expected and actual ARE deeply equal.
func assertNotEqual(t *testing.T, expected, actual interface{}) {
	if !reflect.DeepEqual(expected, actual) {
		return
	}
	t.Errorf("Should not be equal: \nexpected: %v\nactual : %v", expected, actual)
}

// compareByteSlices reports whether the two byte slices hold identical bytes.
// A nil slice and an empty slice compare as equal (bytes.Equal semantics).
func compareByteSlices(left, right []byte) bool {
	return bytes.Equal(left, right)
}

// assertNoError fails the test when err is non-nil.
func assertNoError(t *testing.T, err error) {
	if err == nil {
		return
	}
	t.Errorf("Unexpected error: %s", err)
}

// assertEqualWithCaller checks if two values are equal and, on failure,
// reports the file:line of the calling test for easier diagnosis. Optional
// extra context may be supplied via msgAndArgs.
func assertEqualWithCaller(t *testing.T, expected, actual interface{}, msgAndArgs ...interface{}) {
	if objectsAreEqual(expected, actual) {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	file = filepath.Base(file)
	// Pre-render the message once and emit it through a "%s" verb so that
	// '%' characters produced by %#v formatting cannot be re-interpreted as
	// formatting directives (the old t.Errorf(fmt.Sprintf(...), args...)
	// double-formatted the string).
	msg := fmt.Sprintf("\n%s:%d: Not equal: \n"+
		"expected: %T(%#v)\n"+
		"actual : %T(%#v)\n",
		file, line, expected, expected, actual, actual)
	if len(msgAndArgs) > 0 {
		msg += fmt.Sprintln(msgAndArgs...)
	}
	t.Errorf("%s", msg)
}

// assertNoErrorWithCaller fails the test when e is non-nil, reporting the
// file:line of the calling test for easier diagnosis.
func assertNoErrorWithCaller(t *testing.T, e error) {
	if e == nil {
		return
	}
	_, file, line, _ := runtime.Caller(1)
	file = filepath.Base(file)
	// Format the values directly with Errorf instead of pre-rendering via
	// Sprintf, so '%' characters in the error's %#v output cannot be
	// misread as formatting verbs by the outer Errorf call.
	t.Errorf("\n%s:%d: Error is not nil: \nactual : %T(%#v)\n", file, line, e, e)
}

// objectsAreEqual is a helper function that handles byte slice comparison
func objectsAreEqual(expected, actual interface{}) bool {
if expected == nil || actual == nil {
return expected == actual
Expand Down
88 changes: 85 additions & 3 deletions bigcache.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package bigcache
import (
"context"
"errors"
"sync"
"time"
)

Expand All @@ -21,6 +22,8 @@ type BigCache struct {
config Config
shardMask uint64
close chan struct{}
cleanupWg sync.WaitGroup // Used to wait for cleanup goroutines during shutdown
metrics *metrics // Metrics for tracking cleanup performance
}

// Response will contain metadata about the entry for which GetWithInfo(key) was called
Expand Down Expand Up @@ -86,6 +89,7 @@ func newBigCache(ctx context.Context, config Config, clock clock) (*BigCache, er
config: config,
shardMask: uint64(config.Shards - 1),
close: make(chan struct{}),
metrics: newMetrics(config.EnableMetrics),
}

var onRemove func(wrappedEntry []byte, reason RemoveReason)
Expand All @@ -111,8 +115,15 @@ func newBigCache(ctx context.Context, config Config, clock clock) (*BigCache, er
select {
case <-ctx.Done():
return
case t := <-ticker.C:
cache.cleanUp(uint64(t.Unix()))
case <-ticker.C:
currentTimestamp := uint64(clock.Epoch())
if config.EnableNonBlockingCleanup {
// Use non-blocking cleanup to avoid performance issues under heavy load
cache.cleanUpNonBlocking()
} else {
// Use traditional blocking cleanup
cache.cleanUp(currentTimestamp)
}
case <-cache.close:
return
}
Expand All @@ -128,6 +139,11 @@ func newBigCache(ctx context.Context, config Config, clock clock) (*BigCache, er
// kept to the cache preventing GC of the entire cache.
func (c *BigCache) Close() error {
	// Signal every background goroutine that selects on c.close
	// (e.g. the periodic cleanup loop) to stop.
	close(c.close)

	// Wait for any ongoing cleanup operations to finish
	// This ensures all resources are properly released
	c.cleanupWg.Wait()

	return nil
}

Expand Down Expand Up @@ -227,11 +243,20 @@ func (c *BigCache) KeyMetadata(key string) Metadata {
return shard.getKeyMetadataWithLock(hashedKey)
}

// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
// Iterator returns an iterator to traverse over EntryInfo's from the whole
// cache, shard by shard.
func (c *BigCache) Iterator() *EntryInfoIterator {
	return newIterator(c)
}

// GetMetrics returns a copy of the current cache metrics. When metrics
// collection is not enabled (c.metrics is nil), it returns zero-valued
// Metrics.
func (c *BigCache) GetMetrics() Metrics {
	if c.metrics != nil {
		return c.metrics.Get()
	}
	// Metrics collection disabled: report the zero value.
	return Metrics{}
}

func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
oldestTimestamp := readTimestampFromEntry(oldestEntry)
if currentTimestamp < oldestTimestamp {
Expand All @@ -250,6 +275,63 @@ func (c *BigCache) cleanUp(currentTimestamp uint64) {
}
}

// cleanUpNonBlocking performs asynchronous cleanup of expired entries.
// Shards are cleaned concurrently (bounded by a semaphore) so the caller is
// not blocked for the full cleanup duration. All goroutines started here are
// registered with c.cleanupWg so Close() can wait for them.
func (c *BigCache) cleanUpNonBlocking() {
	currentTimestamp := uint64(c.clock.Epoch())
	startTime := time.Now()

	// Bound the number of concurrent per-shard cleanups to prevent
	// resource exhaustion when the shard count is large.
	maxConcurrentCleanups := 8
	if len(c.shards) < maxConcurrentCleanups {
		maxConcurrentCleanups = len(c.shards)
	}

	// Semaphore channel limiting in-flight shard cleanups.
	semaphore := make(chan struct{}, maxConcurrentCleanups)

	// When metrics are enabled, each shard goroutine reports its eviction
	// count here. Buffered with one slot per shard so sends never block.
	var evictionCounts chan int
	if c.metrics != nil && c.config.EnableMetrics {
		evictionCounts = make(chan int, len(c.shards))
	}

	// Start cleanup for each shard in a separate goroutine.
	c.cleanupWg.Add(len(c.shards))
	for i := range c.shards {
		go func(shard *cacheShard) {
			semaphore <- struct{}{} // acquire slot
			defer func() {
				<-semaphore // release slot
				c.cleanupWg.Done()
			}()

			// Clean up the shard in batches to reduce lock contention.
			evicted := shard.cleanUpInBatches(currentTimestamp)

			if evictionCounts != nil {
				evictionCounts <- evicted
			}
		}(c.shards[i])
	}

	// Aggregate metrics in the background. This goroutine is tracked by
	// cleanupWg as well: previously it was untracked, so Close() could
	// return while it was still reading evictionCounts and writing to
	// c.metrics.
	if evictionCounts != nil {
		c.cleanupWg.Add(1)
		go func() {
			defer c.cleanupWg.Done()
			totalEvicted := 0
			for i := 0; i < len(c.shards); i++ {
				totalEvicted += <-evictionCounts
			}

			c.metrics.recordCleanup(time.Since(startTime), totalEvicted)
		}()
	}
}

// getShard selects the shard responsible for hashedKey by masking the hash
// with shardMask (config.Shards - 1), i.e. using its low bits as an index.
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
	shard = c.shards[hashedKey&c.shardMask]
	return shard
}
Expand Down
8 changes: 8 additions & 0 deletions bigcache_bench_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -208,3 +208,11 @@ func readFromCacheNonExistentKeys(b *testing.B, shards int) {
}
})
}

// max returns the larger of x or y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
Loading