package bigcache

import (
	"fmt"
	"time"
)

const (
	minimumEntriesInShard = 10 // Minimum number of entries in single shard
)

// BigCache is a fast, concurrent, evicting cache created to keep a big number of entries without impact on performance.
// It keeps entries on the heap but omits GC for them. To achieve that, operations take place on byte arrays;
// therefore entry (de)serialization in front of the cache will be needed in most use cases.
type BigCache struct {
	shards       []*cacheShard
	lifeWindow   uint64
	clock        clock
	hash         Hasher
	config       Config
	shardMask    uint64
	maxShardSize uint32
	close        chan struct{}
}
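
// Usage sketch (illustrative only): a minimal put/get round trip. It uses only the
// Config fields referenced elsewhere in this file (Shards, LifeWindow); the remaining
// sizing fields are left at their zero values for brevity, and values are serialized
// to []byte by the caller, as described above.
//
//	cache, err := NewBigCache(Config{
//		Shards:     1024,
//		LifeWindow: 10 * time.Minute,
//	})
//	if err != nil {
//		panic(err)
//	}
//	if err := cache.Set("my-key", []byte("my-value")); err != nil {
//		panic(err)
//	}
//	entry, err := cache.Get("my-key")
//	if err != nil {
//		panic(err)
//	}
//	_ = entry // []byte("my-value")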

// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
type RemoveReason uint32

const (
	// Expired means the key is past its LifeWindow.
	Expired RemoveReason = iota
	// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
	// entry exceeded the maximum shard size.
	NoSpace
	// Deleted means Delete was called and this key was removed as a result.
	Deleted
)
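
// Callback sketch (illustrative): wiring a removal callback through Config. The
// OnRemoveWithReason field itself is declared in Config, not here; the signature
// below is assumed from how providedOnRemoveWithReason invokes it further down.
//
//	cfg := Config{Shards: 16, LifeWindow: 10 * time.Minute}
//	cfg.OnRemoveWithReason = func(key string, entry []byte, reason RemoveReason) {
//		switch reason {
//		case Expired:
//			// the entry outlived cfg.LifeWindow
//		case NoSpace:
//			// the entry was pushed out for space, or was too large for its shard
//		case Deleted:
//			// the entry was removed by an explicit Delete call
//		}
//	}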

// NewBigCache initializes a new instance of BigCache.
func NewBigCache(config Config) (*BigCache, error) {
	return newBigCache(config, &systemClock{})
}

func newBigCache(config Config, clock clock) (*BigCache, error) {

	if !isPowerOfTwo(config.Shards) {
		return nil, fmt.Errorf("Shards number must be power of two")
	}

	if config.Hasher == nil {
		config.Hasher = newDefaultHasher()
	}

	cache := &BigCache{
		shards:       make([]*cacheShard, config.Shards),
		lifeWindow:   uint64(config.LifeWindow.Seconds()),
		clock:        clock,
		hash:         config.Hasher,
		config:       config,
		shardMask:    uint64(config.Shards - 1),
		maxShardSize: uint32(config.maximumShardSize()),
		close:        make(chan struct{}),
	}

	var onRemove func(wrappedEntry []byte, reason RemoveReason)
	if config.OnRemove != nil {
		onRemove = cache.providedOnRemove
	} else if config.OnRemoveWithReason != nil {
		onRemove = cache.providedOnRemoveWithReason
	} else {
		onRemove = cache.notProvidedOnRemove
	}

	for i := 0; i < config.Shards; i++ {
		cache.shards[i] = initNewShard(config, onRemove, clock)
	}

	if config.CleanWindow > 0 {
		go func() {
			ticker := time.NewTicker(config.CleanWindow)
			defer ticker.Stop()
			for {
				select {
				case t := <-ticker.C:
					cache.cleanUp(uint64(t.Unix()))
				case <-cache.close:
					return
				}
			}
		}()
	}

	return cache, nil
}

// Close is used to signal a shutdown of the cache when you are done with it.
// This allows the cleaning goroutine to exit and ensures that no references to
// the cache are retained, which would otherwise prevent GC of the entire cache.
func (c *BigCache) Close() error {
	close(c.close)
	return nil
}
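
// Typical usage (sketch): close the cache when you are done so the CleanWindow
// goroutine started in newBigCache can exit and the cache can be collected.
//
//	cache, err := NewBigCache(Config{Shards: 16, LifeWindow: time.Minute, CleanWindow: time.Second})
//	if err != nil {
//		panic(err)
//	}
//	defer cache.Close()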

// Get reads the entry for the key.
// It returns an ErrEntryNotFound when no entry exists for the given key.
func (c *BigCache) Get(key string) ([]byte, error) {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.get(key, hashedKey)
}
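
// Lookup sketch (illustrative): a miss surfaces as the package-level
// ErrEntryNotFound mentioned in the comment above.
//
//	entry, err := cache.Get("maybe-missing")
//	if err != nil {
//		// err is ErrEntryNotFound when the key is absent or already evicted
//		entry = []byte("fallback")
//	}
//	_ = entry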

// Set saves the entry under the key.
func (c *BigCache) Set(key string, entry []byte) error {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.set(key, hashedKey, entry)
}

// Delete removes the key.
func (c *BigCache) Delete(key string) error {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.del(key, hashedKey)
}

// Reset empties all cache shards.
func (c *BigCache) Reset() error {
	for _, shard := range c.shards {
		shard.reset(c.config)
	}
	return nil
}

// Len computes the number of entries in the cache.
func (c *BigCache) Len() int {
	var len int
	for _, shard := range c.shards {
		len += shard.len()
	}
	return len
}

// Capacity returns the number of bytes stored in the cache.
func (c *BigCache) Capacity() int {
	var len int
	for _, shard := range c.shards {
		len += shard.capacity()
	}
	return len
}

// Stats returns cache's statistics.
func (c *BigCache) Stats() Stats {
	var s Stats
	for _, shard := range c.shards {
		tmp := shard.getStats()
		s.Hits += tmp.Hits
		s.Misses += tmp.Misses
		s.DelHits += tmp.DelHits
		s.DelMisses += tmp.DelMisses
		s.Collisions += tmp.Collisions
	}
	return s
}
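
// Metrics sketch (illustrative): the counters aggregated above can be combined
// into simple derived metrics such as a hit rate.
//
//	s := cache.Stats()
//	if total := s.Hits + s.Misses; total > 0 {
//		hitRate := float64(s.Hits) / float64(total)
//		_ = hitRate
//	}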

// Iterator returns an iterator that can be used to iterate over EntryInfo entries in the whole cache.
func (c *BigCache) Iterator() *EntryInfoIterator {
	return newIterator(c)
}
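
// Iteration sketch (illustrative): the traversal methods live with the
// EntryInfoIterator implementation, not in this file; SetNext and Value below
// are assumed from that implementation.
//
//	it := cache.Iterator()
//	for it.SetNext() {
//		info, err := it.Value()
//		if err != nil {
//			continue
//		}
//		_ = info
//	}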

// onEvict calls evict with the Expired reason when the oldest entry has outlived
// the cache's lifeWindow; it reports whether the entry was evicted. Both timestamps
// are expressed in seconds.
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
	oldestTimestamp := readTimestampFromEntry(oldestEntry)
	if currentTimestamp-oldestTimestamp > c.lifeWindow {
		evict(Expired)
		return true
	}
	return false
}

func (c *BigCache) cleanUp(currentTimestamp uint64) {
	for _, shard := range c.shards {
		shard.cleanUp(currentTimestamp)
	}
}
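
// getShard picks the shard for a hashed key by masking with shardMask. Because the
// shard count is validated to be a power of two in newBigCache, hashedKey&c.shardMask
// is equivalent to hashedKey%uint64(len(c.shards)); e.g. with 1024 shards the mask is 1023.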
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
	return c.shards[hashedKey&c.shardMask]
}

func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
	c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
}

func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
	// Invoke the callback only when no filter is configured or the bit for this
	// reason is set in onRemoveFilter.
	if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
		c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
	}
}

// notProvidedOnRemove is a no-op used when no removal callback is configured.
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
}