Merge pull request #19114 from holiman/update_bigcache

vendor: update bigcache

commit b6ce358a9b
vendor/github.com/allegro/bigcache/bigcache.go | 2 changes (generated, vendored)

@@ -102,7 +102,7 @@ func (c *BigCache) Close() error {
 }
 
 // Get reads entry for the key.
-// It returns an EntryNotFoundError when
+// It returns an ErrEntryNotFound when
 // no entry exists for the given key.
 func (c *BigCache) Get(key string) ([]byte, error) {
	hashedKey := c.hash.Sum64(key)
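
With this update, BigCache.Get signals a miss through the exported sentinel ErrEntryNotFound rather than by returning a *EntryNotFoundError value. A minimal caller sketch under the new API (the cache construction and key are illustrative, not taken from this PR):

	package main

	import (
		"fmt"
		"time"

		"github.com/allegro/bigcache"
	)

	func main() {
		// Error from NewBigCache ignored for brevity in this sketch.
		cache, _ := bigcache.NewBigCache(bigcache.DefaultConfig(10 * time.Minute))
		if _, err := cache.Get("missing-key"); err == bigcache.ErrEntryNotFound {
			// A sentinel error compares with plain ==; no type assertion needed.
			fmt.Println("cache miss")
		}
	}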
vendor/github.com/allegro/bigcache/entry_not_found_error.go | 17 changes (generated, vendored)

@@ -1,17 +1,6 @@
 package bigcache
 
-import "fmt"
+import "errors"
 
-// EntryNotFoundError is an error type struct which is returned when entry was not found for provided key
-type EntryNotFoundError struct {
-	key string
-}
-
-func notFound(key string) error {
-	return &EntryNotFoundError{key}
-}
-
-// Error returned when entry does not exist.
-func (e EntryNotFoundError) Error() string {
-	return fmt.Sprintf("Entry %q not found", e.key)
-}
+// ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key
+var ErrEntryNotFound = errors.New("Entry not found")
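
Replacing the error struct with a package-level sentinel removes an allocation on every miss (notFound built a fresh *EntryNotFoundError per call) at the cost of dropping the key from the message. Callers migrate from a type assertion to an identity check; a sketch (isMiss is a hypothetical helper, not part of the library):

	package cachedemo

	import "github.com/allegro/bigcache"

	// isMiss reports whether err is a bigcache miss.
	// Before this update the check was a type assertion:
	//	_, ok := err.(*bigcache.EntryNotFoundError)
	func isMiss(err error) bool {
		return err == bigcache.ErrEntryNotFound
	}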
vendor/github.com/allegro/bigcache/queue/bytes_queue.go | 36 changes (generated, vendored)

@@ -16,6 +16,12 @@ const (
	minimumEmptyBlobSize = 32 + headerEntrySize
 )
 
+var (
+	errEmptyQueue       = &queueError{"Empty queue"}
+	errInvalidIndex     = &queueError{"Index must be greater than zero. Invalid index."}
+	errIndexOutOfBounds = &queueError{"Index out of range"}
+)
+
 // BytesQueue is a non-thread safe queue type of fifo based on bytes array.
 // For every push operation index of entry is returned. It can be used to read the entry later
 type BytesQueue struct {
@@ -162,6 +168,11 @@ func (q *BytesQueue) Get(index int) ([]byte, error) {
	return data, err
 }
 
+// CheckGet checks if an entry can be read from index
+func (q *BytesQueue) CheckGet(index int) error {
+	return q.peekCheckErr(index)
+}
+
 // Capacity returns number of allocated bytes for queue
 func (q *BytesQueue) Capacity() int {
	return q.capacity
@@ -177,18 +188,35 @@ func (e *queueError) Error() string {
	return e.message
 }
 
-func (q *BytesQueue) peek(index int) ([]byte, int, error) {
+// peekCheckErr is identical to peek, but does not actually return any data
+func (q *BytesQueue) peekCheckErr(index int) error {
 
	if q.count == 0 {
-		return nil, 0, &queueError{"Empty queue"}
+		return errEmptyQueue
	}
 
	if index <= 0 {
-		return nil, 0, &queueError{"Index must be grater than zero. Invalid index."}
+		return errInvalidIndex
	}
 
	if index+headerEntrySize >= len(q.array) {
-		return nil, 0, &queueError{"Index out of range"}
+		return errIndexOutOfBounds
+	}
+	return nil
+}
+
+func (q *BytesQueue) peek(index int) ([]byte, int, error) {
+
+	if q.count == 0 {
+		return nil, 0, errEmptyQueue
+	}
+
+	if index <= 0 {
+		return nil, 0, errInvalidIndex
+	}
+
+	if index+headerEntrySize >= len(q.array) {
+		return nil, 0, errIndexOutOfBounds
	}
 
	blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize]))
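
Hoisting the three failure cases into shared sentinel vars means no queueError is allocated on a failed lookup, and the new CheckGet answers "is this index readable?" without retrieving the entry, which is exactly what the shard delete path below wants for its read-lock pre-check. An illustrative wrapper (readIfPresent is hypothetical, not part of the library):

	package queuedemo

	import "github.com/allegro/bigcache/queue"

	// readIfPresent probes the index with CheckGet and calls Get
	// only when the probe succeeds.
	func readIfPresent(q *queue.BytesQueue, index int) ([]byte, error) {
		if err := q.CheckGet(index); err != nil {
			// Empty queue, non-positive index, or index out of bounds;
			// the shared sentinels mean no allocation on this path.
			return nil, err
		}
		return q.Get(index)
	}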
vendor/github.com/allegro/bigcache/shard.go | 41 changes (generated, vendored)

@@ -32,7 +32,7 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
	if itemIndex == 0 {
		s.lock.RUnlock()
		s.miss()
-		return nil, notFound(key)
+		return nil, ErrEntryNotFound
	}
 
	wrappedEntry, err := s.entries.Get(int(itemIndex))
@@ -47,11 +47,12 @@ func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
	}
		s.lock.RUnlock()
		s.collision()
-		return nil, notFound(key)
+		return nil, ErrEntryNotFound
	}
+	entry := readEntry(wrappedEntry)
	s.lock.RUnlock()
	s.hit()
-	return readEntry(wrappedEntry), nil
+	return entry, nil
 }
 
 func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
@@ -85,17 +86,17 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
 }
 
 func (s *cacheShard) del(key string, hashedKey uint64) error {
+	// Optimistic pre-check using only readlock
	s.lock.RLock()
	itemIndex := s.hashmap[hashedKey]
 
	if itemIndex == 0 {
		s.lock.RUnlock()
		s.delmiss()
-		return notFound(key)
+		return ErrEntryNotFound
	}
 
-	wrappedEntry, err := s.entries.Get(int(itemIndex))
-	if err != nil {
+	if err := s.entries.CheckGet(int(itemIndex)); err != nil {
		s.lock.RUnlock()
		s.delmiss()
		return err
@@ -104,6 +105,23 @@ func (s *cacheShard) del(key string, hashedKey uint64) error {
 
	s.lock.Lock()
	{
+		// After obtaining the writelock, we need to read the same again,
+		// since the data delivered earlier may be stale now
+		itemIndex = s.hashmap[hashedKey]
+
+		if itemIndex == 0 {
+			s.lock.Unlock()
+			s.delmiss()
+			return ErrEntryNotFound
+		}
+
+		wrappedEntry, err := s.entries.Get(int(itemIndex))
+		if err != nil {
+			s.lock.Unlock()
+			s.delmiss()
+			return err
+		}
+
		delete(s.hashmap, hashedKey)
		s.onRemove(wrappedEntry, Deleted)
		resetKeyFromEntry(wrappedEntry)
@@ -136,17 +154,22 @@ func (s *cacheShard) cleanUp(currentTimestamp uint64) {
 }
 
 func (s *cacheShard) getOldestEntry() ([]byte, error) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
	return s.entries.Peek()
 }
 
 func (s *cacheShard) getEntry(index int) ([]byte, error) {
-	return s.entries.Get(index)
+	s.lock.RLock()
+	entry, err := s.entries.Get(index)
+	s.lock.RUnlock()
+
+	return entry, err
 }
 
 func (s *cacheShard) copyKeys() (keys []uint32, next int) {
-	keys = make([]uint32, len(s.hashmap))
-
	s.lock.RLock()
+	keys = make([]uint32, len(s.hashmap))
 
	for _, index := range s.hashmap {
		keys[next] = index
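
Two themes run through the shard changes: every read of the underlying queue now happens strictly under the lock (get copies the entry before RUnlock; getOldestEntry and getEntry take the read lock themselves), and del becomes an optimistic double-check — a cheap CheckGet under the read lock, then a re-read under the write lock, because the entry may have been removed in between. The locking shape, distilled to a generic map-backed store (a sketch of the pattern only, not the vendored code):

	package sharddemo

	import (
		"errors"
		"sync"
	)

	var errNotFound = errors.New("entry not found")

	type store struct {
		mu    sync.RWMutex
		items map[uint64][]byte
	}

	func (s *store) del(key uint64) error {
		// Optimistic pre-check using only the read lock.
		s.mu.RLock()
		_, ok := s.items[key]
		s.mu.RUnlock()
		if !ok {
			return errNotFound // common miss path: the write lock is never taken
		}

		s.mu.Lock()
		defer s.mu.Unlock()
		// Re-check under the write lock: the earlier read may be stale.
		if _, ok := s.items[key]; !ok {
			return errNotFound
		}
		delete(s.items, key)
		return nil
	}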
|
12
vendor/vendor.json
vendored
12
vendor/vendor.json
vendored
@ -39,16 +39,16 @@
|
|||||||
"revisionTime": "2018-01-16T20:38:02Z"
|
"revisionTime": "2018-01-16T20:38:02Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "9Niiu1GNhWUrXnGZrl8AU4EzbVE=",
|
"checksumSHA1": "8skJYOdQytXjimcDPLRW4tonX3A=",
|
||||||
"path": "github.com/allegro/bigcache",
|
"path": "github.com/allegro/bigcache",
|
||||||
"revision": "bff00e20c68d9f136477d62d182a7dc917bae0ca",
|
"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
|
||||||
"revisionTime": "2018-10-22T20:06:25Z"
|
"revisionTime": "2019-02-18T06:46:05Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "zqToN+R6KybEskp1D4G/lAOKXU4=",
|
"checksumSHA1": "vtT7NcYLatJmxVQQEeSESyrgVg0=",
|
||||||
"path": "github.com/allegro/bigcache/queue",
|
"path": "github.com/allegro/bigcache/queue",
|
||||||
"revision": "bff00e20c68d9f136477d62d182a7dc917bae0ca",
|
"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
|
||||||
"revisionTime": "2018-10-22T20:06:25Z"
|
"revisionTime": "2019-02-18T06:46:05Z"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",
|
"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",
|
||||||
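
Both vendored bigcache packages move in lockstep to revision e24eb225f156 (2019-02-18), with vendor.json carrying the checksums govendor recomputed. A bump like this is typically produced with govendor's fetch command, e.g. govendor fetch github.com/allegro/bigcache/... (assumed; the PR does not record the exact invocation).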