common/lru: add generic LRU implementation (#26162)
It seems there is no fully typed library implementation of an LRU cache. So I wrote one. Method names are the same as github.com/hashicorp/golang-lru, and the new type can be used as a drop-in replacement.

Two reasons to do this:

- It's much easier to understand what a cache is for when the types are right there.
- Performance: the new implementation is slightly faster and performs zero memory allocations in Add when the cache is at capacity. Overall, memory usage of the cache is much reduced because keys and values are no longer wrapped in interface{}.
This commit is contained in: parent f58ebd9696, commit 9afc6816d2
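Before the diff itself, here is a minimal, hypothetical usage sketch of the new typed API (the int/string key and value types are chosen purely for illustration). It shows what "fully typed" and "drop-in" mean in practice: Get returns the value type directly, with no interface{} wrapping and no type assertion at the call site.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common/lru"
    )

    func main() {
        // The element types are part of the cache type.
        c := lru.NewBasicLRU[int, string](2)
        c.Add(1, "one")
        c.Add(2, "two")
        c.Add(3, "three") // evicts key 1, the least recently used entry

        if v, ok := c.Get(2); ok {
            fmt.Println(v) // prints "two", no .(string) assertion needed
        }
        if _, ok := c.Get(1); !ok {
            fmt.Println("key 1 was evicted")
        }
    }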
common/lru/basiclru.go (new file, 223 lines)
@@ -0,0 +1,223 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package lru implements generically-typed LRU caches.
package lru

// BasicLRU is a simple LRU cache.
//
// This type is not safe for concurrent use.
// The zero value is not valid, instances must be created using NewCache.
type BasicLRU[K comparable, V any] struct {
    list  *list[K]
    items map[K]cacheItem[K, V]
    cap   int
}

type cacheItem[K any, V any] struct {
    elem  *listElem[K]
    value V
}

// NewBasicLRU creates a new LRU cache.
func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] {
    if capacity < 0 {
        capacity = 1
    }
    c := BasicLRU[K, V]{
        items: make(map[K]cacheItem[K, V]),
        list:  newList[K](),
        cap:   capacity,
    }
    return c
}

// Add adds a value to the cache. Returns true if an item was evicted to store the new item.
func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) {
    item, ok := c.items[key]
    if ok {
        // Already exists in cache.
        item.value = value
        c.items[key] = item
        c.list.moveToFront(item.elem)
        return false
    }

    var elem *listElem[K]
    if c.Len() >= c.cap {
        elem = c.list.removeLast()
        delete(c.items, elem.v)
        evicted = true
    } else {
        elem = new(listElem[K])
    }

    // Store the new item.
    // Note that, if another item was evicted, we re-use its list element here.
    elem.v = key
    c.items[key] = cacheItem[K, V]{elem, value}
    c.list.pushElem(elem)
    return evicted
}

// Contains reports whether the given key exists in the cache.
func (c *BasicLRU[K, V]) Contains(key K) bool {
    _, ok := c.items[key]
    return ok
}

// Get retrieves a value from the cache. This marks the key as recently used.
func (c *BasicLRU[K, V]) Get(key K) (value V, ok bool) {
    item, ok := c.items[key]
    if !ok {
        return value, false
    }
    c.list.moveToFront(item.elem)
    return item.value, true
}

// GetOldest retrieves the least-recently-used item.
// Note that this does not update the item's recency.
func (c *BasicLRU[K, V]) GetOldest() (key K, value V, ok bool) {
    lastElem := c.list.last()
    if lastElem == nil {
        return key, value, false
    }
    key = lastElem.v
    item := c.items[key]
    return key, item.value, true
}

// Len returns the current number of items in the cache.
func (c *BasicLRU[K, V]) Len() int {
    return len(c.items)
}

// Peek retrieves a value from the cache, but does not mark the key as recently used.
func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) {
    item, ok := c.items[key]
    return item.value, ok
}

// Purge empties the cache.
func (c *BasicLRU[K, V]) Purge() {
    c.list.init()
    for k := range c.items {
        delete(c.items, k)
    }
}

// Remove drops an item from the cache. Returns true if the key was present in cache.
func (c *BasicLRU[K, V]) Remove(key K) bool {
    item, ok := c.items[key]
    if ok {
        delete(c.items, key)
        c.list.remove(item.elem)
    }
    return ok
}

// RemoveOldest drops the least recently used item.
func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
    lastElem := c.list.last()
    if lastElem == nil {
        return key, value, false
    }

    key = lastElem.v
    item := c.items[key]
    delete(c.items, key)
    c.list.remove(lastElem)
    return key, item.value, true
}

// Keys returns all keys in the cache.
func (c *BasicLRU[K, V]) Keys() []K {
    keys := make([]K, 0, len(c.items))
    return c.list.appendTo(keys)
}
// list is a doubly-linked list holding items of type T.
// The zero value is not valid, use newList to create lists.
type list[T any] struct {
    root listElem[T]
}

type listElem[T any] struct {
    next *listElem[T]
    prev *listElem[T]
    v    T
}

func newList[T any]() *list[T] {
    l := new(list[T])
    l.init()
    return l
}

// init reinitializes the list, making it empty.
func (l *list[T]) init() {
    l.root.next = &l.root
    l.root.prev = &l.root
}

// push adds an element to the front of the list.
func (l *list[T]) pushElem(e *listElem[T]) {
    e.prev = &l.root
    e.next = l.root.next
    l.root.next = e
    e.next.prev = e
}

// moveToFront makes 'node' the head of the list.
func (l *list[T]) moveToFront(e *listElem[T]) {
    e.prev.next = e.next
    e.next.prev = e.prev
    l.pushElem(e)
}

// remove removes an element from the list.
func (l *list[T]) remove(e *listElem[T]) {
    e.prev.next = e.next
    e.next.prev = e.prev
    e.next, e.prev = nil, nil
}

// removeLast removes the last element of the list.
func (l *list[T]) removeLast() *listElem[T] {
    last := l.last()
    if last != nil {
        l.remove(last)
    }
    return last
}

// last returns the last element of the list, or nil if the list is empty.
func (l *list[T]) last() *listElem[T] {
    e := l.root.prev
    if e == &l.root {
        return nil
    }
    return e
}

// appendTo appends all list elements to a slice.
func (l *list[T]) appendTo(slice []T) []T {
    for e := l.root.prev; e != &l.root; e = e.prev {
        slice = append(slice, e.v)
    }
    return slice
}
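The Add path above reuses the evicted entry's list element instead of allocating a new one, which is where the "zero memory allocations in Add when the cache is at capacity" claim in the commit message comes from. A small hypothetical sketch (not part of this change; the test name is made up) that would observe this with the standard testing.AllocsPerRun helper:

    package lru

    import "testing"

    // Illustrative sketch only: once the cache is full, each Add evicts the
    // oldest key and reuses its list element, so steady-state Adds should not
    // allocate.
    func TestAddAllocsAtCapacity(t *testing.T) {
        cache := NewBasicLRU[int, int](64)
        for i := 0; i < 64; i++ {
            cache.Add(i, i)
        }
        n := 64
        allocs := testing.AllocsPerRun(100, func() {
            cache.Add(n, n) // evicts the oldest key, reusing its element
            n++
        })
        t.Logf("allocations per Add at capacity: %v", allocs) // expected: 0
    }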
common/lru/basiclru_test.go (new file, 240 lines)
@@ -0,0 +1,240 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package lru

import (
    "fmt"
    "io"
    "math/rand"
    "testing"
)

// Some of these test cases were adapted
// from https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru_test.go

func TestBasicLRU(t *testing.T) {
    cache := NewBasicLRU[int, int](128)

    for i := 0; i < 256; i++ {
        cache.Add(i, i)
    }
    if cache.Len() != 128 {
        t.Fatalf("bad len: %v", cache.Len())
    }

    // Check that Keys returns least-recent key first.
    keys := cache.Keys()
    if len(keys) != 128 {
        t.Fatal("wrong Keys() length", len(keys))
    }
    for i, k := range keys {
        v, ok := cache.Peek(k)
        if !ok {
            t.Fatalf("expected key %d be present", i)
        }
        if v != k {
            t.Fatalf("expected %d == %d", k, v)
        }
        if v != i+128 {
            t.Fatalf("wrong value at key %d: %d, want %d", i, v, i+128)
        }
    }

    for i := 0; i < 128; i++ {
        _, ok := cache.Get(i)
        if ok {
            t.Fatalf("%d should be evicted", i)
        }
    }
    for i := 128; i < 256; i++ {
        _, ok := cache.Get(i)
        if !ok {
            t.Fatalf("%d should not be evicted", i)
        }
    }

    for i := 128; i < 192; i++ {
        ok := cache.Remove(i)
        if !ok {
            t.Fatalf("%d should be in cache", i)
        }
        ok = cache.Remove(i)
        if ok {
            t.Fatalf("%d should not be in cache", i)
        }
        _, ok = cache.Get(i)
        if ok {
            t.Fatalf("%d should be deleted", i)
        }
    }

    // Request item 192.
    cache.Get(192)
    // It should be the last item returned by Keys().
    for i, k := range cache.Keys() {
        if (i < 63 && k != i+193) || (i == 63 && k != 192) {
            t.Fatalf("out of order key: %v", k)
        }
    }

    cache.Purge()
    if cache.Len() != 0 {
        t.Fatalf("bad len: %v", cache.Len())
    }
    if _, ok := cache.Get(200); ok {
        t.Fatalf("should contain nothing")
    }
}

func TestBasicLRUAddExistingKey(t *testing.T) {
    cache := NewBasicLRU[int, int](1)

    cache.Add(1, 1)
    cache.Add(1, 2)

    v, _ := cache.Get(1)
    if v != 2 {
        t.Fatal("wrong value:", v)
    }
}

// This test checks GetOldest and RemoveOldest.
func TestBasicLRUGetOldest(t *testing.T) {
    cache := NewBasicLRU[int, int](128)
    for i := 0; i < 256; i++ {
        cache.Add(i, i)
    }

    k, _, ok := cache.GetOldest()
    if !ok {
        t.Fatalf("missing")
    }
    if k != 128 {
        t.Fatalf("bad: %v", k)
    }

    k, _, ok = cache.RemoveOldest()
    if !ok {
        t.Fatalf("missing")
    }
    if k != 128 {
        t.Fatalf("bad: %v", k)
    }

    k, _, ok = cache.RemoveOldest()
    if !ok {
        t.Fatalf("missing oldest item")
    }
    if k != 129 {
        t.Fatalf("wrong oldest item: %v", k)
    }
}

// Test that Add returns true/false if an eviction occurred
func TestBasicLRUAddReturnValue(t *testing.T) {
    cache := NewBasicLRU[int, int](1)
    if cache.Add(1, 1) {
        t.Errorf("first add shouldn't have evicted")
    }
    if !cache.Add(2, 2) {
        t.Errorf("second add should have evicted")
    }
}

// This test verifies that Contains doesn't change item recency.
func TestBasicLRUContains(t *testing.T) {
    cache := NewBasicLRU[int, int](2)
    cache.Add(1, 1)
    cache.Add(2, 2)
    if !cache.Contains(1) {
        t.Errorf("1 should be in the cache")
    }
    cache.Add(3, 3)
    if cache.Contains(1) {
        t.Errorf("Contains should not have updated recency of 1")
    }
}

func BenchmarkLRU(b *testing.B) {
    var (
        capacity = 1000
        indexes  = make([]int, capacity*20)
        keys     = make([]string, capacity)
        values   = make([][]byte, capacity)
    )
    for i := range indexes {
        indexes[i] = rand.Intn(capacity)
    }
    for i := range keys {
        b := make([]byte, 32)
        rand.Read(b)
        keys[i] = string(b)
        rand.Read(b)
        values[i] = b
    }

    var sink []byte

    b.Run("Add/BasicLRU", func(b *testing.B) {
        cache := NewBasicLRU[int, int](capacity)
        for i := 0; i < b.N; i++ {
            cache.Add(i, i)
        }
    })
    b.Run("Get/BasicLRU", func(b *testing.B) {
        cache := NewBasicLRU[string, []byte](capacity)
        for i := 0; i < capacity; i++ {
            index := indexes[i]
            cache.Add(keys[index], values[index])
        }

        b.ResetTimer()
        for i := 0; i < b.N; i++ {
            k := keys[indexes[i%len(indexes)]]
            v, ok := cache.Get(k)
            if ok {
                sink = v
            }
        }
    })

    // // vs. github.com/hashicorp/golang-lru/simplelru
    // b.Run("Add/simplelru.LRU", func(b *testing.B) {
    //     cache, _ := simplelru.NewLRU(capacity, nil)
    //     for i := 0; i < b.N; i++ {
    //         cache.Add(i, i)
    //     }
    // })
    // b.Run("Get/simplelru.LRU", func(b *testing.B) {
    //     cache, _ := simplelru.NewLRU(capacity, nil)
    //     for i := 0; i < capacity; i++ {
    //         index := indexes[i]
    //         cache.Add(keys[index], values[index])
    //     }
    //
    //     b.ResetTimer()
    //     for i := 0; i < b.N; i++ {
    //         k := keys[indexes[i%len(indexes)]]
    //         v, ok := cache.Get(k)
    //         if ok {
    //             sink = v.([]byte)
    //         }
    //     }
    // })

    fmt.Fprintln(io.Discard, sink)
}
@@ -19,33 +19,32 @@ package lru

import (
    "math"
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/hashicorp/golang-lru/simplelru"
)

// SizeConstrainedLRU is a wrapper around simplelru.LRU. The simplelru.LRU is capable
// of item-count constraints, but is not capable of enforcing a byte-size constraint,
// hence this wrapper.
// blobType is the type constraint for values stored in SizeConstrainedCache.
type blobType interface {
    ~[]byte | ~string
}

// SizeConstrainedCache is a cache where capacity is in bytes (instead of item count). When the cache
// is at capacity, and a new item is added, older items are evicted until the size
// constraint is met.
//
// OBS: This cache assumes that items are content-addressed: keys are unique per content.
// In other words: two Add(..) with the same key K, will always have the same value V.
type SizeConstrainedLRU struct {
type SizeConstrainedCache[K comparable, V blobType] struct {
    size    uint64
    maxSize uint64
    lru     *simplelru.LRU
    lru     BasicLRU[K, V]
    lock    sync.Mutex
}

// NewSizeConstrainedLRU creates a new SizeConstrainedLRU.
func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU {
    lru, err := simplelru.NewLRU(math.MaxInt, nil)
    if err != nil {
        panic(err)
    }
    return &SizeConstrainedLRU{
// NewSizeConstrainedCache creates a new size-constrained LRU cache.
func NewSizeConstrainedCache[K comparable, V blobType](maxSize uint64) *SizeConstrainedCache[K, V] {
    return &SizeConstrainedCache[K, V]{
        size:    0,
        maxSize: max,
        lru:     lru,
        maxSize: maxSize,
        lru:     NewBasicLRU[K, V](math.MaxInt),
    }
}

@@ -53,7 +52,7 @@ func NewSizeConstrainedLRU(max uint64) *SizeConstrainedLRU {
// OBS: This cache assumes that items are content-addressed: keys are unique per content.
// In other words: two Add(..) with the same key K, will always have the same value V.
// OBS: The value is _not_ copied on Add, so the caller must not modify it afterwards.
func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) {
func (c *SizeConstrainedCache[K, V]) Add(key K, value V) (evicted bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

@@ -68,7 +67,7 @@ func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) {
                // list is now empty. Break
                break
            }
            targetSize -= uint64(len(v.([]byte)))
            targetSize -= uint64(len(v))
        }
        c.size = targetSize
    }
@@ -77,12 +76,9 @@ func (c *SizeConstrainedLRU) Add(key common.Hash, value []byte) (evicted bool) {
}

// Get looks up a key's value from the cache.
func (c *SizeConstrainedLRU) Get(key common.Hash) []byte {
func (c *SizeConstrainedCache[K, V]) Get(key K) (V, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    if v, ok := c.lru.Get(key); ok {
        return v.([]byte)
    }
    return nil
    return c.lru.Get(key)
}
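For orientation, here is a hedged usage sketch of the generic SizeConstrainedCache shown in the hunks above. The 64-byte budget, the key, and the value are invented for illustration; note that the capacity is a byte budget over the stored values rather than an item count.

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/common/lru"
    )

    func main() {
        // 64 is a byte limit on the total size of cached values.
        codeCache := lru.NewSizeConstrainedCache[common.Hash, []byte](64)

        blob := []byte("contract bytecode goes here")
        key := common.BytesToHash([]byte("example-key"))
        codeCache.Add(key, blob)

        // Get is typed: no .([]byte) assertion is needed anymore.
        if code, ok := codeCache.Get(key); ok {
            fmt.Printf("cached %d bytes\n", len(code))
        }
    }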
@@ -20,22 +20,21 @@ import (
    "encoding/binary"
    "fmt"
    "testing"

    "github.com/ethereum/go-ethereum/common"
)

func mkHash(i int) common.Hash {
    h := make([]byte, 32)
    binary.LittleEndian.PutUint64(h, uint64(i))
    return common.BytesToHash(h)
type testKey [8]byte

func mkKey(i int) (key testKey) {
    binary.LittleEndian.PutUint64(key[:], uint64(i))
    return key
}

func TestBlobLru(t *testing.T) {
    lru := NewSizeConstrainedLRU(100)
func TestSizeConstrainedCache(t *testing.T) {
    lru := NewSizeConstrainedCache[testKey, []byte](100)
    var want uint64
    // Add 11 items of 10 byte each. First item should be swapped out
    for i := 0; i < 11; i++ {
        k := mkHash(i)
        k := mkKey(i)
        v := fmt.Sprintf("value-%04d", i)
        lru.Add(k, []byte(v))
        want += uint64(len(v))
@@ -48,17 +47,17 @@ func TestBlobLru(t *testing.T) {
    }
    // Zero:th should be evicted
    {
        k := mkHash(0)
        if val := lru.Get(k); val != nil {
        k := mkKey(0)
        if _, ok := lru.Get(k); ok {
            t.Fatalf("should be evicted: %v", k)
        }
    }
    // Elems 1-11 should be present
    for i := 1; i < 11; i++ {
        k := mkHash(i)
        k := mkKey(i)
        want := fmt.Sprintf("value-%04d", i)
        have := lru.Get(k)
        if have == nil {
        have, ok := lru.Get(k)
        if !ok {
            t.Fatalf("missing key %v", k)
        }
        if string(have) != want {
@@ -67,26 +66,26 @@ func TestBlobLru(t *testing.T) {
        }
    }

// TestBlobLruOverflow tests what happens when inserting an element exceeding
// the max size
func TestBlobLruOverflow(t *testing.T) {
    lru := NewSizeConstrainedLRU(100)
// This test checks inserting an element exceeding the max size.
func TestSizeConstrainedCacheOverflow(t *testing.T) {
    lru := NewSizeConstrainedCache[testKey, []byte](100)

    // Add 10 items of 10 byte each, filling the cache
    for i := 0; i < 10; i++ {
        k := mkHash(i)
        k := mkKey(i)
        v := fmt.Sprintf("value-%04d", i)
        lru.Add(k, []byte(v))
    }
    // Add one single large elem. We expect it to swap out all entries.
    {
        k := mkHash(1337)
        k := mkKey(1337)
        v := make([]byte, 200)
        lru.Add(k, v)
    }
    // Elems 0-9 should be missing
    for i := 1; i < 10; i++ {
        k := mkHash(i)
        if val := lru.Get(k); val != nil {
        k := mkKey(i)
        if _, ok := lru.Get(k); ok {
            t.Fatalf("should be evicted: %v", k)
        }
    }
@@ -97,7 +96,7 @@ func TestBlobLruOverflow(t *testing.T) {
    // Adding one small item should swap out the large one
    {
        i := 0
        k := mkHash(i)
        k := mkKey(i)
        v := fmt.Sprintf("value-%04d", i)
        lru.Add(k, []byte(v))
        if have, want := lru.size, uint64(10); have != want {
@@ -106,17 +105,51 @@ func TestBlobLruOverflow(t *testing.T) {
    }
}

// TestBlobLruSameItem tests what happens when inserting the same k/v multiple times.
func TestBlobLruSameItem(t *testing.T) {
    lru := NewSizeConstrainedLRU(100)
    // Add one 10 byte-item 10 times
    k := mkHash(0)
// This checks what happens when inserting the same k/v multiple times.
func TestSizeConstrainedCacheSameItem(t *testing.T) {
    lru := NewSizeConstrainedCache[testKey, []byte](100)

    // Add one 10 byte-item 10 times.
    k := mkKey(0)
    v := fmt.Sprintf("value-%04d", 0)
    for i := 0; i < 10; i++ {
        lru.Add(k, []byte(v))
    }
    // The size should be accurate

    // The size should be accurate.
    if have, want := lru.size, uint64(10); have != want {
        t.Fatalf("size wrong, have %d want %d", have, want)
    }
}

// This tests that empty/nil values are handled correctly.
func TestSizeConstrainedCacheEmpties(t *testing.T) {
    lru := NewSizeConstrainedCache[testKey, []byte](100)

    // This test abuses the lru a bit, using different keys for identical value(s).
    for i := 0; i < 10; i++ {
        lru.Add(testKey{byte(i)}, []byte{})
        lru.Add(testKey{byte(255 - i)}, nil)
    }

    // The size should not count, only the values count. So this could be a DoS
    // since it basically has no cap, and it is intentionally overloaded with
    // different-keyed 0-length values.
    if have, want := lru.size, uint64(0); have != want {
        t.Fatalf("size wrong, have %d want %d", have, want)
    }

    for i := 0; i < 10; i++ {
        if v, ok := lru.Get(testKey{byte(i)}); !ok {
            t.Fatalf("test %d: expected presence", i)
        } else if v == nil {
            t.Fatalf("test %d, v is nil", i)
        }

        if v, ok := lru.Get(testKey{byte(255 - i)}); !ok {
            t.Fatalf("test %d: expected presence", i)
        } else if v != nil {
            t.Fatalf("test %d, v is not nil", i)
        }
    }
}
common/lru/lru.go (new file, 95 lines)
@@ -0,0 +1,95 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package lru

import "sync"

// Cache is a LRU cache.
// This type is safe for concurrent use.
type Cache[K comparable, V any] struct {
    cache BasicLRU[K, V]
    mu    sync.Mutex
}

// NewCache creates an LRU cache.
func NewCache[K comparable, V any](capacity int) *Cache[K, V] {
    return &Cache[K, V]{cache: NewBasicLRU[K, V](capacity)}
}

// Add adds a value to the cache. Returns true if an item was evicted to store the new item.
func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Add(key, value)
}

// Contains reports whether the given key exists in the cache.
func (c *Cache[K, V]) Contains(key K) bool {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Contains(key)
}

// Get retrieves a value from the cache. This marks the key as recently used.
func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Get(key)
}

// Len returns the current number of items in the cache.
func (c *Cache[K, V]) Len() int {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Len()
}

// Peek retrieves a value from the cache, but does not mark the key as recently used.
func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Peek(key)
}

// Purge empties the cache.
func (c *Cache[K, V]) Purge() {
    c.mu.Lock()
    defer c.mu.Unlock()

    c.cache.Purge()
}

// Remove drops an item from the cache. Returns true if the key was present in cache.
func (c *Cache[K, V]) Remove(key K) bool {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Remove(key)
}

// Keys returns all keys of items currently in the LRU.
func (c *Cache[K, V]) Keys() []K {
    c.mu.Lock()
    defer c.mu.Unlock()

    return c.cache.Keys()
}
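Cache is simply BasicLRU guarded by a sync.Mutex, so one instance can be shared by several goroutines without external locking. A minimal sketch under that assumption, with made-up worker keys:

    package main

    import (
        "fmt"
        "sync"

        "github.com/ethereum/go-ethereum/common/lru"
    )

    func main() {
        cache := lru.NewCache[string, int](128)

        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func(n int) {
                defer wg.Done()
                // Every method takes the internal mutex, so concurrent
                // Adds and Gets are safe without extra synchronization.
                cache.Add(fmt.Sprintf("worker-%d", n), n)
            }(i)
        }
        wg.Wait()

        fmt.Println("entries:", cache.Len())
    }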
@@ -30,6 +30,7 @@ import (
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/common/prque"
    "github.com/ethereum/go-ethereum/consensus"
@@ -45,8 +46,8 @@ import (
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/trie"
    lru "github.com/hashicorp/golang-lru"
)

var (
@@ -200,12 +201,14 @@ type BlockChain struct {
    currentSafeBlock      atomic.Value // Current safe head

    stateCache    state.Database // State database to reuse between imports (contains state cache)
    bodyCache     *lru.Cache     // Cache for the most recent block bodies
    bodyRLPCache  *lru.Cache     // Cache for the most recent block bodies in RLP encoded format
    receiptsCache *lru.Cache     // Cache for the most recent receipts per block
    blockCache    *lru.Cache     // Cache for the most recent entire blocks
    txLookupCache *lru.Cache     // Cache for the most recent transaction lookup data.
    futureBlocks  *lru.Cache     // future blocks are blocks added for later processing
    bodyCache     *lru.Cache[common.Hash, *types.Body]
    bodyRLPCache  *lru.Cache[common.Hash, rlp.RawValue]
    receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
    blockCache    *lru.Cache[common.Hash, *types.Block]
    txLookupCache *lru.Cache[common.Hash, *rawdb.LegacyTxLookupEntry]

    // future blocks are blocks added for later processing
    futureBlocks *lru.Cache[common.Hash, *types.Block]

    wg   sync.WaitGroup //
    quit chan struct{}  // shutdown signal, closed in Stop.
@@ -227,12 +230,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
    if cacheConfig == nil {
        cacheConfig = defaultCacheConfig
    }
    bodyCache, _ := lru.New(bodyCacheLimit)
    bodyRLPCache, _ := lru.New(bodyCacheLimit)
    receiptsCache, _ := lru.New(receiptsCacheLimit)
    blockCache, _ := lru.New(blockCacheLimit)
    txLookupCache, _ := lru.New(txLookupCacheLimit)
    futureBlocks, _ := lru.New(maxFutureBlocks)

    // Setup the genesis block, commit the provided genesis specification
    // to database if the genesis block is not present yet, or load the
@@ -261,12 +258,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
        }),
        quit:          make(chan struct{}),
        chainmu:       syncx.NewClosableMutex(),
        bodyCache:     bodyCache,
        bodyRLPCache:  bodyRLPCache,
        receiptsCache: receiptsCache,
        blockCache:    blockCache,
        txLookupCache: txLookupCache,
        futureBlocks:  futureBlocks,
        bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
        bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
        receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit),
        blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
        txLookupCache: lru.NewCache[common.Hash, *rawdb.LegacyTxLookupEntry](txLookupCacheLimit),
        futureBlocks:  lru.NewCache[common.Hash, *types.Block](maxFutureBlocks),
        engine:        engine,
        vmConfig:      vmConfig,
    }
@@ -957,7 +954,7 @@ func (bc *BlockChain) procFutureBlocks() {
    blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
    for _, hash := range bc.futureBlocks.Keys() {
        if block, exist := bc.futureBlocks.Peek(hash); exist {
            blocks = append(blocks, block.(*types.Block))
            blocks = append(blocks, block)
        }
    }
    if len(blocks) > 0 {
@@ -96,8 +96,7 @@ func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
    // Short circuit if the body's already in the cache, retrieve otherwise
    if cached, ok := bc.bodyCache.Get(hash); ok {
        body := cached.(*types.Body)
        return body
        return cached
    }
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
@@ -117,7 +116,7 @@ func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
    // Short circuit if the body's already in the cache, retrieve otherwise
    if cached, ok := bc.bodyRLPCache.Get(hash); ok {
        return cached.(rlp.RawValue)
        return cached
    }
    number := bc.hc.GetBlockNumber(hash)
    if number == nil {
@@ -159,7 +158,7 @@ func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
    // Short circuit if the block's already in the cache, retrieve otherwise
    if block, ok := bc.blockCache.Get(hash); ok {
        return block.(*types.Block)
        return block
    }
    block := rawdb.ReadBlock(bc.db, hash, number)
    if block == nil {
@@ -211,7 +210,7 @@ func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*type
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
    if receipts, ok := bc.receiptsCache.Get(hash); ok {
        return receipts.(types.Receipts)
        return receipts
    }
    number := rawdb.ReadHeaderNumber(bc.db, hash)
    if number == nil {
@@ -255,7 +254,7 @@ func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, max
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
    // Short circuit if the txlookup already in the cache, retrieve otherwise
    if lookup, exist := bc.txLookupCache.Get(hash); exist {
        return lookup.(*rawdb.LegacyTxLookupEntry)
        return lookup
    }
    tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
    if tx == nil {
@@ -27,6 +27,7 @@ import (
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/consensus"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +35,6 @@ import (
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rlp"
    lru "github.com/hashicorp/golang-lru"
)

const (
@@ -64,9 +64,9 @@ type HeaderChain struct {
    currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
    currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

    headerCache *lru.Cache // Cache for the most recent block headers
    tdCache     *lru.Cache // Cache for the most recent block total difficulties
    numberCache *lru.Cache // Cache for the most recent block numbers
    headerCache *lru.Cache[common.Hash, *types.Header]
    tdCache     *lru.Cache[common.Hash, *big.Int] // most recent total difficulties
    numberCache *lru.Cache[common.Hash, uint64]   // most recent block numbers

    procInterrupt func() bool

@@ -77,10 +77,6 @@ type HeaderChain struct {
// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
    headerCache, _ := lru.New(headerCacheLimit)
    tdCache, _ := lru.New(tdCacheLimit)
    numberCache, _ := lru.New(numberCacheLimit)

    // Seed a fast but crypto originating random generator
    seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
    if err != nil {
@@ -89,9 +85,9 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
    hc := &HeaderChain{
        config:        config,
        chainDb:       chainDb,
        headerCache:   headerCache,
        tdCache:       tdCache,
        numberCache:   numberCache,
        headerCache:   lru.NewCache[common.Hash, *types.Header](headerCacheLimit),
        tdCache:       lru.NewCache[common.Hash, *big.Int](tdCacheLimit),
        numberCache:   lru.NewCache[common.Hash, uint64](numberCacheLimit),
        procInterrupt: procInterrupt,
        rand:          mrand.New(mrand.NewSource(seed.Int64())),
        engine:        engine,
@@ -115,8 +111,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
// from the cache or database
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
    if cached, ok := hc.numberCache.Get(hash); ok {
        number := cached.(uint64)
        return &number
        return &cached
    }
    number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
    if number != nil {
@@ -442,7 +437,7 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
    // Short circuit if the td's already in the cache, retrieve otherwise
    if cached, ok := hc.tdCache.Get(hash); ok {
        return cached.(*big.Int)
        return cached
    }
    td := rawdb.ReadTd(hc.chainDb, hash, number)
    if td == nil {
@@ -458,7 +453,7 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
    // Short circuit if the header's already in the cache, retrieve otherwise
    if header, ok := hc.headerCache.Get(hash); ok {
        return header.(*types.Header)
        return header
    }
    header := rawdb.ReadHeader(hc.chainDb, hash, number)
    if header == nil {
@@ -525,10 +520,9 @@ func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
        if !ok {
            break
        }
        h := header.(*types.Header)
        rlpData, _ := rlp.EncodeToBytes(h)
        rlpData, _ := rlp.EncodeToBytes(header)
        headers = append(headers, rlpData)
        hash = h.ParentHash
        hash = header.ParentHash
        count--
        number--
    }
@@ -21,12 +21,11 @@ import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    lru2 "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/trie"
    lru "github.com/hashicorp/golang-lru"
)

const (
@@ -130,20 +129,19 @@ func NewDatabase(db ethdb.Database) Database {
// is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
    csc, _ := lru.New(codeSizeCacheSize)
    return &cachingDB{
        db:            trie.NewDatabaseWithConfig(db, config),
        disk:          db,
        codeSizeCache: csc,
        codeCache:     lru2.NewSizeConstrainedLRU(codeCacheSize),
        codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
        codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
    }
}

type cachingDB struct {
    db            *trie.Database
    disk          ethdb.KeyValueStore
    codeSizeCache *lru.Cache
    codeCache     *lru2.SizeConstrainedLRU
    codeSizeCache *lru.Cache[common.Hash, int]
    codeCache     *lru.SizeConstrainedCache[common.Hash, []byte]
}

// OpenTrie opens the main account trie at a specific root hash.
@@ -176,10 +174,11 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {

// ContractCode retrieves a particular contract's code.
func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
    if code := db.codeCache.Get(codeHash); len(code) > 0 {
    code, _ := db.codeCache.Get(codeHash)
    if len(code) > 0 {
        return code, nil
    }
    code := rawdb.ReadCode(db.disk, codeHash)
    code = rawdb.ReadCode(db.disk, codeHash)
    if len(code) > 0 {
        db.codeCache.Add(codeHash, code)
        db.codeSizeCache.Add(codeHash, len(code))
@@ -192,10 +191,11 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error
// code can't be found in the cache, then check the existence with **new**
// db scheme.
func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error) {
    if code := db.codeCache.Get(codeHash); len(code) > 0 {
    code, _ := db.codeCache.Get(codeHash)
    if len(code) > 0 {
        return code, nil
    }
    code := rawdb.ReadCodeWithPrefix(db.disk, codeHash)
    code = rawdb.ReadCodeWithPrefix(db.disk, codeHash)
    if len(code) > 0 {
        db.codeCache.Add(codeHash, code)
        db.codeSizeCache.Add(codeHash, len(code))
@@ -207,7 +207,7 @@ func (db *cachingDB) ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]b
// ContractCodeSize retrieves a particular contracts code's size.
func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
    if cached, ok := db.codeSizeCache.Get(codeHash); ok {
        return cached.(int), nil
        return cached, nil
    }
    code, err := db.ContractCode(addrHash, codeHash)
    return len(code), err
@@ -26,6 +26,7 @@ import (

    "github.com/ethereum/go-ethereum"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/bloombits"
    "github.com/ethereum/go-ethereum/core/rawdb"
@@ -35,7 +36,6 @@ import (
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rpc"
    lru "github.com/hashicorp/golang-lru"
)

// Config represents the configuration of the filter system.
@@ -77,21 +77,16 @@ type Backend interface {
// FilterSystem holds resources shared by all filters.
type FilterSystem struct {
    backend   Backend
    logsCache *lru.Cache
    logsCache *lru.Cache[common.Hash, [][]*types.Log]
    cfg       *Config
}

// NewFilterSystem creates a filter system.
func NewFilterSystem(backend Backend, config Config) *FilterSystem {
    config = config.withDefaults()

    cache, err := lru.New(config.LogCacheSize)
    if err != nil {
        panic(err)
    }
    return &FilterSystem{
        backend:   backend,
        logsCache: cache,
        logsCache: lru.NewCache[common.Hash, [][]*types.Log](config.LogCacheSize),
        cfg:       &config,
    }
}
@@ -100,7 +95,7 @@ func NewFilterSystem(backend Backend, config Config) *FilterSystem {
func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) {
    cached, ok := sys.logsCache.Get(blockHash)
    if ok {
        return cached.([][]*types.Log), nil
        return cached, nil
    }

    logs, err := sys.backend.GetLogs(ctx, blockHash, number)
@@ -56,7 +56,12 @@ type blockFees struct {
    err     error
}

// processedFees contains the results of a processed block and is also used for caching
type cacheKey struct {
    number      uint64
    percentiles string
}

// processedFees contains the results of a processed block.
type processedFees struct {
    reward               []*big.Int
    baseFee, nextBaseFee *big.Int
@@ -270,13 +275,10 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
            oracle.processBlock(fees, rewardPercentiles)
            results <- fees
        } else {
            cacheKey := struct {
                number      uint64
                percentiles string
            }{blockNumber, string(percentileKey)}
            cacheKey := cacheKey{number: blockNumber, percentiles: string(percentileKey)}

            if p, ok := oracle.historyCache.Get(cacheKey); ok {
                fees.results = p.(processedFees)
                fees.results = p
                results <- fees
            } else {
                if len(rewardPercentiles) != 0 {
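The named cacheKey type above, with the percentiles flattened into a string, exists because the generic cache's key must satisfy comparable; a struct containing a []float64 field would not be accepted as K. A small illustrative sketch of the same pattern (the key values and the "25,50,75" encoding are invented):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/common/lru"
    )

    // cacheKey mirrors the pattern in the hunk above: every field is
    // comparable, so the struct itself can serve as the cache key type.
    type cacheKey struct {
        number      uint64
        percentiles string // an encoded form of the requested percentiles
    }

    func main() {
        cache := lru.NewCache[cacheKey, string](16)
        cache.Add(cacheKey{number: 100, percentiles: "25,50,75"}, "fees for block 100")

        if v, ok := cache.Get(cacheKey{number: 100, percentiles: "25,50,75"}); ok {
            fmt.Println(v)
        }
    }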
@@ -23,13 +23,13 @@ import (
    "sync"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/event"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/params"
    "github.com/ethereum/go-ethereum/rpc"
    lru "github.com/hashicorp/golang-lru"
)

const sampleNumber = 3 // Number of transactions sampled in a block
@@ -72,7 +72,8 @@ type Oracle struct {

    checkBlocks, percentile           int
    maxHeaderHistory, maxBlockHistory int
    historyCache                      *lru.Cache

    historyCache *lru.Cache[cacheKey, processedFees]
}

// NewOracle returns a new gasprice oracle which can recommend suitable
@@ -114,7 +115,7 @@ func NewOracle(backend OracleBackend, params Config) *Oracle {
        log.Warn("Sanitizing invalid gasprice oracle max block history", "provided", params.MaxBlockHistory, "updated", maxBlockHistory)
    }

    cache, _ := lru.New(2048)
    cache := lru.NewCache[cacheKey, processedFees](2048)
    headEvent := make(chan core.ChainHeadEvent, 1)
    backend.SubscribeChainHeadEvent(headEvent)
    go func() {
@@ -22,10 +22,10 @@ import (
    "encoding/binary"
    "time"

    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/hashicorp/golang-lru/simplelru"
)

const handshakeTimeout = time.Second
@@ -33,7 +33,7 @@ const handshakeTimeout = time.Second
// The SessionCache keeps negotiated encryption keys and
// state for in-progress handshakes in the Discovery v5 wire protocol.
type SessionCache struct {
    sessions   *simplelru.LRU
    sessions   lru.BasicLRU[sessionID, *session]
    handshakes map[sessionID]*Whoareyou
    clock      mclock.Clock

@@ -62,12 +62,8 @@ func (s *session) keysFlipped() *session {
}

func NewSessionCache(maxItems int, clock mclock.Clock) *SessionCache {
    cache, err := simplelru.NewLRU(maxItems, nil)
    if err != nil {
        panic("can't create session cache")
    }
    return &SessionCache{
        sessions:   cache,
        sessions:   lru.NewBasicLRU[sessionID, *session](maxItems),
        handshakes: make(map[sessionID]*Whoareyou),
        clock:      clock,
        nonceGen:   generateNonce,
@@ -95,11 +91,8 @@ func (sc *SessionCache) nextNonce(s *session) (Nonce, error) {

// session returns the current session for the given node, if any.
func (sc *SessionCache) session(id enode.ID, addr string) *session {
    item, ok := sc.sessions.Get(sessionID{id, addr})
    if !ok {
        return nil
    }
    return item.(*session)
    item, _ := sc.sessions.Get(sessionID{id, addr})
    return item
}

// readKey returns the current read key for the given node.
@@ -27,12 +27,12 @@ import (
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/common/lru"
    "github.com/ethereum/go-ethereum/common/mclock"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/enr"
    lru "github.com/hashicorp/golang-lru"
    "golang.org/x/sync/singleflight"
    "golang.org/x/time/rate"
)
@@ -41,7 +41,7 @@ import (
type Client struct {
    cfg          Config
    clock        mclock.Clock
    entries      *lru.Cache
    entries      *lru.Cache[string, entry]
    ratelimit    *rate.Limiter
    singleflight singleflight.Group
}
@@ -96,14 +96,10 @@ func (cfg Config) withDefaults() Config {
// NewClient creates a client.
func NewClient(cfg Config) *Client {
    cfg = cfg.withDefaults()
    cache, err := lru.New(cfg.CacheLimit)
    if err != nil {
        panic(err)
    }
    rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10)
    return &Client{
        cfg:       cfg,
        entries:   cache,
        entries:   lru.NewCache[string, entry](cfg.CacheLimit),
        clock:     mclock.System{},
        ratelimit: rlimit,
    }
@@ -176,7 +172,7 @@ func (c *Client) resolveEntry(ctx context.Context, domain, hash string) (entry,
    }
    cacheKey := truncateHash(hash)
    if e, ok := c.entries.Get(cacheKey); ok {
        return e.(entry), nil
        return e, nil
    }

    ei, err, _ := c.singleflight.Do(cacheKey, func() (interface{}, error) {