trie/triedb/hashdb: take lock around access to dirties cache (#28542)

Add read-locking of the db lock around access to the dirties cache in hashdb.Database,
to prevent a data race against hashdb.Database.dereference, which can modify the dirties
map by deleting an item.
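
For illustration, a minimal self-contained sketch of the pattern this change enforces, using toy types rather than the real hashdb code: readers of the dirties map take the read lock, while dereference-style writers that delete entries hold the write lock.

package main

import (
	"fmt"
	"sync"
)

// node stands in for hashdb's cachedNode; the blob is never mutated once stored.
type node struct {
	blob []byte
}

// db stands in for hashdb.Database: a dirties map guarded by an RWMutex.
type db struct {
	lock    sync.RWMutex
	dirties map[string]*node
}

// get mirrors the read path: the map lookup happens under RLock, and the
// immutable blob can then be used after the lock is released.
func (d *db) get(key string) []byte {
	d.lock.RLock()
	n := d.dirties[key]
	d.lock.RUnlock()
	if n == nil {
		return nil
	}
	return n.blob
}

// dereference mirrors the write path that raced with unguarded readers: it
// deletes from the map, so it must hold the write lock.
func (d *db) dereference(key string) {
	d.lock.Lock()
	defer d.lock.Unlock()
	delete(d.dirties, key)
}

func main() {
	d := &db{dirties: map[string]*node{"a": {blob: []byte{0x01}}}}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); _ = d.get("a") }()
	go func() { defer wg.Done(); d.dereference("a") }()
	wg.Wait()
	fmt.Println("no race: both paths respect the lock")
}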

Fixes #28541

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
Maciej Kulawik 2023-11-30 09:50:48 +00:00 committed by GitHub
parent ab0eb46a84
commit fa0df76f3c
2 changed files with 23 additions and 57 deletions


@@ -240,17 +240,6 @@ func (db *Database) Dereference(root common.Hash) error {
 	return nil
 }
 
-// Node retrieves the rlp-encoded node blob with provided node hash. It's
-// only supported by hash-based database and will return an error for others.
-// Note, this function should be deprecated once ETH66 is deprecated.
-func (db *Database) Node(hash common.Hash) ([]byte, error) {
-	hdb, ok := db.backend.(*hashdb.Database)
-	if !ok {
-		return nil, errors.New("not supported")
-	}
-	return hdb.Node(hash)
-}
-
 // Recover rollbacks the database to a specified historical point. The state is
 // supported as the rollback destination only if it's canonical state and the
 // corresponding trie histories are existent. It's only supported by path-based


@@ -82,11 +82,6 @@ var Defaults = &Config{
 // Database is an intermediate write layer between the trie data structures and
 // the disk database. The aim is to accumulate trie writes in-memory and only
 // periodically flush a couple tries to disk, garbage collecting the remainder.
-//
-// Note, the trie Database is **not** thread safe in its mutations, but it **is**
-// thread safe in providing individual, independent node access. The rationale
-// behind this split design is to provide read access to RPC handlers and sync
-// servers even while the trie is executing expensive garbage collection.
 type Database struct {
 	diskdb   ethdb.Database // Persistent storage for matured trie nodes
 	resolver ChildResolver  // The handler to resolve children of nodes
@@ -113,7 +108,7 @@ type Database struct {
 // cachedNode is all the information we know about a single cached trie node
 // in the memory database write layer.
 type cachedNode struct {
-	node      []byte                   // Encoded node blob
+	node      []byte                   // Encoded node blob, immutable
 	parents   uint32                   // Number of live nodes referencing this one
 	external  map[common.Hash]struct{} // The set of external children
 	flushPrev common.Hash              // Previous node in the flush-list
@@ -152,9 +147,9 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
 	}
 }
 
-// insert inserts a simplified trie node into the memory database.
-// All nodes inserted by this function will be reference tracked
-// and in theory should only used for **trie nodes** insertion.
+// insert inserts a trie node into the memory database. All nodes inserted by
+// this function will be reference tracked. This function assumes the lock is
+// already held.
 func (db *Database) insert(hash common.Hash, node []byte) {
 	// If the node's already cached, skip
 	if _, ok := db.dirties[hash]; ok {
@@ -183,9 +178,9 @@ func (db *Database) insert(hash common.Hash, node []byte) {
 	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
 }
 
-// Node retrieves an encoded cached trie node from memory. If it cannot be found
+// node retrieves an encoded cached trie node from memory. If it cannot be found
 // cached, the method queries the persistent database for the content.
-func (db *Database) Node(hash common.Hash) ([]byte, error) {
+func (db *Database) node(hash common.Hash) ([]byte, error) {
 	// It doesn't make sense to retrieve the metaroot
 	if hash == (common.Hash{}) {
 		return nil, errors.New("not found")
@@ -198,11 +193,14 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 			return enc, nil
 		}
 	}
-	// Retrieve the node from the dirty cache if available
+	// Retrieve the node from the dirty cache if available.
 	db.lock.RLock()
 	dirty := db.dirties[hash]
 	db.lock.RUnlock()
 
+	// Return the cached node if it's found in the dirty set.
+	// The dirty.node field is immutable and safe to read it
+	// even without lock guard.
 	if dirty != nil {
 		memcacheDirtyHitMeter.Mark(1)
 		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
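
The comment added above leans on an invariant worth spelling out: insert never rewrites the blob of an entry that is already cached, so a pointer fetched under RLock stays safe to read after the lock is dropped. A sketch of that invariant, extending the toy db type from the commit-message example (illustrative only, not the hashdb API):

// insert never overwrites an existing entry, so blobs handed out by get stay
// immutable and may be read after the read lock has been released.
func (d *db) insert(key string, blob []byte) {
	d.lock.Lock()
	defer d.lock.Unlock()
	if _, ok := d.dirties[key]; ok {
		return // already cached: leave the stored blob untouched
	}
	d.dirties[key] = &node{blob: blob}
}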
@@ -223,20 +221,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 	return nil, errors.New("not found")
 }
 
-// Nodes retrieves the hashes of all the nodes cached within the memory database.
-// This method is extremely expensive and should only be used to validate internal
-// states in test code.
-func (db *Database) Nodes() []common.Hash {
-	db.lock.RLock()
-	defer db.lock.RUnlock()
-
-	var hashes = make([]common.Hash, 0, len(db.dirties))
-	for hash := range db.dirties {
-		hashes = append(hashes, hash)
-	}
-	return hashes
-}
-
 // Reference adds a new reference from a parent node to a child node.
 // This function is used to add reference between internal trie node
 // and external node(e.g. storage trie root), all internal trie nodes
@@ -344,16 +328,16 @@ func (db *Database) dereference(hash common.Hash) {
 // Cap iteratively flushes old but still referenced trie nodes until the total
 // memory usage goes below the given threshold.
-//
-// Note, this method is a non-synchronized mutator. It is unsafe to call this
-// concurrently with other mutators.
 func (db *Database) Cap(limit common.StorageSize) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
 	// by only uncaching existing data when the database write finalizes.
-	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
 	batch := db.diskdb.NewBatch()
+	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
 
 	// db.dirtiesSize only contains the useful data in the cache, but when reporting
 	// the total memory consumption, the maintenance metadata is also needed to be
@@ -391,9 +375,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
 		return err
 	}
 	// Write successful, clear out the flushed data
-	db.lock.Lock()
-	defer db.lock.Unlock()
-
 	for db.oldest != oldest {
 		node := db.dirties[db.oldest]
 		delete(db.dirties, db.oldest)
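
Taken together, the two hunks above switch Cap from piecemeal locking (write lock only around the uncache phase) to holding the write lock for the entire method; the hunks below do the same for Commit. A rough sketch of the resulting discipline, again on the toy db type (illustrative only):

// flush mimics Cap/Commit after this change: one critical section covers the
// whole mutator, so readers holding RLock never see a half-pruned dirties map.
func (d *db) flush() {
	d.lock.Lock()
	defer d.lock.Unlock()
	for key := range d.dirties {
		delete(d.dirties, key) // deleting while ranging over a map is safe in Go
	}
}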
@@ -424,10 +405,10 @@ func (db *Database) Cap(limit common.StorageSize) error {
 // Commit iterates over all the children of a particular node, writes them out
 // to disk, forcefully tearing down all references in both directions. As a side
 // effect, all pre-images accumulated up to this point are also written.
-//
-// Note, this method is a non-synchronized mutator. It is unsafe to call this
-// concurrently with other mutators.
 func (db *Database) Commit(node common.Hash, report bool) error {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
 	// Create a database batch to flush persistent data out. It is important that
 	// outside code doesn't see an inconsistent state (referenced data removed from
 	// memory cache during commit but not yet in persistent storage). This is ensured
@@ -449,8 +430,6 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 		return err
 	}
 	// Uncache any leftovers in the last batch
-	db.lock.Lock()
-	defer db.lock.Unlock()
 	if err := batch.Replay(uncacher); err != nil {
 		return err
 	}
@@ -499,13 +478,11 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 		if err := batch.Write(); err != nil {
 			return err
 		}
-		db.lock.Lock()
 		err := batch.Replay(uncacher)
-		batch.Reset()
-		db.lock.Unlock()
 		if err != nil {
 			return err
 		}
+		batch.Reset()
 	}
 	return nil
 }
@@ -574,7 +551,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool {
 func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
 	// Ensure the parent state is present and signal a warning if not.
 	if parent != types.EmptyRootHash {
-		if blob, _ := db.Node(parent); len(blob) == 0 {
+		if blob, _ := db.node(parent); len(blob) == 0 {
 			log.Error("parent state is not present")
 		}
 	}
@@ -655,7 +632,7 @@ func (db *Database) Scheme() string {
 // Reader retrieves a node reader belonging to the given state root.
 // An error will be returned if the requested state is not available.
 func (db *Database) Reader(root common.Hash) (*reader, error) {
-	if _, err := db.Node(root); err != nil {
+	if _, err := db.node(root); err != nil {
 		return nil, fmt.Errorf("state %#x is not available, %v", root, err)
 	}
 	return &reader{db: db}, nil
@@ -666,9 +643,9 @@ type reader struct {
 	db *Database
 }
 
-// Node retrieves the trie node with the given node hash.
-// No error will be returned if the node is not found.
+// Node retrieves the trie node with the given node hash. No error will be
+// returned if the node is not found.
 func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
-	blob, _ := reader.db.Node(hash)
+	blob, _ := reader.db.node(hash)
 	return blob, nil
 }