forked from cerc-io/plugeth
trie: fix concurrent usage of secKeyBuf, ref #20920
parent 00064ddcfb
commit af4080b4b7
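The change below removes the seckeybuf scratch buffer that all callers shared through the Database struct and builds preimage keys in memory that is private to each call: a freshly allocated slice in the package-level secureKey helper, and a stack-local keyBuf inside Cap and Commit. As a minimal illustration of the bug class being fixed, here is a self-contained sketch (not geth code; the db type and all names are invented for the example) in which two goroutines build keys through one shared ephemeral buffer and therefore write the same backing array, which "go run -race" reports as a data race:

// Illustrative sketch only: why one key buffer shared through a struct is
// unsafe when two goroutines build database keys concurrently.
package main

import (
	"fmt"
	"sync"
)

var prefix = []byte("secure-key-") // plays the role of secureKeyPrefix

// db mimics the old layout: a single ephemeral key buffer shared by all callers.
type db struct {
	seckeybuf [len("secure-key-") + 32]byte
}

// secureKey builds "secure-key-" + hash in the shared buffer, like the
// removed (db *Database).secureKey method did.
func (d *db) secureKey(hash []byte) []byte {
	buf := append(d.seckeybuf[:0], prefix...)
	buf = append(buf, hash...)
	return buf
}

func main() {
	d := &db{}
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			hash := make([]byte, 32)
			for j := range hash {
				hash[j] = byte(i) // goroutine 0 writes 0x00s, goroutine 1 writes 0x01s
			}
			for n := 0; n < 1000; n++ {
				// Both goroutines write into the same backing array, so the
				// key one of them reads back can contain the other's bytes.
				_ = d.secureKey(hash)
			}
		}()
	}
	wg.Wait()
	fmt.Println("done; run with -race to see the reported race on seckeybuf")
}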
@@ -59,8 +59,11 @@ var (
 // secureKeyPrefix is the database key prefix used to store trie node preimages.
 var secureKeyPrefix = []byte("secure-key-")
 
+// secureKeyPrefixLength is the length of the above prefix
+const secureKeyPrefixLength = 11
+
 // secureKeyLength is the length of the above prefix + 32byte hash.
-const secureKeyLength = 11 + 32
+const secureKeyLength = secureKeyPrefixLength + 32
 
 // Database is an intermediate write layer between the trie data structures and
 // the disk database. The aim is to accumulate trie writes in-memory and only
@@ -79,7 +82,6 @@ type Database struct {
 	newest common.Hash // Newest tracked node, flush-list tail
 
 	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
-	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys
 
 	gctime  time.Duration // Time spent on garbage collection since last commit
 	gcnodes uint64        // Nodes garbage collected since last commit
@@ -445,15 +447,15 @@ func (db *Database) preimage(hash common.Hash) ([]byte, error) {
 		return preimage, nil
 	}
 	// Content unavailable in memory, attempt to retrieve from disk
-	return db.diskdb.Get(db.secureKey(hash[:]))
+	return db.diskdb.Get(secureKey(hash))
 }
 
-// secureKey returns the database key for the preimage of key, as an ephemeral
-// buffer. The caller must not hold onto the return value because it will become
-// invalid on the next call.
-func (db *Database) secureKey(key []byte) []byte {
-	buf := append(db.seckeybuf[:0], secureKeyPrefix...)
-	buf = append(buf, key...)
+// secureKey returns the database key for the preimage of key (as a newly
+// allocated byte-slice)
+func secureKey(hash common.Hash) []byte {
+	buf := make([]byte, secureKeyLength)
+	copy(buf, secureKeyPrefix)
+	copy(buf[secureKeyPrefixLength:], hash[:])
 	return buf
 }
 
@@ -596,12 +598,18 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
 	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
 
+	// We reuse an ephemeral buffer for the keys. The batch Put operation
+	// copies it internally, so we can reuse it.
+	var keyBuf [secureKeyLength]byte
+	copy(keyBuf[:], secureKeyPrefix)
+
 	// If the preimage cache got large enough, push to disk. If it's still small
 	// leave for later to deduplicate writes.
 	flushPreimages := db.preimagesSize > 4*1024*1024
 	if flushPreimages {
 		for hash, preimage := range db.preimages {
-			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
+			copy(keyBuf[secureKeyPrefixLength:], hash[:])
+			if err := batch.Put(keyBuf[:], preimage); err != nil {
 				log.Error("Failed to commit preimage from trie database", "err", err)
 				return err
 			}
@@ -692,9 +700,15 @@ func (db *Database) Commit(node common.Hash, report bool) error {
 	start := time.Now()
 	batch := db.diskdb.NewBatch()
 
+	// We reuse an ephemeral buffer for the keys. The batch Put operation
+	// copies it internally, so we can reuse it.
+	var keyBuf [secureKeyLength]byte
+	copy(keyBuf[:], secureKeyPrefix)
+
 	// Move all of the accumulated preimages into a write batch
 	for hash, preimage := range db.preimages {
-		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
+		copy(keyBuf[secureKeyPrefixLength:], hash[:])
+		if err := batch.Put(keyBuf[:], preimage); err != nil {
 			log.Error("Failed to commit preimage from trie database", "err", err)
 			return err
 		}
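For context, here is a simplified standalone sketch of the pattern the diff moves to: secureKey allocates a fresh key slice, and the flush loops fill a buffer that lives on the stack of a single call, which stays safe because the batch's Put copies the key internally (as the added comment in the diff notes). All names below (flushPreimages, batchPut, prefixLength, keyLength) are stand-ins invented for the sketch, not the trie package's API.

// Simplified sketch of per-call key building; not the real trie package.
package main

import "fmt"

const (
	prefixLength = 11               // length of "secure-key-"
	keyLength    = prefixLength + 32 // prefix + 32-byte hash
)

var prefix = []byte("secure-key-")

// secureKey returns the database key for a preimage as a newly allocated
// slice, mirroring the new package-level secureKey in the diff.
func secureKey(hash [32]byte) []byte {
	buf := make([]byte, keyLength)
	copy(buf, prefix)
	copy(buf[prefixLength:], hash[:])
	return buf
}

// flushPreimages mirrors the Cap/Commit loops: one buffer per call, refilled
// for every hash. This is fine because batchPut (like a real batch's Put)
// copies the key before the next iteration overwrites keyBuf.
func flushPreimages(preimages map[[32]byte][]byte, batchPut func(key, val []byte)) {
	var keyBuf [keyLength]byte
	copy(keyBuf[:], prefix)
	for hash, preimage := range preimages {
		copy(keyBuf[prefixLength:], hash[:])
		batchPut(keyBuf[:], preimage)
	}
}

func main() {
	var h [32]byte
	h[0] = 0xab
	fmt.Printf("%q\n", secureKey(h)[:prefixLength]) // prints "secure-key-"

	store := map[string][]byte{}
	flushPreimages(map[[32]byte][]byte{h: []byte("preimage")}, func(k, v []byte) {
		store[string(k)] = v // string(k) copies the key, like a real batch would
	})
	fmt.Println("stored", len(store), "preimage(s)")
}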