core, eth, trie: expose more detailed dirty ram tracking for diff layers (#27971)

Péter Szilágyi 2023-08-23 14:08:39 +03:00 committed by GitHub
parent ab3762b2d9
commit 0c6bbeb423
9 changed files with 65 additions and 27 deletions

View File

@@ -1021,7 +1021,7 @@ func (bc *BlockChain) Stop() {
         for !bc.triegc.Empty() {
             triedb.Dereference(bc.triegc.PopItem())
         }
-        if size, _ := triedb.Size(); size != 0 {
+        if _, nodes, _ := triedb.Size(); nodes != 0 { // all memory is contained within the nodes return for hashdb
             log.Error("Dangling trie nodes after full cleanup")
         }
     }
@@ -1429,8 +1429,8 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
     }
     // If we exceeded our memory allowance, flush matured singleton nodes to disk
     var (
-        nodes, imgs = bc.triedb.Size()
+        _, nodes, imgs = bc.triedb.Size() // all memory is contained within the nodes return for hashdb
         limit       = common.StorageSize(bc.cacheConfig.TrieDirtyLimit) * 1024 * 1024
     )
     if nodes > limit || imgs > 4*1024*1024 {
         bc.triedb.Cap(limit - ethdb.IdealBatchSize)
@@ -1866,8 +1866,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
         stats.processed++
         stats.usedGas += usedGas

-        dirty, _ := bc.triedb.Size()
-        stats.report(chain, it.index, dirty, setHead)
+        var snapDiffItems, snapBufItems common.StorageSize
+        if bc.snaps != nil {
+            snapDiffItems, snapBufItems = bc.snaps.Size()
+        }
+        trieDiffNodes, trieBufNodes, _ := bc.triedb.Size()
+        stats.report(chain, it.index, snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes, setHead)

         if !setHead {
             // After merge we expect few side chains. Simply count

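To make the widened Size() call sites above easier to follow, here is a minimal, self-contained sketch of the caller pattern insertChain now uses: query the snapshot tree only when snapshots are enabled, and split the trie database figure into diff-layer and dirty-buffer parts. StorageSize, snapTree and trieDB below are simplified stand-ins for the real go-ethereum types, not the actual API.

package main

import "fmt"

// StorageSize is a stand-in for common.StorageSize (a byte count).
type StorageSize float64

// snapTree mimics the two-value Size() added to the snapshot Tree in this commit.
type snapTree struct{ diff, buf StorageSize }

func (t *snapTree) Size() (StorageSize, StorageSize) { return t.diff, t.buf }

// trieDB mimics the three-value Size() of the trie Database.
type trieDB struct{ diffs, nodes, preimages StorageSize }

func (db *trieDB) Size() (StorageSize, StorageSize, StorageSize) {
	return db.diffs, db.nodes, db.preimages
}

func main() {
	var (
		snaps  = &snapTree{diff: 512 << 10}              // hypothetical: 512 KiB of diff layers
		triedb = &trieDB{nodes: 4 << 20, preimages: 128} // hypothetical sizes
	)
	// Snapshots can be disabled, so only query them when present.
	var snapDiffItems, snapBufItems StorageSize
	if snaps != nil {
		snapDiffItems, snapBufItems = snaps.Size()
	}
	trieDiffNodes, trieBufNodes, _ := triedb.Size()
	fmt.Println("snapdiffs", snapDiffItems, "snapdirty", snapBufItems,
		"triediffs", trieDiffNodes, "triedirty", trieBufNodes)
}

Under the hash scheme the first trie return is always zero, which is why the call sites above that only care about total dirty memory discard it with a blank identifier.
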
View File

@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second

 // report prints statistics if some number of blocks have been processed
 // or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
+func (st *insertStats) report(chain []*types.Block, index int, snapDiffItems, snapBufItems, trieDiffNodes, triebufNodes common.StorageSize, setHead bool) {
     // Fetch the timings for the batch
     var (
         now = mclock.Now()
@@ -63,7 +63,16 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
     if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
         context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
     }
-    context = append(context, []interface{}{"dirty", dirty}...)
+    if snapDiffItems != 0 || snapBufItems != 0 { // snapshots enabled
+        context = append(context, []interface{}{"snapdiffs", snapDiffItems}...)
+        if snapBufItems != 0 { // future snapshot refactor
+            context = append(context, []interface{}{"snapdirty", snapBufItems}...)
+        }
+    }
+    if trieDiffNodes != 0 { // pathdb
+        context = append(context, []interface{}{"triediffs", trieDiffNodes}...)
+    }
+    context = append(context, []interface{}{"triedirty", triebufNodes}...)

     if st.queued > 0 {
         context = append(context, []interface{}{"queued", st.queued}...)

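The report changes above only emit the new fields when they carry information. Here is a short sketch of that conditional context assembly, using plain float64 values and a local buildContext helper as illustrative stand-ins for the insertStats parameters and common.StorageSize.

package main

import "fmt"

// buildContext mirrors the conditional logic in report; float64 stands in for
// common.StorageSize and the returned slice for the log context.
func buildContext(snapDiffItems, snapBufItems, trieDiffNodes, trieBufNodes float64) []interface{} {
	var context []interface{}
	if snapDiffItems != 0 || snapBufItems != 0 { // snapshots enabled
		context = append(context, "snapdiffs", snapDiffItems)
		if snapBufItems != 0 { // future snapshot refactor
			context = append(context, "snapdirty", snapBufItems)
		}
	}
	if trieDiffNodes != 0 { // pathdb
		context = append(context, "triediffs", trieDiffNodes)
	}
	context = append(context, "triedirty", trieBufNodes)
	return context
}

func main() {
	// hashdb without snapshots: only the dirty trie cache shows up.
	fmt.Println(buildContext(0, 0, 0, 2048))
	// pathdb with snapshots: snapdiffs, triediffs and triedirty are all reported.
	fmt.Println(buildContext(512, 0, 1024, 2048))
}

With snapshots disabled and the hash scheme in use, only triedirty is logged, so the output stays as compact as the old single dirty field.
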
View File

@@ -1844,7 +1844,7 @@ func TestTrieForkGC(t *testing.T) {
         chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
         chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
     }
-    if nodes, _ := chain.TrieDB().Size(); nodes > 0 {
+    if _, nodes, _ := chain.TrieDB().Size(); nodes > 0 { // all memory is returned in the nodes return for hashdb
         t.Fatalf("stale tries still alive after garbase collection")
     }
 }

View File

@@ -852,3 +852,21 @@ func (t *Tree) DiskRoot() common.Hash {
     return t.diskRoot()
 }
+
+// Size returns the memory usage of the diff layers above the disk layer and the
+// dirty nodes buffered in the disk layer. Currently, the implementation uses a
+// special diff layer (the first) as an aggregator simulating a dirty buffer, so
+// the second return will always be 0. However, this will be made consistent with
+// the pathdb, which will require a second return.
+func (t *Tree) Size() (diffs common.StorageSize, buf common.StorageSize) {
+    t.lock.RLock()
+    defer t.lock.RUnlock()
+
+    var size common.StorageSize
+    for _, layer := range t.layers {
+        if layer, ok := layer.(*diffLayer); ok {
+            size += common.StorageSize(layer.memory)
+        }
+    }
+    return size, 0
+}

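The new Tree.Size is a straight aggregation over the layer set. The sketch below mirrors that walk with simplified stand-in types so it can be run in isolation; diffLayer, diskLayer and Tree here are not the real snapshot types.

package main

import "fmt"

// Simplified stand-ins for the snapshot layer types.
type StorageSize float64

type layer interface{ isLayer() }

type diffLayer struct{ memory uint64 }

func (*diffLayer) isLayer() {}

type diskLayer struct{}

func (*diskLayer) isLayer() {}

type Tree struct{ layers map[string]layer }

// Size sums the memory of every diff layer; the buffer figure stays 0 until
// the disk layer owns a real dirty buffer.
func (t *Tree) Size() (diffs StorageSize, buf StorageSize) {
	var size StorageSize
	for _, l := range t.layers {
		if dl, ok := l.(*diffLayer); ok {
			size += StorageSize(dl.memory)
		}
	}
	return size, 0
}

func main() {
	t := &Tree{layers: map[string]layer{
		"disk":  &diskLayer{},
		"diff1": &diffLayer{memory: 1 << 20},
		"diff2": &diffLayer{memory: 2 << 20},
	}}
	fmt.Println(t.Size()) // summed diff-layer bytes first, always 0 second (for now)
}

The zero second return is deliberate: the aggregator diff layer currently plays the role of the dirty buffer, so there is nothing separate to report yet.
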
View File

@@ -168,7 +168,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
         parent = root
     }
     if report {
-        nodes, imgs := triedb.Size()
+        _, nodes, imgs := triedb.Size() // all memory is contained within the nodes return in hashdb
         log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
     }
     return statedb, func() { triedb.Dereference(block.Root()) }, nil

View File

@@ -369,8 +369,8 @@ func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed
         // if the relevant state is available in disk.
         var preferDisk bool
         if statedb != nil {
-            s1, s2 := statedb.Database().TrieDB().Size()
-            preferDisk = s1+s2 > defaultTracechainMemLimit
+            s1, s2, s3 := statedb.Database().TrieDB().Size()
+            preferDisk = s1+s2+s3 > defaultTracechainMemLimit
         }
         statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk)
         if err != nil {

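The tracer keeps its memory heuristic intact by summing all three returns. Below is a hedged sketch of that check, where sizer, fakeTrieDB and the limit value are illustrative stand-ins; defaultTracechainMemLimit is the real constant the limit stands in for.

package main

import "fmt"

type StorageSize float64

// sizer stands in for the trie database handle traceChain holds.
type sizer interface {
	Size() (StorageSize, StorageSize, StorageSize)
}

type fakeTrieDB struct{ diffs, nodes, preimages StorageSize }

func (db fakeTrieDB) Size() (StorageSize, StorageSize, StorageSize) {
	return db.diffs, db.nodes, db.preimages
}

// preferDisk sums all three returns against the memory limit, as above.
func preferDisk(db sizer, limit StorageSize) bool {
	s1, s2, s3 := db.Size()
	return s1+s2+s3 > limit
}

func main() {
	limit := StorageSize(500 << 20) // stand-in for defaultTracechainMemLimit
	fmt.Println(preferDisk(fakeTrieDB{diffs: 300 << 20, nodes: 250 << 20}, limit)) // true
	fmt.Println(preferDisk(fakeTrieDB{nodes: 100 << 20}, limit))                   // false
}
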
View File

@@ -55,9 +55,12 @@ type backend interface {
     // according to the state scheme.
     Initialized(genesisRoot common.Hash) bool

-    // Size returns the current storage size of the memory cache in front of the
-    // persistent database layer.
-    Size() common.StorageSize
+    // Size returns the current storage size of the diff layers on top of the
+    // disk layer and the storage size of the nodes cached in the disk layer.
+    //
+    // For hash scheme, there is no differentiation between diff layer nodes
+    // and dirty disk layer nodes, so both are merged into the second return.
+    Size() (common.StorageSize, common.StorageSize)

     // Update performs a state transition by committing dirty nodes contained
     // in the given set in order to update state from the specified parent to
@@ -165,18 +168,19 @@ func (db *Database) Commit(root common.Hash, report bool) error {
     return db.backend.Commit(root, report)
 }

-// Size returns the storage size of dirty trie nodes in front of the persistent
-// database and the size of cached preimages.
-func (db *Database) Size() (common.StorageSize, common.StorageSize) {
+// Size returns the storage size of diff layer nodes above the persistent disk
+// layer, the dirty nodes buffered within the disk layer, and the size of cached
+// preimages.
+func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
     var (
-        storages  common.StorageSize
+        diffs, nodes common.StorageSize
         preimages common.StorageSize
     )
-    storages = db.backend.Size()
+    diffs, nodes = db.backend.Size()
     if db.preimages != nil {
         preimages = db.preimages.size()
     }
-    return storages, preimages
+    return diffs, nodes, preimages
 }

 // Initialized returns an indicator if the state data is already initialized

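The split between the backend contract (two values) and Database.Size (three values, with preimages appended) composes as below. This is a self-contained sketch under assumed stand-in types; hashBackend and pathBackend only approximate how the real hashdb and pathdb implementations report their memory.

package main

import "fmt"

type StorageSize float64

// backend mirrors the widened two-value contract.
type backend interface {
	Size() (StorageSize, StorageSize)
}

// hashBackend folds everything into the second return, like hashdb.
type hashBackend struct{ dirties StorageSize }

func (b hashBackend) Size() (StorageSize, StorageSize) { return 0, b.dirties }

// pathBackend reports diff layers and the dirty disk buffer separately, like pathdb.
type pathBackend struct{ diffs, buffer StorageSize }

func (b pathBackend) Size() (StorageSize, StorageSize) { return b.diffs, b.buffer }

type preimageStore struct{ bytes StorageSize }

func (p *preimageStore) size() StorageSize { return p.bytes }

type Database struct {
	backend   backend
	preimages *preimageStore
}

// Size returns diff-layer nodes, dirty disk-layer nodes and cached preimages.
func (db *Database) Size() (StorageSize, StorageSize, StorageSize) {
	var (
		diffs, nodes StorageSize
		preimages    StorageSize
	)
	diffs, nodes = db.backend.Size()
	if db.preimages != nil {
		preimages = db.preimages.size()
	}
	return diffs, nodes, preimages
}

func main() {
	fmt.Println((&Database{backend: hashBackend{dirties: 4 << 20}}).Size())
	fmt.Println((&Database{
		backend:   pathBackend{diffs: 1 << 20, buffer: 2 << 20},
		preimages: &preimageStore{bytes: 64 << 10},
	}).Size())
}

The point of the shared contract is that callers never need to know which scheme is active: hashdb simply folds everything into the second value, while pathdb can report its diff-layer growth separately from the dirty disk buffer.
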
View File

@@ -624,7 +624,10 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n
 // Size returns the current storage size of the memory cache in front of the
 // persistent database layer.
-func (db *Database) Size() common.StorageSize {
+//
+// The first return will always be 0, representing the memory stored in unbounded
+// diff layers above the dirty cache. This is only available in pathdb.
+func (db *Database) Size() (common.StorageSize, common.StorageSize) {
     db.lock.RLock()
     defer db.lock.RUnlock()
@@ -632,7 +635,7 @@ func (db *Database) Size() common.StorageSize {
     // the total memory consumption, the maintenance metadata is also needed to be
     // counted.
     var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
-    return db.dirtiesSize + db.childrenSize + metadataSize
+    return 0, db.dirtiesSize + db.childrenSize + metadataSize
 }

 // Close closes the trie database and releases all held resources.

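hashdb keeps reporting the dirty cache as a single figure, including the per-node bookkeeping overhead. A small sketch of that accounting follows; cachedNodeSize here is an illustrative constant, not geth's exact per-entry size, and hashDB is a stand-in type.

package main

import "fmt"

type StorageSize float64

// cachedNodeSize is an illustrative per-entry metadata overhead, not geth's exact figure.
const cachedNodeSize = 48

// hashDB is a stand-in for the hashdb Database's dirty-cache bookkeeping.
type hashDB struct {
	dirties      map[string][]byte // keyed by node hash in the real implementation
	dirtiesSize  StorageSize
	childrenSize StorageSize
}

// Size reports (0, total): hashdb has no diff layers, so everything is dirty cache,
// and the per-node metadata is counted on top of the raw node and child sizes.
func (db *hashDB) Size() (StorageSize, StorageSize) {
	metadataSize := StorageSize(len(db.dirties) * cachedNodeSize)
	return 0, db.dirtiesSize + db.childrenSize + metadataSize
}

func main() {
	db := &hashDB{
		dirties:      map[string][]byte{"a": make([]byte, 100), "b": make([]byte, 200)},
		dirtiesSize:  300,
		childrenSize: 32,
	}
	fmt.Println(db.Size()) // 0 and 300+32+2*48 = 428
}
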
View File

@@ -383,16 +383,16 @@ func (db *Database) Close() error {
 // Size returns the current storage size of the memory cache in front of the
 // persistent database layer.
-func (db *Database) Size() (size common.StorageSize) {
+func (db *Database) Size() (diffs common.StorageSize, nodes common.StorageSize) {
     db.tree.forEach(func(layer layer) {
         if diff, ok := layer.(*diffLayer); ok {
-            size += common.StorageSize(diff.memory)
+            diffs += common.StorageSize(diff.memory)
         }
         if disk, ok := layer.(*diskLayer); ok {
-            size += disk.size()
+            nodes += disk.size()
         }
     })
-    return size
+    return diffs, nodes
 }

 // Initialized returns an indicator if the state data is already
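
For contrast with hashdb, the pathdb walk above splits the same information in one pass over the layer tree. The sketch below re-derives that split with simplified stand-in types (layerTree, diffLayer, diskLayer) so the two returns can be inspected in isolation; it is not the real pathdb code.

package main

import "fmt"

// Simplified stand-ins for the pathdb layer tree.
type StorageSize float64

type layer interface{ isLayer() }

type diffLayer struct{ memory uint64 }

func (*diffLayer) isLayer() {}

type diskLayer struct{ bufferBytes uint64 }

func (*diskLayer) isLayer() {}

func (d *diskLayer) size() StorageSize { return StorageSize(d.bufferBytes) }

type layerTree struct{ layers []layer }

func (t *layerTree) forEach(fn func(layer)) {
	for _, l := range t.layers {
		fn(l)
	}
}

type Database struct{ tree *layerTree }

// Size accumulates diff-layer memory and the disk layer's dirty buffer separately.
func (db *Database) Size() (diffs StorageSize, nodes StorageSize) {
	db.tree.forEach(func(l layer) {
		if diff, ok := l.(*diffLayer); ok {
			diffs += StorageSize(diff.memory)
		}
		if disk, ok := l.(*diskLayer); ok {
			nodes += disk.size()
		}
	})
	return diffs, nodes
}

func main() {
	db := &Database{tree: &layerTree{layers: []layer{
		&diskLayer{bufferBytes: 8 << 20},
		&diffLayer{memory: 1 << 20},
		&diffLayer{memory: 3 << 20},
	}}}
	fmt.Println(db.Size()) // diff-layer memory first, dirty disk buffer second
}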