core/state/snapshot: faster account iteration, CLI integration

commit 19099421dc
parent 6ddb92a089
Author: Martin Holst Swende
Date:   2020-01-19 20:57:56 +01:00
Committed by: Péter Szilágyi
9 changed files with 69 additions and 38 deletions

--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -79,6 +79,7 @@ The dumpgenesis command dumps the genesis block configuration in JSON format to
 		utils.CacheFlag,
 		utils.SyncModeFlag,
 		utils.GCModeFlag,
+		utils.SnapshotFlag,
 		utils.CacheDatabaseFlag,
 		utils.CacheGCFlag,
 	},

--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -91,6 +91,7 @@ var (
 		utils.SyncModeFlag,
 		utils.ExitWhenSyncedFlag,
 		utils.GCModeFlag,
+		utils.SnapshotFlag,
 		utils.LightServeFlag,
 		utils.LightLegacyServFlag,
 		utils.LightIngressFlag,

--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -225,6 +225,10 @@ var (
 		Usage: `Blockchain garbage collection mode ("full", "archive")`,
 		Value: "full",
 	}
+	SnapshotFlag = cli.BoolFlag{
+		Name:  "snapshot",
+		Usage: `Enables snapshot-database mode -- experimental work in progress feature`,
+	}
 	LightKDFFlag = cli.BoolFlag{
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@@ -1471,6 +1475,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheSnapshotFlag.Name) {
 		cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100
 	}
+	if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+		cfg.SnapshotCache = 0 // Disabled
+	}
 	if ctx.GlobalIsSet(DocRootFlag.Name) {
 		cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name)
 	}
@@ -1734,6 +1741,9 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
 		TrieTimeLimit: eth.DefaultConfig.TrieTimeout,
 		SnapshotLimit: eth.DefaultConfig.SnapshotCache,
 	}
+	if !ctx.GlobalIsSet(SnapshotFlag.Name) {
+		cache.SnapshotLimit = 0 // Disabled
+	}
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) {
 		cache.TrieCleanLimit = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100
 	}
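Taken together, these hunks make snapshotting strictly opt-in: the snapshot share is still carved out of the --cache budget, but both cfg.SnapshotCache and cache.SnapshotLimit are zeroed whenever --snapshot is absent, and a zero limit suppresses snapshot construction further down (see the core/blockchain.go hunk below). For illustration, a minimal standalone sketch of the same gating pattern; the program itself is hypothetical, the flag names mirror the diff, and the import matches the gopkg.in/urfave/cli.v1 API that go-ethereum depended on at the time:

package main

import (
	"fmt"
	"log"
	"os"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "snapshot", Usage: "Enables snapshot-database mode"},
		cli.IntFlag{Name: "cache", Usage: "Total cache in MB", Value: 1024},
		cli.IntFlag{Name: "cache.snapshot", Usage: "Percentage of cache for snapshots", Value: 25},
	}
	app.Action = func(ctx *cli.Context) error {
		// Carve the snapshot share out of the overall cache budget...
		snapshotCache := ctx.GlobalInt("cache") * ctx.GlobalInt("cache.snapshot") / 100
		// ...but disable the feature completely unless explicitly requested.
		if !ctx.GlobalIsSet("snapshot") {
			snapshotCache = 0 // Disabled
		}
		fmt.Println("snapshot cache (MB):", snapshotCache)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}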

--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -302,8 +302,9 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 		}
 	}
 	// Load any existing snapshot, regenerating it if loading failed
-	bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root())
+	if bc.cacheConfig.SnapshotLimit > 0 {
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root())
+	}
 	// Take ownership of this particular state
 	go bc.update()
 	return bc, nil
@@ -498,8 +499,9 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
 	bc.chainmu.Unlock()

 	// Destroy any existing state snapshot and regenerate it in the background
-	bc.snaps.Rebuild(block.Root())
+	if bc.snaps != nil {
+		bc.snaps.Rebuild(block.Root())
+	}
 	log.Info("Committed new head block", "number", block.Number(), "hash", hash)
 	return nil
 }
@@ -854,9 +856,12 @@ func (bc *BlockChain) Stop() {
 	bc.wg.Wait()

 	// Ensure that the entirety of the state snapshot is journalled to disk.
-	snapBase, err := bc.snaps.Journal(bc.CurrentBlock().Root())
-	if err != nil {
-		log.Error("Failed to journal state snapshot", "err", err)
+	var snapBase common.Hash
+	if bc.snaps != nil {
+		var err error
+		if snapBase, err = bc.snaps.Journal(bc.CurrentBlock().Root()); err != nil {
+			log.Error("Failed to journal state snapshot", "err", err)
+		}
 	}
 	// Ensure the state of a recent block is also stored to disk before exiting.
 	// We're writing three different states to catch different restart scenarios:
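With snapshots optional, bc.snaps may now be nil and every call site has to guard for it; note how Stop keeps snapBase at its zero value when the tree is absent, so the later persistence logic can simply skip the snapshot root. A reduced sketch of that shutdown pattern, using hypothetical stand-in types rather than go-ethereum's real ones:

package main

import (
	"fmt"
	"log"
)

// tree is a stand-in for the snapshot tree; nil means --snapshot was not given.
type tree struct{}

func (t *tree) Journal(root [32]byte) ([32]byte, error) {
	return root, nil // pretend the journal was written for the given root
}

type blockchain struct {
	snaps *tree
}

func (bc *blockchain) Stop() {
	var snapBase [32]byte // stays zero when snapshots are disabled
	if bc.snaps != nil {
		var err error
		if snapBase, err = bc.snaps.Journal([32]byte{0xde, 0xad}); err != nil {
			log.Println("Failed to journal state snapshot:", err)
		}
	}
	fmt.Printf("snapBase: %x\n", snapBase)
}

func main() {
	(&blockchain{}).Stop()               // snapshot mode off: zero snapBase
	(&blockchain{snaps: &tree{}}).Stop() // snapshot mode on: journalled root
}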

--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -23,6 +23,7 @@ import (
 	"math/rand"
 	"sort"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
@@ -92,7 +93,7 @@ type diffLayer struct {
 	memory uint64 // Approximate guess as to how much memory we use

 	root  common.Hash // Root hash to which this snapshot diff belongs to
-	stale bool        // Signals that the layer became stale (state progressed)
+	stale uint32      // Signals that the layer became stale (state progressed)

 	accountList []common.Hash          // List of account for iteration. If it exists, it's sorted, otherwise it's nil
 	accountData map[common.Hash][]byte // Keyed accounts for direct retrival (nil means deleted)
@@ -237,10 +238,7 @@ func (dl *diffLayer) Parent() snapshot {
 // Stale return whether this layer has become stale (was flattened across) or if
 // it's still live.
 func (dl *diffLayer) Stale() bool {
-	dl.lock.RLock()
-	defer dl.lock.RUnlock()
-
-	return dl.stale
+	return atomic.LoadUint32(&dl.stale) != 0
 }

 // Account directly retrieves the account associated with a particular hash in
@@ -288,7 +286,7 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
 	// If the layer was flattened into, consider it invalid (any live reference to
 	// the original should be marked as unusable).
-	if dl.stale {
+	if dl.Stale() {
 		return nil, ErrSnapshotStale
 	}
 	// If the account is known locally, return it. Note, a nil account means it was
@@ -342,7 +340,7 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
 	// If the layer was flattened into, consider it invalid (any live reference to
 	// the original should be marked as unusable).
-	if dl.stale {
+	if dl.Stale() {
 		return nil, ErrSnapshotStale
 	}
 	// If the account is known locally, try to resolve the slot locally. Note, a nil
@@ -401,11 +399,9 @@ func (dl *diffLayer) flatten() snapshot {
 	// Before actually writing all our data to the parent, first ensure that the
 	// parent hasn't been 'corrupted' by someone else already flattening into it
-	if parent.stale {
+	if atomic.SwapUint32(&parent.stale, 1) != 0 {
 		panic("parent diff layer is stale") // we've flattened into the same parent from two children, boo
 	}
-	parent.stale = true
-
 	// Overwrite all the updated accounts blindly, merge the sorted list
 	for hash, data := range dl.accountData {
 		parent.accountData[hash] = data
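The switch from a mutex-guarded bool to a uint32 does two things: Stale() becomes a lock-free atomic load, and flatten's check-and-mark of the parent collapses into a single atomic.SwapUint32, closing the window in which two children could both observe a non-stale parent and then both flatten into it. A minimal sketch of that swap idiom (the layer type here is a hypothetical stand-in):

package main

import (
	"fmt"
	"sync/atomic"
)

type layer struct {
	stale uint32 // 0 = live, 1 = flattened into
}

// Stale is a lock-free read, safe to call from any goroutine.
func (l *layer) Stale() bool { return atomic.LoadUint32(&l.stale) != 0 }

// flattenInto atomically marks the parent stale and detects a double flatten:
// SwapUint32 returns the previous value, so only the first caller sees 0.
func flattenInto(parent *layer) {
	if atomic.SwapUint32(&parent.stale, 1) != 0 {
		panic("parent diff layer is stale")
	}
}

func main() {
	p := &layer{}
	flattenInto(p)
	fmt.Println("stale after flatten:", p.Stale()) // true; a second flattenInto would panic
}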

--- a/core/state/snapshot/iterator.go
+++ b/core/state/snapshot/iterator.go
@@ -64,7 +64,7 @@ type diffAccountIterator struct {
 	// is explicitly tracked since the referenced diff layer might go stale after
 	// the iterator was positioned and we don't want to fail accessing the old
 	// value as long as the iterator is not touched any more.
-	curAccount []byte
+	//curAccount []byte

 	layer *diffLayer    // Live layer to retrieve values from
 	keys  []common.Hash // Keys left in the layer to iterate
@@ -98,22 +98,13 @@ func (it *diffAccountIterator) Next() bool {
 	if len(it.keys) == 0 {
 		return false
 	}
-	// Iterator seems to be still alive, retrieve and cache the live hash and
-	// account value, or fail now if layer became stale
-	it.layer.lock.RLock()
-	defer it.layer.lock.RUnlock()
-
-	if it.layer.stale {
+	if it.layer.Stale() {
 		it.fail, it.keys = ErrSnapshotStale, nil
 		return false
 	}
+	// Iterator seems to be still alive, retrieve and cache the live hash
 	it.curHash = it.keys[0]
-	if blob, ok := it.layer.accountData[it.curHash]; !ok {
-		panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
-	} else {
-		it.curAccount = blob
-	}
-	// Values cached, shift the iterator and notify the user of success
+	// key cached, shift the iterator and notify the user of success
 	it.keys = it.keys[1:]
 	return true
 }
@@ -130,8 +121,22 @@ func (it *diffAccountIterator) Hash() common.Hash {
 }

 // Account returns the RLP encoded slim account the iterator is currently at.
+// This method may _fail_, if the underlying layer has been flattened between
+// the call to Next and Acccount. That type of error will set it.Err.
+// This method assumes that flattening does not delete elements from
+// the accountdata mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
 func (it *diffAccountIterator) Account() []byte {
-	return it.curAccount
+	it.layer.lock.RLock()
+	blob, ok := it.layer.accountData[it.curHash]
+	if !ok {
+		panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
+	}
+	it.layer.lock.RUnlock()
+	if it.layer.Stale() {
+		it.fail, it.keys = ErrSnapshotStale, nil
+	}
+	return blob
 }

 // Release is a noop for diff account iterators as there are no held resources.
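Next no longer copies the account blob: it only checks staleness and caches the key, while Account re-reads the blob under the layer's read lock when actually asked. That makes iteration cheaper (skipped entries never pay for a value lookup) but moves the failure into Account, so callers must consult Error() after the loop. A hedged sketch of the resulting calling convention, against a mocked-up iterator rather than the real AccountIterator:

package main

import (
	"errors"
	"fmt"
)

// mockIterator imitates the Next/Account/Error shape implied by the diff:
// Next only advances the key, Account resolves the value lazily and may fail.
type mockIterator struct {
	stale bool
	keys  []string
	data  map[string][]byte
	cur   string
	fail  error
}

func (it *mockIterator) Next() bool {
	if it.fail != nil || len(it.keys) == 0 {
		return false
	}
	it.cur, it.keys = it.keys[0], it.keys[1:]
	return true
}

func (it *mockIterator) Account() []byte {
	if it.stale { // a racing flatten surfaces here, not in Next
		it.fail = errors.New("snapshot stale")
		return nil
	}
	return it.data[it.cur]
}

func (it *mockIterator) Error() error { return it.fail }

func main() {
	it := &mockIterator{
		keys: []string{"acct-1", "acct-2"},
		data: map[string][]byte{"acct-1": {0xaa}, "acct-2": {0xbb}},
	}
	for it.Next() {
		fmt.Printf("%s => %x\n", it.cur, it.Account())
	}
	if err := it.Error(); err != nil { // must be checked after the loop
		fmt.Println("iteration aborted:", err)
	}
}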

--- a/core/state/snapshot/iterator_fast.go
+++ b/core/state/snapshot/iterator_fast.go
@@ -63,8 +63,9 @@ func (its weightedAccountIterators) Swap(i, j int) {
 // fastAccountIterator is a more optimized multi-layer iterator which maintains a
 // direct mapping of all iterators leading down to the bottom layer.
 type fastAccountIterator struct {
 	tree *Tree        // Snapshot tree to reinitialize stale sub-iterators with
 	root common.Hash  // Root hash to reinitialize stale sub-iterators through
+	curAccount []byte

 	iterators weightedAccountIterators
 	initiated bool
@@ -160,9 +161,20 @@ func (fi *fastAccountIterator) Next() bool {
 		// Don't forward first time -- we had to 'Next' once in order to
 		// do the sorting already
 		fi.initiated = true
-		return true
+		fi.curAccount = fi.iterators[0].it.Account()
+		if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+			fi.fail = innerErr
+		}
+		return fi.Error() == nil
 	}
-	return fi.next(0)
+	if !fi.next(0) {
+		return false
+	}
+	fi.curAccount = fi.iterators[0].it.Account()
+	if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+		fi.fail = innerErr
+	}
+	return fi.Error() == nil
 }

 // next handles the next operation internally and should be invoked when we know
@@ -259,7 +271,7 @@ func (fi *fastAccountIterator) Hash() common.Hash {
 // Account returns the current key
 func (fi *fastAccountIterator) Account() []byte {
-	return fi.iterators[0].it.Account()
+	return fi.curAccount
 }

 // Release iterates over all the remaining live layer iterators and releases each
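Because sub-iterator Account calls can now fail lazily, the fast iterator captures the value of the winning sub-iterator immediately inside Next and promotes any inner error to its own fail field; Account then degrades to a plain field read, and Next's boolean already reflects whether the cached value is trustworthy. A reduced, self-contained sketch of that capture-on-advance pattern (types here are hypothetical):

package main

import "fmt"

// subIterator is the minimal surface the fast iterator needs from each layer.
type subIterator interface {
	Next() bool
	Account() []byte
	Error() error
}

type sliceIterator struct {
	vals [][]byte
	pos  int
}

func (s *sliceIterator) Next() bool      { s.pos++; return s.pos <= len(s.vals) }
func (s *sliceIterator) Account() []byte { return s.vals[s.pos-1] }
func (s *sliceIterator) Error() error    { return nil }

type fastIterator struct {
	top        subIterator // stand-in for iterators[0].it after sorting
	curAccount []byte
	fail       error
}

func (fi *fastIterator) Next() bool {
	if fi.fail != nil || !fi.top.Next() {
		return false
	}
	// Capture the value now: a later flatten can no longer change what the
	// caller sees for this position, and lazy errors are surfaced here.
	fi.curAccount = fi.top.Account()
	if err := fi.top.Error(); err != nil {
		fi.fail = err
	}
	return fi.fail == nil
}

func (fi *fastIterator) Account() []byte { return fi.curAccount }
func (fi *fastIterator) Error() error    { return fi.fail }

func main() {
	fi := &fastIterator{top: &sliceIterator{vals: [][]byte{{0x01}, {0x02}}}}
	for fi.Next() {
		fmt.Printf("%x\n", fi.Account())
	}
}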

--- a/core/state/snapshot/journal.go
+++ b/core/state/snapshot/journal.go
@@ -210,7 +210,7 @@ func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
 	dl.lock.RLock()
 	defer dl.lock.RUnlock()

-	if dl.stale {
+	if dl.Stale() {
 		return common.Hash{}, ErrSnapshotStale
 	}
 	// Everything below was journalled, persist this layer too

--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -22,6 +22,7 @@ import (
 	"errors"
 	"fmt"
 	"sync"
+	"sync/atomic"

 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/rawdb"
@@ -552,7 +553,7 @@ func (t *Tree) Rebuild(root common.Hash) {
 		case *diffLayer:
 			// If the layer is a simple diff, simply mark as stale
 			layer.lock.Lock()
-			layer.stale = true
+			atomic.StoreUint32(&layer.stale, 1)
 			layer.lock.Unlock()

 		default:
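Rebuild still takes the layer mutex around the store: the atomic write alone would suffice for Stale() readers, but holding the lock keeps the staleness flip ordered with respect to other writers that mutate the layer under the same mutex. A small sketch of lock-free readers observing a locked writer (the layer type is a hypothetical stand-in):

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

type layer struct {
	lock  sync.Mutex
	stale uint32
}

func (l *layer) Stale() bool { return atomic.LoadUint32(&l.stale) != 0 }

func main() {
	l := &layer{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for !l.Stale() { // lock-free poll, never contends with other readers
				runtime.Gosched()
			}
		}()
	}
	l.lock.Lock() // serializes with other writers touching the layer
	atomic.StoreUint32(&l.stale, 1)
	l.lock.Unlock()
	wg.Wait()
	fmt.Println("all readers observed the stale flag")
}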