core/state/snapshot, tests: sync snap gen + snaps in consensus tests

Péter Szilágyi 2020-03-03 09:10:23 +02:00
parent fe8347ea8a
commit 6e05ccd845
GPG Key ID: E9AE538CEDF8293D
14 changed files with 90 additions and 36 deletions

cmd/evm/staterunner.go

@@ -96,7 +96,7 @@ func stateTestCmd(ctx *cli.Context) error {
 	for _, st := range test.Subtests() {
 		// Run the test and aggregate the result
 		result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
-		state, err := test.Run(st, cfg)
+		state, err := test.Run(st, cfg, false)
 		// print state root for evmlab tracing
 		if ctx.GlobalBool(MachineFlag.Name) && state != nil {
 			fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))

core/blockchain.go

@@ -121,6 +121,8 @@ type CacheConfig struct {
 	TrieDirtyDisabled bool          // Whether to disable trie write caching and GC altogether (archive node)
 	TrieTimeLimit     time.Duration // Time limit after which to flush the current in-memory trie to disk
 	SnapshotLimit     int           // Memory allowance (MB) to use for caching snapshot entries in memory
+
+	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
 }

 // BlockChain represents the canonical chain given a database with a genesis

@@ -303,7 +305,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	if bc.cacheConfig.SnapshotLimit > 0 {
-		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root())
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
 	}
 	// Take ownership of this particular state
 	go bc.update()
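
Note (not from the commit): a minimal sketch of how a test could drive the new SnapshotWait knob so that NewBlockChain only returns once the base snapshot layer is generated. TestSnapshotWait and the package name are made up for illustration; the genesis/engine plumbing mirrors what tests/block_test_util.go does below.

	package tests_sketch // hypothetical package

	import (
		"testing"

		"github.com/ethereum/go-ethereum/consensus/ethash"
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/core/vm"
		"github.com/ethereum/go-ethereum/params"
	)

	func TestSnapshotWait(t *testing.T) {
		db := rawdb.NewMemoryDatabase()
		gspec := &core.Genesis{Config: params.TestChainConfig}
		gspec.MustCommit(db)

		cache := &core.CacheConfig{
			SnapshotLimit: 1,    // enable the snapshotter with a 1MB cache
			SnapshotWait:  true, // passes async=false down to snapshot.New
		}
		chain, err := core.NewBlockChain(db, cache, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil)
		if err != nil {
			t.Fatal(err)
		}
		defer chain.Stop()
		// Snapshot generation has already completed at this point, so
		// assertions against snapshot state are deterministic.
	}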

core/state/snapshot/disklayer.go

@@ -37,8 +37,9 @@ type diskLayer struct {
 	root  common.Hash // Root hash of the base snapshot
 	stale bool        // Signals that the layer became stale (state progressed)

-	genMarker []byte                    // Marker for the state that's indexed during initial layer generation
-	genAbort  chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer
+	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
+	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
+	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

 	lock sync.RWMutex
 }

core/state/snapshot/generate.go

@@ -101,12 +101,13 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 	rawdb.WriteSnapshotRoot(diskdb, root)

 	base := &diskLayer{
-		diskdb:    diskdb,
-		triedb:    triedb,
-		root:      root,
-		cache:     fastcache.New(cache * 1024 * 1024),
-		genMarker: []byte{}, // Initialized but empty!
-		genAbort:  make(chan chan *generatorStats),
+		diskdb:     diskdb,
+		triedb:     triedb,
+		root:       root,
+		cache:      fastcache.New(cache * 1024 * 1024),
+		genMarker:  []byte{}, // Initialized but empty!
+		genPending: make(chan struct{}),
+		genAbort:   make(chan chan *generatorStats),
 	}
 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
 	return base

@@ -252,6 +253,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {

 	dl.lock.Lock()
 	dl.genMarker = nil
+	close(dl.genPending)
 	dl.lock.Unlock()

 	// Someone will be looking for us, wait it out
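
Aside (illustration, not from the commit): genPending uses the standard Go completion-broadcast idiom, where closing a chan struct{} wakes every waiter at once and any later receive returns immediately. A standalone sketch of the pattern:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		pending := make(chan struct{}) // plays the role of diskLayer.genPending

		go func() {
			time.Sleep(100 * time.Millisecond) // stand-in for snapshot generation
			close(pending)                     // broadcast completion to all waiters
		}()

		<-pending // like Tree.waitBuild: blocks until the channel is closed
		fmt.Println("generation finished")
	}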

core/state/snapshot/journal.go

@@ -108,6 +108,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	if base.genMarker == nil {
 		base.genMarker = []byte{}
 	}
+	base.genPending = make(chan struct{})
 	base.genAbort = make(chan chan *generatorStats)

 	var origin uint64

core/state/snapshot/snapshot.go

@@ -164,7 +164,7 @@ type Tree struct {
 // If the snapshot is missing or inconsistent, the entirety is deleted and will
 // be reconstructed from scratch based on the tries in the key-value store, on a
 // background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *Tree {
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
 		diskdb: diskdb,

@@ -172,6 +172,9 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		cache:  cache,
 		layers: make(map[common.Hash]snapshot),
 	}
+	if !async {
+		defer snap.waitBuild()
+	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
 	head, err := loadSnapshot(diskdb, triedb, cache, root)
 	if err != nil {

@@ -187,6 +190,27 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 	return snap
 }

+// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
+// to be used by tests to ensure we're testing what we believe we are.
+func (t *Tree) waitBuild() {
+	// Find the rebuild termination channel
+	var done chan struct{}
+
+	t.lock.RLock()
+	for _, layer := range t.layers {
+		if layer, ok := layer.(*diskLayer); ok {
+			done = layer.genPending
+			break
+		}
+	}
+	t.lock.RUnlock()
+
+	// Wait until the snapshot is generated
+	if done != nil {
+		<-done
+	}
+}
+
 // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 // snapshot is maintained for that block.
 func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {

@@ -477,11 +501,12 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
 	res := &diskLayer{
-		root:      bottom.root,
-		cache:     base.cache,
-		diskdb:    base.diskdb,
-		triedb:    base.triedb,
-		genMarker: base.genMarker,
+		root:       bottom.root,
+		cache:      base.cache,
+		diskdb:     base.diskdb,
+		triedb:     base.triedb,
+		genMarker:  base.genMarker,
+		genPending: base.genPending,
 	}
 	// If snapshot generation hasn't finished yet, port over all the starts and
 	// continue where the previous round left off.
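
For context (a fragment, not from the commit): with the extra async parameter, a test can build the tree synchronously and query it immediately; db, triedb, root and t are assumed to be provided by the surrounding test.

	// async=false makes New defer waitBuild, so the disk layer is fully
	// generated before New returns and Snapshot(root) cannot race generation.
	snaps := snapshot.New(db, triedb, 1, root, false)
	if snap := snaps.Snapshot(root); snap == nil {
		t.Fatalf("snapshot missing for root %x", root)
	}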

eth/tracers/tracers_test.go

@@ -168,7 +168,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
 		Code:    []byte{},
 		Balance: big.NewInt(500000000000000),
 	}
-	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc)
+	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)

 	// Create the tracer, the EVM environment and run it
 	tracer, err := New("prestateTracer")

@@ -242,7 +242,7 @@ func TestCallTracer(t *testing.T) {
 				GasLimit:    uint64(test.Context.GasLimit),
 				GasPrice:    tx.GasPrice(),
 			}
-			statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc)
+			statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)

 			// Create the tracer, the EVM environment and run it
 			tracer, err := New("callTracer")

tests/block_test.go

@@ -45,11 +45,13 @@ func TestBlockchain(t *testing.T) {
 	bt.skipLoad(`.*randomStatetest94.json.*`)

 	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
-		if err := bt.checkFailure(t, name, test.Run()); err != nil {
-			t.Error(err)
+		if err := bt.checkFailure(t, name+"/trie", test.Run(false)); err != nil {
+			t.Errorf("test without snapshotter failed: %v", err)
+		}
+		if err := bt.checkFailure(t, name+"/snap", test.Run(true)); err != nil {
+			t.Errorf("test with snapshotter failed: %v", err)
 		}
 	})
 	// There is also a LegacyTests folder, containing blockchain tests generated
 	// prior to Istanbul. However, they are all derived from GeneralStateTests,
 	// which run natively, so there's no reason to run them here.

tests/block_test_util.go

@@ -94,7 +94,7 @@ type btHeaderMarshaling struct {
 	Timestamp math.HexOrDecimal64
 }

-func (t *BlockTest) Run() error {
+func (t *BlockTest) Run(snapshotter bool) error {
 	config, ok := Forks[t.json.Network]
 	if !ok {
 		return UnsupportedForkError{t.json.Network}

@@ -118,7 +118,12 @@ func (t *BlockTest) Run() error {
 	} else {
 		engine = ethash.NewShared()
 	}
-	chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanLimit: 0}, config, engine, vm.Config{}, nil)
+	cache := &core.CacheConfig{TrieCleanLimit: 0}
+	if snapshotter {
+		cache.SnapshotLimit = 1
+		cache.SnapshotWait = true
+	}
+	chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil)
 	if err != nil {
 		return err
 	}

tests/state_test.go

@@ -63,10 +63,17 @@ func TestState(t *testing.T) {
 			subtest := subtest
 			key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
 			name := name + "/" + key
-			t.Run(key, func(t *testing.T) {
+
+			t.Run(key+"/trie", func(t *testing.T) {
 				withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
-					_, err := test.Run(subtest, vmconfig)
-					return st.checkFailure(t, name, err)
+					_, err := test.Run(subtest, vmconfig, false)
+					return st.checkFailure(t, name+"/trie", err)
+				})
+			})
+			t.Run(key+"/snap", func(t *testing.T) {
+				withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+					_, err := test.Run(subtest, vmconfig, true)
+					return st.checkFailure(t, name+"/snap", err)
 				})
 			})
 		}
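
The "/trie" and "/snap" pairing above is repeated by hand in each harness; a hypothetical helper (runBothModes is not part of the commit) showing the shape of the pattern:

	// runBothModes runs one test body twice, once against the plain trie and
	// once against the snapshot-backed state, naming subtests as the commit does.
	func runBothModes(t *testing.T, key string, body func(snapshotter bool) error) {
		for _, mode := range []struct {
			suffix      string
			snapshotter bool
		}{
			{"/trie", false},
			{"/snap", true},
		} {
			mode := mode
			t.Run(key+mode.suffix, func(t *testing.T) {
				if err := body(mode.snapshotter); err != nil {
					t.Error(err)
				}
			})
		}
	}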

tests/state_test_util.go

@@ -24,6 +24,8 @@ import (
 	"strconv"
 	"strings"

+	"github.com/ethereum/go-ethereum/core/state/snapshot"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"

@@ -145,8 +147,8 @@ func (t *StateTest) Subtests() []StateSubtest {
 }

 // Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, error) {
-	statedb, root, err := t.RunNoVerify(subtest, vmconfig)
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, error) {
+	statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
 	if err != nil {
 		return statedb, err
 	}

@@ -163,14 +165,14 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD
 }

 // RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) {
 	config, eips, err := getVMConfig(subtest.Fork)
 	if err != nil {
 		return nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
 	}
 	vmconfig.ExtraEips = eips
 	block := t.genesis(config).ToBlock(nil)
-	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre)
+	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)

 	post := t.json.Post[subtest.Fork][subtest.Index]
 	msg, err := t.json.Tx.toMessage(post)

@@ -204,7 +206,7 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
 	return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
 }

-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) *state.StateDB {
 	sdb := state.NewDatabase(db)
 	statedb, _ := state.New(common.Hash{}, sdb, nil)
 	for addr, a := range accounts {

@@ -217,7 +219,12 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
 	}

 	// Commit and re-open to start with a clean state.
 	root, _ := statedb.Commit(false)
-	statedb, _ = state.New(root, sdb, nil)
+
+	var snaps *snapshot.Tree
+	if snapshotter {
+		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
+	}
+	statedb, _ = state.New(root, sdb, snaps)
 	return statedb
 }
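
A usage sketch (hypothetical test, not from the commit): with snapshotter=true, MakePreState hands state.New a snapshot tree that is already complete, because snapshot.New above is called with async=false.

	package tests_sketch // hypothetical package

	import (
		"math/big"
		"testing"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core"
		"github.com/ethereum/go-ethereum/core/rawdb"
		"github.com/ethereum/go-ethereum/tests"
	)

	func TestMakePreStateModes(t *testing.T) {
		alloc := core.GenesisAlloc{
			common.HexToAddress("0x01"): {Balance: big.NewInt(1000000)},
		}
		// Trie-backed state, as every test ran before this commit
		trieState := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
		// Snapshot-backed state: same pre-state, reads may be served by the snapshot
		snapState := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, true)

		if trieState == nil || snapState == nil {
			t.Fatal("failed to build pre-state")
		}
	}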

tests/transaction_test_util.go

@@ -45,7 +45,6 @@ type ttFork struct {
 }

 func (tt *TransactionTest) Run(config *params.ChainConfig) error {
-
 	validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) {
 		tx := new(types.Transaction)
 		if err := rlp.DecodeBytes(rlpData, tx); err != nil {

tests/vm_test.go

@@ -30,7 +30,10 @@ func TestVM(t *testing.T) {

 	vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) {
 		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error {
-			return vmt.checkFailure(t, name, test.Run(vmconfig))
+			return vmt.checkFailure(t, name+"/trie", test.Run(vmconfig, false))
+		})
+		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error {
+			return vmt.checkFailure(t, name+"/snap", test.Run(vmconfig, true))
 		})
 	})
 }

tests/vm_test_util.go

@@ -78,8 +78,8 @@ type vmExecMarshaling struct {
 	GasPrice *math.HexOrDecimal256
 }

-func (t *VMTest) Run(vmconfig vm.Config) error {
-	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre)
+func (t *VMTest) Run(vmconfig vm.Config, snapshotter bool) error {
+	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
 	ret, gasRemaining, err := t.exec(statedb, vmconfig)

 	if t.json.GasRemaining == nil {