core/state/snapshot, tests: sync snap gen + snaps in consensus tests
commit 6e05ccd845 (parent fe8347ea8a)
@@ -96,7 +96,7 @@ func stateTestCmd(ctx *cli.Context) error {
 	for _, st := range test.Subtests() {
 		// Run the test and aggregate the result
 		result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
-		state, err := test.Run(st, cfg)
+		state, err := test.Run(st, cfg, false)
 		// print state root for evmlab tracing
 		if ctx.GlobalBool(MachineFlag.Name) && state != nil {
 			fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
@@ -121,6 +121,8 @@ type CacheConfig struct {
 	TrieDirtyDisabled bool          // Whether to disable trie write caching and GC altogether (archive node)
 	TrieTimeLimit     time.Duration // Time limit after which to flush the current in-memory trie to disk
 	SnapshotLimit     int           // Memory allowance (MB) to use for caching snapshot entries in memory
+
+	SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
 }

 // BlockChain represents the canonical chain given a database with a genesis
@@ -303,7 +305,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
 	}
 	// Load any existing snapshot, regenerating it if loading failed
 	if bc.cacheConfig.SnapshotLimit > 0 {
-		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root())
+		bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, bc.CurrentBlock().Root(), !bc.cacheConfig.SnapshotWait)
	}
 	// Take ownership of this particular state
 	go bc.update()
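Note the inversion here: the new trailing argument to snapshot.New is async, and the chain passes !SnapshotWait, so setting SnapshotWait in the cache config makes snapshot construction synchronous. Below is a minimal self-contained sketch of that pattern; the names mirror the diff, but the types are stand-ins, not the real go-ethereum API.

package main

import (
	"fmt"
	"time"
)

// layer mimics diskLayer: genPending is closed exactly once when
// background generation completes.
type layer struct {
	genPending chan struct{}
}

func (l *layer) generate() {
	time.Sleep(50 * time.Millisecond) // stand-in for snapshot generation work
	close(l.genPending)               // broadcast completion to every waiter
}

// newLayer mimics snapshot.New: async=true returns immediately while
// generation continues in the background; async=false blocks until done.
func newLayer(async bool) *layer {
	l := &layer{genPending: make(chan struct{})}
	go l.generate()
	if !async {
		<-l.genPending
	}
	return l
}

func main() {
	l := newLayer(false) // synchronous: generation has finished on return
	select {
	case <-l.genPending:
		fmt.Println("generation complete")
	default:
		fmt.Println("still generating")
	}
}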
@@ -37,8 +37,9 @@ type diskLayer struct {
 	root  common.Hash // Root hash of the base snapshot
 	stale bool        // Signals that the layer became stale (state progressed)

 	genMarker []byte // Marker for the state that's indexed during initial layer generation
-	genAbort  chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer
+	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
+	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

 	lock sync.RWMutex
 }
@@ -101,12 +101,13 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i
 	rawdb.WriteSnapshotRoot(diskdb, root)

 	base := &diskLayer{
 		diskdb:    diskdb,
 		triedb:    triedb,
 		root:      root,
 		cache:     fastcache.New(cache * 1024 * 1024),
 		genMarker: []byte{}, // Initialized but empty!
-		genAbort:  make(chan chan *generatorStats),
+		genPending: make(chan struct{}),
+		genAbort:   make(chan chan *generatorStats),
 	}
 	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
 	return base
@@ -252,6 +253,7 @@ func (dl *diskLayer) generate(stats *generatorStats) {

 	dl.lock.Lock()
 	dl.genMarker = nil
+	close(dl.genPending)
 	dl.lock.Unlock()

 	// Someone will be looking for us, wait it out
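The close happens under dl.lock together with clearing genMarker, so anyone inspecting the marker under the same lock sees a consistent "done" state. The broadcast works because a receive from a closed channel never blocks, as this self-contained sketch shows:

package main

import "fmt"

func main() {
	done := make(chan struct{})
	close(done)

	// Every receive on a closed channel returns immediately with the zero
	// value, which is what lets many goroutines wait on one completion event.
	<-done
	_, open := <-done
	fmt.Println("channel open:", open) // channel open: false
}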
@@ -108,6 +108,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int,
 	if base.genMarker == nil {
 		base.genMarker = []byte{}
 	}
+	base.genPending = make(chan struct{})
 	base.genAbort = make(chan chan *generatorStats)

 	var origin uint64
@@ -164,7 +164,7 @@ type Tree struct {
 // If the snapshot is missing or inconsistent, the entirety is deleted and will
 // be reconstructed from scratch based on the tries in the key-value store, on a
 // background thread.
-func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *Tree {
+func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
 	// Create a new, empty snapshot tree
 	snap := &Tree{
 		diskdb: diskdb,
@@ -172,6 +172,9 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 		cache:  cache,
 		layers: make(map[common.Hash]snapshot),
 	}
+	if !async {
+		defer snap.waitBuild()
+	}
 	// Attempt to load a previously persisted snapshot and rebuild one if failed
 	head, err := loadSnapshot(diskdb, triedb, cache, root)
 	if err != nil {
@@ -187,6 +190,27 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
 	return snap
 }

+// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
+// to be used by tests to ensure we're testing what we believe we are.
+func (t *Tree) waitBuild() {
+	// Find the rebuild termination channel
+	var done chan struct{}
+
+	t.lock.RLock()
+	for _, layer := range t.layers {
+		if layer, ok := layer.(*diskLayer); ok {
+			done = layer.genPending
+			break
+		}
+	}
+	t.lock.RUnlock()
+
+	// Wait until the snapshot is generated
+	if done != nil {
+		<-done
+	}
+}
+
 // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
 // snapshot is maintained for that block.
 func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
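waitBuild scans the layer map for the single disk layer via a type assertion, grabs its genPending under a read lock, then waits outside the lock so generation can finish. A self-contained analogue of that lookup, with stand-in types rather than the real snapshot layers:

package main

import "fmt"

// Stand-ins for the snapshot layer types; only the shape matters here.
type snapshotLayer interface{ Root() string }

type diffLayer struct{}

func (*diffLayer) Root() string { return "diff" }

type diskLayer struct{ genPending chan struct{} }

func (*diskLayer) Root() string { return "disk" }

func main() {
	pending := make(chan struct{})
	layers := map[string]snapshotLayer{
		"diff": &diffLayer{},
		"disk": &diskLayer{genPending: pending},
	}
	// As in waitBuild: pick out the unique disk layer by type assertion.
	// The inner `layer` deliberately shadows the loop variable.
	var done chan struct{}
	for _, layer := range layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	close(pending) // simulate the generator finishing
	if done != nil {
		<-done // returns because the channel is closed
	}
	fmt.Println("disk layer generation finished")
}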
@@ -477,11 +501,12 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
 		log.Crit("Failed to write leftover snapshot", "err", err)
 	}
 	res := &diskLayer{
 		root:      bottom.root,
 		cache:     base.cache,
 		diskdb:    base.diskdb,
 		triedb:    base.triedb,
 		genMarker: base.genMarker,
+		genPending: base.genPending,
 	}
 	// If snapshot generation hasn't finished yet, port over all the starts and
 	// continue where the previous round left off.
@@ -168,7 +168,7 @@ func TestPrestateTracerCreate2(t *testing.T) {
 		Code:    []byte{},
 		Balance: big.NewInt(500000000000000),
 	}
-	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc)
+	statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)

 	// Create the tracer, the EVM environment and run it
 	tracer, err := New("prestateTracer")
@@ -242,7 +242,7 @@ func TestCallTracer(t *testing.T) {
 			GasLimit: uint64(test.Context.GasLimit),
 			GasPrice: tx.GasPrice(),
 		}
-		statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc)
+		statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)

 		// Create the tracer, the EVM environment and run it
 		tracer, err := New("callTracer")
@@ -45,11 +45,13 @@ func TestBlockchain(t *testing.T) {
 	bt.skipLoad(`.*randomStatetest94.json.*`)

 	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
-		if err := bt.checkFailure(t, name, test.Run()); err != nil {
-			t.Error(err)
+		if err := bt.checkFailure(t, name+"/trie", test.Run(false)); err != nil {
+			t.Errorf("test without snapshotter failed: %v", err)
+		}
+		if err := bt.checkFailure(t, name+"/snap", test.Run(true)); err != nil {
+			t.Errorf("test with snapshotter failed: %v", err)
 		}
 	})

 	// There is also a LegacyTests folder, containing blockchain tests generated
 	// prior to Istanbul. However, they are all derived from GeneralStateTests,
 	// which run natively, so there's no reason to run them here.
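Each consensus test now runs twice, once per state backend, with "/trie" and "/snap" name suffixes so a failure identifies which backend broke; the state and VM suites below follow the same shape. A minimal sketch of the pattern using Go's testing package (runOne is a hypothetical stand-in for the diff's test.Run):

package example

import "testing"

// runOne stands in for test.Run(snapshotter bool) from the diff.
func runOne(snapshotter bool) error { return nil }

func TestBothBackends(t *testing.T) {
	for _, mode := range []struct {
		suffix      string
		snapshotter bool
	}{
		{"trie", false}, // plain trie-backed state
		{"snap", true},  // snapshot-accelerated state
	} {
		mode := mode
		t.Run(mode.suffix, func(t *testing.T) {
			if err := runOne(mode.snapshotter); err != nil {
				t.Errorf("run with snapshotter=%v failed: %v", mode.snapshotter, err)
			}
		})
	}
}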
@@ -94,7 +94,7 @@ type btHeaderMarshaling struct {
 	Timestamp math.HexOrDecimal64
 }

-func (t *BlockTest) Run() error {
+func (t *BlockTest) Run(snapshotter bool) error {
 	config, ok := Forks[t.json.Network]
 	if !ok {
 		return UnsupportedForkError{t.json.Network}
@@ -118,7 +118,12 @@ func (t *BlockTest) Run() error {
 	} else {
 		engine = ethash.NewShared()
 	}
-	chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieCleanLimit: 0}, config, engine, vm.Config{}, nil)
+	cache := &core.CacheConfig{TrieCleanLimit: 0}
+	if snapshotter {
+		cache.SnapshotLimit = 1
+		cache.SnapshotWait = true
+	}
+	chain, err := core.NewBlockChain(db, cache, config, engine, vm.Config{}, nil)
 	if err != nil {
 		return err
 	}
@@ -63,10 +63,17 @@ func TestState(t *testing.T) {
 			subtest := subtest
 			key := fmt.Sprintf("%s/%d", subtest.Fork, subtest.Index)
 			name := name + "/" + key
-			t.Run(key, func(t *testing.T) {
+
+			t.Run(key+"/trie", func(t *testing.T) {
 				withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
-					_, err := test.Run(subtest, vmconfig)
-					return st.checkFailure(t, name, err)
+					_, err := test.Run(subtest, vmconfig, false)
+					return st.checkFailure(t, name+"/trie", err)
+				})
+			})
+			t.Run(key+"/snap", func(t *testing.T) {
+				withTrace(t, test.gasLimit(subtest), func(vmconfig vm.Config) error {
+					_, err := test.Run(subtest, vmconfig, true)
+					return st.checkFailure(t, name+"/snap", err)
 				})
 			})
 		}
@@ -24,6 +24,8 @@ import (
 	"strconv"
 	"strings"

+	"github.com/ethereum/go-ethereum/core/state/snapshot"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/common/math"
@@ -145,8 +147,8 @@ func (t *StateTest) Subtests() []StateSubtest {
 }

 // Run executes a specific subtest and verifies the post-state and logs
-func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, error) {
-	statedb, root, err := t.RunNoVerify(subtest, vmconfig)
+func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, error) {
+	statedb, root, err := t.RunNoVerify(subtest, vmconfig, snapshotter)
 	if err != nil {
 		return statedb, err
 	}
@@ -163,14 +165,14 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD
 }

 // RunNoVerify runs a specific subtest and returns the statedb and post-state root
-func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config) (*state.StateDB, common.Hash, error) {
+func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapshotter bool) (*state.StateDB, common.Hash, error) {
 	config, eips, err := getVMConfig(subtest.Fork)
 	if err != nil {
 		return nil, common.Hash{}, UnsupportedForkError{subtest.Fork}
 	}
 	vmconfig.ExtraEips = eips
 	block := t.genesis(config).ToBlock(nil)
-	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre)
+	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)

 	post := t.json.Post[subtest.Fork][subtest.Index]
 	msg, err := t.json.Tx.toMessage(post)
@@ -204,7 +206,7 @@ func (t *StateTest) gasLimit(subtest StateSubtest) uint64 {
 	return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas]
 }

-func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB {
+func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) *state.StateDB {
 	sdb := state.NewDatabase(db)
 	statedb, _ := state.New(common.Hash{}, sdb, nil)
 	for addr, a := range accounts {
@@ -217,7 +219,12 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
 	}
 	// Commit and re-open to start with a clean state.
 	root, _ := statedb.Commit(false)
-	statedb, _ = state.New(root, sdb, nil)
+
+	var snaps *snapshot.Tree
+	if snapshotter {
+		snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false)
+	}
+	statedb, _ = state.New(root, sdb, snaps)
 	return statedb
 }

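MakePreState now optionally builds a snapshot tree over the freshly committed root and hands it to state.New; declaring snaps up front and leaving it nil keeps the non-snapshot path unchanged, since the diff passes nil there today. A self-contained sketch of that optional-dependency pattern, with stand-in types in place of the real go-ethereum ones:

package main

import "fmt"

// Tree stands in for *snapshot.Tree; consumers must tolerate a nil value.
type Tree struct{ root string }

func newTree(root string) *Tree { return &Tree{root: root} }

// newState mimics state.New: a nil tree simply disables snapshot reads.
func newState(root string, snaps *Tree) string {
	if snaps == nil {
		return "state[" + root + "] (trie only)"
	}
	return "state[" + root + "] (snapshot-backed)"
}

func makePreState(snapshotter bool) string {
	root := "0xroot"
	// Mirrors the diff: the pointer is declared up front and only
	// assigned when the caller asked for a snapshotter.
	var snaps *Tree
	if snapshotter {
		snaps = newTree(root)
	}
	return newState(root, snaps)
}

func main() {
	fmt.Println(makePreState(false))
	fmt.Println(makePreState(true))
}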
@@ -45,7 +45,6 @@ type ttFork struct {
 }

 func (tt *TransactionTest) Run(config *params.ChainConfig) error {
-
 	validateTx := func(rlpData hexutil.Bytes, signer types.Signer, isHomestead bool, isIstanbul bool) (*common.Address, *common.Hash, error) {
 		tx := new(types.Transaction)
 		if err := rlp.DecodeBytes(rlpData, tx); err != nil {
@@ -30,7 +30,10 @@ func TestVM(t *testing.T) {

 	vmt.walk(t, vmTestDir, func(t *testing.T, name string, test *VMTest) {
 		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error {
-			return vmt.checkFailure(t, name, test.Run(vmconfig))
+			return vmt.checkFailure(t, name+"/trie", test.Run(vmconfig, false))
+		})
+		withTrace(t, test.json.Exec.GasLimit, func(vmconfig vm.Config) error {
+			return vmt.checkFailure(t, name+"/snap", test.Run(vmconfig, true))
 		})
 	})
 }
@@ -78,8 +78,8 @@ type vmExecMarshaling struct {
 	GasPrice *math.HexOrDecimal256
 }

-func (t *VMTest) Run(vmconfig vm.Config) error {
-	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre)
+func (t *VMTest) Run(vmconfig vm.Config, snapshotter bool) error {
+	statedb := MakePreState(rawdb.NewMemoryDatabase(), t.json.Pre, snapshotter)
 	ret, gasRemaining, err := t.exec(statedb, vmconfig)

 	if t.json.GasRemaining == nil {